From e629b2844e24ef5ff4853aa6433e770cf18b54c2 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Wed, 1 Feb 2023 19:13:13 +0000 Subject: [PATCH] Regenerated Clients --- .../6e78833d838b49be987e932e4b3e9f07.json | 8 + .../8446fc9b264641ef8488ba9f0ef2b86d.json | 8 + .../8fc5e2f4fe94477e8e7e2c9b9acfe9ad.json | 8 + .../b65e002a70cc4554b8c48a85a7731bc7.json | 8 + .../fde248e3fa99421d9c4cebb91467449d.json | 8 + .../api_op_ListAnomaliesForInsight.go | 3 + service/devopsguru/serializers.go | 21 ++ service/devopsguru/types/types.go | 9 + service/fms/internal/endpoints/endpoints.go | 6 + .../forecast/api_op_CreateAutoPredictor.go | 35 +- service/forecast/api_op_CreateDataset.go | 41 ++- service/forecast/api_op_CreateDatasetGroup.go | 10 +- .../forecast/api_op_CreateDatasetImportJob.go | 29 +- .../forecast/api_op_CreateExplainability.go | 7 +- .../api_op_CreateExplainabilityExport.go | 14 +- service/forecast/api_op_CreateForecast.go | 10 +- .../api_op_CreateForecastExportJob.go | 32 +- service/forecast/api_op_CreatePredictor.go | 24 +- ...api_op_CreatePredictorBacktestExportJob.go | 10 +- .../forecast/api_op_CreateWhatIfAnalysis.go | 8 +- .../api_op_CreateWhatIfForecastExport.go | 20 +- .../forecast/api_op_DescribeAutoPredictor.go | 6 +- service/forecast/api_op_DescribeDataset.go | 4 +- .../api_op_DescribeDatasetImportJob.go | 7 +- .../forecast/api_op_DescribeExplainability.go | 4 +- .../api_op_DescribeExplainabilityExport.go | 6 +- service/forecast/api_op_DescribePredictor.go | 8 +- ...i_op_DescribePredictorBacktestExportJob.go | 6 +- .../forecast/api_op_DescribeWhatIfForecast.go | 2 +- .../api_op_DescribeWhatIfForecastExport.go | 6 +- service/forecast/api_op_GetAccuracyMetrics.go | 4 +- service/forecast/api_op_TagResource.go | 10 +- service/forecast/deserializers.go | 18 + service/forecast/serializers.go | 5 + service/forecast/types/enums.go | 18 + service/forecast/types/types.go | 110 +++--- .../iam/api_op_CreateOpenIDConnectProvider.go | 12 +- service/iam/api_op_CreateRole.go | 11 +- service/iam/api_op_CreateUser.go | 11 +- service/iam/api_op_DeleteRole.go | 29 +- .../iam/api_op_PutRolePermissionsBoundary.go | 11 +- .../iam/api_op_PutUserPermissionsBoundary.go | 11 +- service/iam/api_op_SimulateCustomPolicy.go | 11 +- service/iam/api_op_SimulatePrincipalPolicy.go | 38 +- ...p_UpdateOpenIDConnectProviderThumbprint.go | 6 +- .../internal/endpoints/endpoints.go | 13 +- .../kinesis/internal/endpoints/endpoints.go | 36 ++ service/mediatailor/api_op_CreateProgram.go | 6 + service/mediatailor/api_op_DescribeProgram.go | 6 + service/mediatailor/api_op_UpdateProgram.go | 165 +++++++++ service/mediatailor/deserializers.go | 328 ++++++++++++++++++ service/mediatailor/generated.json | 1 + service/mediatailor/serializers.go | 156 +++++++++ service/mediatailor/types/types.go | 39 +++ service/mediatailor/validators.go | 83 +++++ service/ram/internal/endpoints/endpoints.go | 36 ++ service/sns/api_op_CreateTopic.go | 7 - service/sns/api_op_GetTopicAttributes.go | 7 - service/sns/api_op_SetTopicAttributes.go | 98 +++++- service/swf/internal/endpoints/endpoints.go | 36 ++ 60 files changed, 1453 insertions(+), 237 deletions(-) create mode 100644 .changelog/6e78833d838b49be987e932e4b3e9f07.json create mode 100644 .changelog/8446fc9b264641ef8488ba9f0ef2b86d.json create mode 100644 .changelog/8fc5e2f4fe94477e8e7e2c9b9acfe9ad.json create mode 100644 .changelog/b65e002a70cc4554b8c48a85a7731bc7.json create mode 100644 .changelog/fde248e3fa99421d9c4cebb91467449d.json create mode 100644 
service/mediatailor/api_op_UpdateProgram.go diff --git a/.changelog/6e78833d838b49be987e932e4b3e9f07.json b/.changelog/6e78833d838b49be987e932e4b3e9f07.json new file mode 100644 index 00000000000..b6899b5d9a6 --- /dev/null +++ b/.changelog/6e78833d838b49be987e932e4b3e9f07.json @@ -0,0 +1,8 @@ +{ + "id": "6e78833d-838b-49be-987e-932e4b3e9f07", + "type": "documentation", + "description": "Documentation updates for AWS Identity and Access Management (IAM).", + "modules": [ + "service/iam" + ] +} \ No newline at end of file diff --git a/.changelog/8446fc9b264641ef8488ba9f0ef2b86d.json b/.changelog/8446fc9b264641ef8488ba9f0ef2b86d.json new file mode 100644 index 00000000000..58cc873fe8c --- /dev/null +++ b/.changelog/8446fc9b264641ef8488ba9f0ef2b86d.json @@ -0,0 +1,8 @@ +{ + "id": "8446fc9b-2646-41ef-8488-ba9f0ef2b86d", + "type": "feature", + "description": "The AWS Elemental MediaTailor SDK for Channel Assembly has added support for program updates, and the ability to clip the end of VOD sources in programs.", + "modules": [ + "service/mediatailor" + ] +} \ No newline at end of file diff --git a/.changelog/8fc5e2f4fe94477e8e7e2c9b9acfe9ad.json b/.changelog/8fc5e2f4fe94477e8e7e2c9b9acfe9ad.json new file mode 100644 index 00000000000..72bffae8229 --- /dev/null +++ b/.changelog/8fc5e2f4fe94477e8e7e2c9b9acfe9ad.json @@ -0,0 +1,8 @@ +{ + "id": "8fc5e2f4-fe94-477e-8e7e-2c9b9acfe9ad", + "type": "feature", + "description": "This release will enable customer select INCREMENTAL as ImportModel in Forecast's CreateDatasetImportJob API. Verified latest SDK containing required attribute, following https://w.amazon.com/bin/view/AWS-Seer/Launch/Trebuchet/", + "modules": [ + "service/forecast" + ] +} \ No newline at end of file diff --git a/.changelog/b65e002a70cc4554b8c48a85a7731bc7.json b/.changelog/b65e002a70cc4554b8c48a85a7731bc7.json new file mode 100644 index 00000000000..ab90d03d526 --- /dev/null +++ b/.changelog/b65e002a70cc4554b8c48a85a7731bc7.json @@ -0,0 +1,8 @@ +{ + "id": "b65e002a-70cc-4554-b8c4-8a85a7731bc7", + "type": "feature", + "description": "This release adds filter support ListAnomalyForInsight API.", + "modules": [ + "service/devopsguru" + ] +} \ No newline at end of file diff --git a/.changelog/fde248e3fa99421d9c4cebb91467449d.json b/.changelog/fde248e3fa99421d9c4cebb91467449d.json new file mode 100644 index 00000000000..e11a319856f --- /dev/null +++ b/.changelog/fde248e3fa99421d9c4cebb91467449d.json @@ -0,0 +1,8 @@ +{ + "id": "fde248e3-fa99-421d-9c4c-ebb91467449d", + "type": "feature", + "description": "Additional attributes added for set-topic-attributes.", + "modules": [ + "service/sns" + ] +} \ No newline at end of file diff --git a/service/devopsguru/api_op_ListAnomaliesForInsight.go b/service/devopsguru/api_op_ListAnomaliesForInsight.go index 6420c4e19ea..dced20a237f 100644 --- a/service/devopsguru/api_op_ListAnomaliesForInsight.go +++ b/service/devopsguru/api_op_ListAnomaliesForInsight.go @@ -39,6 +39,9 @@ type ListAnomaliesForInsightInput struct { // The ID of the Amazon Web Services account. AccountId *string + // Specifies one or more service names that are used to list anomalies. + Filters *types.ListAnomaliesForInsightFilters + // The maximum number of results to return with a single call. To retrieve the // remaining results, make another call with the returned nextToken value. 
MaxResults *int32 diff --git a/service/devopsguru/serializers.go b/service/devopsguru/serializers.go index 2dd5b11d901..dec39cd8a12 100644 --- a/service/devopsguru/serializers.go +++ b/service/devopsguru/serializers.go @@ -1054,6 +1054,13 @@ func awsRestjson1_serializeOpDocumentListAnomaliesForInsightInput(v *ListAnomali ok.String(*v.AccountId) } + if v.Filters != nil { + ok := object.Key("Filters") + if err := awsRestjson1_serializeDocumentListAnomaliesForInsightFilters(v.Filters, ok); err != nil { + return err + } + } + if v.MaxResults != nil { ok := object.Key("MaxResults") ok.Integer(*v.MaxResults) @@ -2432,6 +2439,20 @@ func awsRestjson1_serializeDocumentInsightStatuses(v []types.InsightStatus, valu return nil } +func awsRestjson1_serializeDocumentListAnomaliesForInsightFilters(v *types.ListAnomaliesForInsightFilters, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ServiceCollection != nil { + ok := object.Key("ServiceCollection") + if err := awsRestjson1_serializeDocumentServiceCollection(v.ServiceCollection, ok); err != nil { + return err + } + } + + return nil +} + func awsRestjson1_serializeDocumentListEventsFilters(v *types.ListEventsFilters, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/devopsguru/types/types.go b/service/devopsguru/types/types.go index 7363a2a3060..21f1d5f3b94 100644 --- a/service/devopsguru/types/types.go +++ b/service/devopsguru/types/types.go @@ -480,6 +480,15 @@ type InsightTimeRange struct { noSmithyDocumentSerde } +// Specifies one or more service names that are used to list anomalies. +type ListAnomaliesForInsightFilters struct { + + // A collection of the names of Amazon Web Services services. + ServiceCollection *ServiceCollection + + noSmithyDocumentSerde +} + // Filters you can use to specify which events are returned when ListEvents is // called. type ListEventsFilters struct { diff --git a/service/fms/internal/endpoints/endpoints.go b/service/fms/internal/endpoints/endpoints.go index 8b4a31cc588..ba816d62ce5 100644 --- a/service/fms/internal/endpoints/endpoints.go +++ b/service/fms/internal/endpoints/endpoints.go @@ -201,6 +201,9 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "fms-fips.ap-southeast-2.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, @@ -429,6 +432,9 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, diff --git a/service/forecast/api_op_CreateAutoPredictor.go b/service/forecast/api_op_CreateAutoPredictor.go index c1c9c64f907..a73d0902dc8 100644 --- a/service/forecast/api_op_CreateAutoPredictor.go +++ b/service/forecast/api_op_CreateAutoPredictor.go @@ -67,9 +67,9 @@ type CreateAutoPredictorInput struct { // The data configuration for your dataset group and any additional datasets. DataConfig *types.DataConfig - // An AWS Key Management Service (KMS) key and an AWS Identity and Access - // Management (IAM) role that Amazon Forecast can assume to access the key. You can - // specify this optional object in the CreateDataset and CreatePredictor requests. + // An Key Management Service (KMS) key and an Identity and Access Management (IAM) + // role that Amazon Forecast can assume to access the key. 
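The new Filters field on ListAnomaliesForInsight accepts a ServiceCollection, so callers can scope the listed anomalies to particular Amazon Web Services services. A minimal Go sketch of that call follows; it assumes a devopsguru.Client already built from config.LoadDefaultConfig, the insight ID is a placeholder supplied by the caller, and the Lambda value is just one illustrative member of the existing ServiceName enum.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/devopsguru"
	"github.com/aws/aws-sdk-go-v2/service/devopsguru/types"
)

// listLambdaAnomalies lists the anomalies for one insight, restricted to the
// Lambda service by way of the new Filters field.
func listLambdaAnomalies(ctx context.Context, client *devopsguru.Client, insightID string) (*devopsguru.ListAnomaliesForInsightOutput, error) {
	return client.ListAnomaliesForInsight(ctx, &devopsguru.ListAnomaliesForInsightInput{
		InsightId: aws.String(insightID), // placeholder insight ID supplied by the caller
		Filters: &types.ListAnomaliesForInsightFilters{
			ServiceCollection: &types.ServiceCollection{
				ServiceNames: []types.ServiceName{types.ServiceNameLambda},
			},
		},
	})
}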
You can specify this + // optional object in the CreateDataset and CreatePredictor requests. EncryptionConfig *types.EncryptionConfig // Create an Explainability resource for the predictor. @@ -81,11 +81,30 @@ type CreateAutoPredictorInput struct { // store_id as a dimension to group sales forecasts for each store. ForecastDimensions []string - // The frequency of predictions in a forecast. Valid intervals are Y (Year), M - // (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), - // 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, "Y" - // indicates every year and "5min" indicates every five minutes. The frequency must - // be greater than or equal to the TARGET_TIME_SERIES dataset frequency. When a + // The frequency of predictions in a forecast. Valid intervals are an integer + // followed by Y (Year), M (Month), W (Week), D (Day), H (Hour), and min (Minute). + // For example, "1D" indicates every day and "15min" indicates every 15 minutes. + // You cannot specify a value that would overlap with the next larger frequency. + // That means, for example, you cannot specify a frequency of 60 minutes, because + // that is equivalent to 1 hour. The valid values for each frequency are the + // following: + // + // * Minute - 1-59 + // + // * Hour - 1-23 + // + // * Day - 1-6 + // + // * Week - 1-4 + // + // * Month - + // 1-11 + // + // * Year - 1 + // + // Thus, if you want every other week forecasts, specify "2W". + // Or, if you want quarterly forecasts, you specify "3M". The frequency must be + // greater than or equal to the TARGET_TIME_SERIES dataset frequency. When a // RELATED_TIME_SERIES dataset is provided, the frequency must be equal to the // RELATED_TIME_SERIES dataset frequency. ForecastFrequency *string diff --git a/service/forecast/api_op_CreateDataset.go b/service/forecast/api_op_CreateDataset.go index 1a61143ebde..0d98c7712ac 100644 --- a/service/forecast/api_op_CreateDataset.go +++ b/service/forecast/api_op_CreateDataset.go @@ -92,14 +92,33 @@ type CreateDatasetInput struct { Schema *types.Schema // The frequency of data collection. This parameter is required for - // RELATED_TIME_SERIES datasets. Valid intervals are Y (Year), M (Month), W (Week), - // D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), - // 5min (5 minutes), and 1min (1 minute). For example, "D" indicates every day and - // "15min" indicates every 15 minutes. + // RELATED_TIME_SERIES datasets. Valid intervals are an integer followed by Y + // (Year), M (Month), W (Week), D (Day), H (Hour), and min (Minute). For example, + // "1D" indicates every day and "15min" indicates every 15 minutes. You cannot + // specify a value that would overlap with the next larger frequency. That means, + // for example, you cannot specify a frequency of 60 minutes, because that is + // equivalent to 1 hour. The valid values for each frequency are the following: + // + // * + // Minute - 1-59 + // + // * Hour - 1-23 + // + // * Day - 1-6 + // + // * Week - 1-4 + // + // * Month - 1-11 + // + // * Year + // - 1 + // + // Thus, if you want every other week forecasts, specify "2W". Or, if you want + // quarterly forecasts, you specify "3M". DataFrequency *string - // An AWS Key Management Service (KMS) key and the AWS Identity and Access - // Management (IAM) role that Amazon Forecast can assume to access the key. + // An Key Management Service (KMS) key and the Identity and Access Management (IAM) + // role that Amazon Forecast can assume to access the key. 
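Given the revised integer-plus-unit frequency format described above, a CreateAutoPredictor request for every-other-week forecasts passes "2W". The sketch below assumes an existing forecast.Client and a real dataset group ARN; the predictor name and horizon are placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
)

// createBiweeklyPredictor requests forecasts every other week ("2W"), using the
// integer-plus-unit frequency format described above.
func createBiweeklyPredictor(ctx context.Context, client *forecast.Client, datasetGroupArn string) (*forecast.CreateAutoPredictorOutput, error) {
	return client.CreateAutoPredictor(ctx, &forecast.CreateAutoPredictorInput{
		PredictorName:     aws.String("demand_predictor"), // placeholder name
		ForecastHorizon:   aws.Int32(6),                   // six two-week periods
		ForecastFrequency: aws.String("2W"),               // every other week
		DataConfig: &types.DataConfig{
			DatasetGroupArn: aws.String(datasetGroupArn),
		},
	})
}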
EncryptionConfig *types.EncryptionConfig // The optional metadata that you apply to the dataset to help you categorize and @@ -127,11 +146,11 @@ type CreateDatasetInput struct { // case sensitive. // // * Do not use aws:, AWS:, or any upper or lowercase combination - // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or - // delete tag keys with this prefix. Values can have this prefix. If a tag value - // has aws as its prefix but the key does not, then Forecast considers it to be a - // user tag and will count against the limit of 50 tags. Tags with only the key - // prefix of aws do not count against your tags per resource limit. + // of such as a prefix for keys as it is reserved for Amazon Web Services use. You + // cannot edit or delete tag keys with this prefix. Values can have this prefix. If + // a tag value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags with only + // the key prefix of aws do not count against your tags per resource limit. Tags []types.Tag noSmithyDocumentSerde diff --git a/service/forecast/api_op_CreateDatasetGroup.go b/service/forecast/api_op_CreateDatasetGroup.go index 918db543904..b6edb07a5cb 100644 --- a/service/forecast/api_op_CreateDatasetGroup.go +++ b/service/forecast/api_op_CreateDatasetGroup.go @@ -91,11 +91,11 @@ type CreateDatasetGroupInput struct { // case sensitive. // // * Do not use aws:, AWS:, or any upper or lowercase combination - // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or - // delete tag keys with this prefix. Values can have this prefix. If a tag value - // has aws as its prefix but the key does not, then Forecast considers it to be a - // user tag and will count against the limit of 50 tags. Tags with only the key - // prefix of aws do not count against your tags per resource limit. + // of such as a prefix for keys as it is reserved for Amazon Web Services use. You + // cannot edit or delete tag keys with this prefix. Values can have this prefix. If + // a tag value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags with only + // the key prefix of aws do not count against your tags per resource limit. Tags []types.Tag noSmithyDocumentSerde diff --git a/service/forecast/api_op_CreateDatasetImportJob.go b/service/forecast/api_op_CreateDatasetImportJob.go index 14f285807a5..e7a6dbad62c 100644 --- a/service/forecast/api_op_CreateDatasetImportJob.go +++ b/service/forecast/api_op_CreateDatasetImportJob.go @@ -16,10 +16,10 @@ import ( // bucket and the Amazon Resource Name (ARN) of the dataset that you want to import // the data to. You must specify a DataSource // (https://docs.aws.amazon.com/forecast/latest/dg/API_DataSource.html) object that -// includes an AWS Identity and Access Management (IAM) role that Amazon Forecast -// can assume to access the data, as Amazon Forecast makes a copy of your data and -// processes it in an internal AWS system. For more information, see Set up -// permissions +// includes an Identity and Access Management (IAM) role that Amazon Forecast can +// assume to access the data, as Amazon Forecast makes a copy of your data and +// processes it in an internal Amazon Web Services system. For more information, +// see Set up permissions // (https://docs.aws.amazon.com/forecast/latest/dg/aws-forecast-iam-roles.html). 
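A companion sketch for CreateDataset and CreateDatasetGroup: it uses the daily "1D" DataFrequency form and the optional EncryptionConfig (KMS key plus IAM role) discussed above. The RETAIL domain, the schema attributes, and all names are illustrative placeholders; the KMS key and role ARNs are assumed to exist.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
)

// createRetailDataset creates a daily target time series dataset with optional
// server-side encryption, then puts it in a new dataset group.
func createRetailDataset(ctx context.Context, client *forecast.Client, kmsKeyArn, roleArn string) error {
	ds, err := client.CreateDataset(ctx, &forecast.CreateDatasetInput{
		DatasetName:   aws.String("retail_demand"), // placeholder name
		DatasetType:   types.DatasetTypeTargetTimeSeries,
		Domain:        types.DomainRetail,
		DataFrequency: aws.String("1D"), // daily data
		Schema: &types.Schema{
			Attributes: []types.SchemaAttribute{
				{AttributeName: aws.String("timestamp"), AttributeType: types.AttributeTypeTimestamp},
				{AttributeName: aws.String("item_id"), AttributeType: types.AttributeTypeString},
				{AttributeName: aws.String("demand"), AttributeType: types.AttributeTypeFloat},
			},
		},
		// Optional encryption: the role must be allowed to use the KMS key.
		EncryptionConfig: &types.EncryptionConfig{
			KMSKeyArn: aws.String(kmsKeyArn),
			RoleArn:   aws.String(roleArn),
		},
	})
	if err != nil {
		return err
	}
	_, err = client.CreateDatasetGroup(ctx, &forecast.CreateDatasetGroupInput{
		DatasetGroupName: aws.String("retail_demand_group"), // placeholder name
		Domain:           types.DomainRetail,
		DatasetArns:      []string{aws.ToString(ds.DatasetArn)},
	})
	return err
}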
// The training data must be in CSV or Parquet format. The delimiter must be a // comma (,). You can specify the path to a specific file, the S3 bucket, or to a @@ -50,10 +50,10 @@ func (c *Client) CreateDatasetImportJob(ctx context.Context, params *CreateDatas type CreateDatasetImportJobInput struct { - // The location of the training data to import and an AWS Identity and Access + // The location of the training data to import and an Identity and Access // Management (IAM) role that Amazon Forecast can assume to access the data. The // training data must be stored in an Amazon S3 bucket. If encryption is used, - // DataSource must include an AWS Key Management Service (KMS) key and the IAM role + // DataSource must include an Key Management Service (KMS) key and the IAM role // must allow Amazon Forecast permission to access the key. The KMS key and IAM // role must match those specified in the EncryptionConfig parameter of the // CreateDataset @@ -89,6 +89,11 @@ type CreateDatasetImportJobInput struct { // country code (US), followed by the 5-digit ZIP code (Example: US_98121). GeolocationFormat *string + // Specifies whether the dataset import job is a FULL or INCREMENTAL import. A FULL + // dataset import replaces all of the existing data with the newly imported data. + // An INCREMENTAL import appends the imported data to the existing data. + ImportMode types.ImportMode + // The optional metadata that you apply to the dataset import job to help you // categorize and organize them. Each tag consists of a key and an optional value, // both of which you define. The following basic restrictions apply to tags: @@ -114,12 +119,12 @@ type CreateDatasetImportJobInput struct { // keys and values are case sensitive. // // * Do not use aws:, AWS:, or any upper or - // lowercase combination of such as a prefix for keys as it is reserved for AWS - // use. You cannot edit or delete tag keys with this prefix. Values can have this - // prefix. If a tag value has aws as its prefix but the key does not, then Forecast - // considers it to be a user tag and will count against the limit of 50 tags. Tags - // with only the key prefix of aws do not count against your tags per resource - // limit. + // lowercase combination of such as a prefix for keys as it is reserved for Amazon + // Web Services use. You cannot edit or delete tag keys with this prefix. Values + // can have this prefix. If a tag value has aws as its prefix but the key does not, + // then Forecast considers it to be a user tag and will count against the limit of + // 50 tags. Tags with only the key prefix of aws do not count against your tags per + // resource limit. Tags []types.Tag // A single time zone for every item in your dataset. This option is ideal for diff --git a/service/forecast/api_op_CreateExplainability.go b/service/forecast/api_op_CreateExplainability.go index d9b8dc70e41..ea6a8be9024 100644 --- a/service/forecast/api_op_CreateExplainability.go +++ b/service/forecast/api_op_CreateExplainability.go @@ -119,12 +119,13 @@ type CreateExplainabilityInput struct { // This member is required. ResourceArn *string - // The source of your data, an AWS Identity and Access Management (IAM) role that - // allows Amazon Forecast to access the data and, optionally, an AWS Key Management + // The source of your data, an Identity and Access Management (IAM) role that + // allows Amazon Forecast to access the data and, optionally, an Key Management // Service (KMS) key. 
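With the new ImportMode field, an INCREMENTAL import appends to the data already in the dataset instead of replacing it. The sketch below assumes the dataset ARN, S3 path, and IAM role already exist; the job name and timestamp format are placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
)

// appendNewData imports additional rows into an existing dataset without
// replacing the data that is already there.
func appendNewData(ctx context.Context, client *forecast.Client, datasetArn, s3Path, roleArn string) error {
	_, err := client.CreateDatasetImportJob(ctx, &forecast.CreateDatasetImportJobInput{
		DatasetImportJobName: aws.String("retail_demand_incremental"), // placeholder name
		DatasetArn:           aws.String(datasetArn),
		DataSource: &types.DataSource{
			S3Config: &types.S3Config{
				Path:    aws.String(s3Path),  // e.g. "s3://my-bucket/new-rows.csv" (placeholder)
				RoleArn: aws.String(roleArn), // role Forecast assumes to read the bucket
			},
		},
		TimestampFormat: aws.String("yyyy-MM-dd"),
		// New in this release: append to the existing data instead of replacing it.
		ImportMode: types.ImportModeIncremental,
	})
	return err
}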
DataSource *types.DataSource - // Create an Explainability visualization that is viewable within the AWS console. + // Create an Explainability visualization that is viewable within the Amazon Web + // Services console. EnableVisualization *bool // If TimePointGranularity is set to SPECIFIC, define the last time point for the diff --git a/service/forecast/api_op_CreateExplainabilityExport.go b/service/forecast/api_op_CreateExplainabilityExport.go index 80b47c9d52d..e496a96a0bf 100644 --- a/service/forecast/api_op_CreateExplainabilityExport.go +++ b/service/forecast/api_op_CreateExplainabilityExport.go @@ -14,10 +14,10 @@ import ( // Exports an Explainability resource created by the CreateExplainability // operation. Exported files are exported to an Amazon Simple Storage Service // (Amazon S3) bucket. You must specify a DataDestination object that includes an -// Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that -// Amazon Forecast can assume to access the Amazon S3 bucket. For more information, -// see aws-forecast-iam-roles. The Status of the export job must be ACTIVE before -// you can access the export in your Amazon S3 bucket. To get the status, use the +// Amazon S3 bucket and an Identity and Access Management (IAM) role that Amazon +// Forecast can assume to access the Amazon S3 bucket. For more information, see +// aws-forecast-iam-roles. The Status of the export job must be ACTIVE before you +// can access the export in your Amazon S3 bucket. To get the status, use the // DescribeExplainabilityExport operation. func (c *Client) CreateExplainabilityExport(ctx context.Context, params *CreateExplainabilityExportInput, optFns ...func(*Options)) (*CreateExplainabilityExportOutput, error) { if params == nil { @@ -36,9 +36,9 @@ func (c *Client) CreateExplainabilityExport(ctx context.Context, params *CreateE type CreateExplainabilityExportInput struct { - // The destination for an export job. Provide an S3 path, an AWS Identity and - // Access Management (IAM) role that allows Amazon Forecast to access the location, - // and an AWS Key Management Service (KMS) key (optional). + // The destination for an export job. Provide an S3 path, an Identity and Access + // Management (IAM) role that allows Amazon Forecast to access the location, and an + // Key Management Service (KMS) key (optional). // // This member is required. Destination *types.DataDestination diff --git a/service/forecast/api_op_CreateForecast.go b/service/forecast/api_op_CreateForecast.go index 40cd6b3b276..52293129166 100644 --- a/service/forecast/api_op_CreateForecast.go +++ b/service/forecast/api_op_CreateForecast.go @@ -90,11 +90,11 @@ type CreateForecastInput struct { // case sensitive. // // * Do not use aws:, AWS:, or any upper or lowercase combination - // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or - // delete tag keys with this prefix. Values can have this prefix. If a tag value - // has aws as its prefix but the key does not, then Forecast considers it to be a - // user tag and will count against the limit of 50 tags. Tags with only the key - // prefix of aws do not count against your tags per resource limit. + // of such as a prefix for keys as it is reserved for Amazon Web Services use. You + // cannot edit or delete tag keys with this prefix. Values can have this prefix. If + // a tag value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. 
Tags with only + // the key prefix of aws do not count against your tags per resource limit. Tags []types.Tag // Defines the set of time series that are used to create the forecasts in a diff --git a/service/forecast/api_op_CreateForecastExportJob.go b/service/forecast/api_op_CreateForecastExportJob.go index 1db5a2f0311..7e0c1927154 100644 --- a/service/forecast/api_op_CreateForecastExportJob.go +++ b/service/forecast/api_op_CreateForecastExportJob.go @@ -15,13 +15,13 @@ import ( // Storage Service (Amazon S3) bucket. The forecast file name will match the // following conventions: __ where the component is in Java SimpleDateFormat // (yyyy-MM-ddTHH-mm-ssZ). You must specify a DataDestination object that includes -// an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume -// to access the Amazon S3 bucket. For more information, see -// aws-forecast-iam-roles. For more information, see howitworks-forecast. To get a -// list of all your forecast export jobs, use the ListForecastExportJobs operation. -// The Status of the forecast export job must be ACTIVE before you can access the -// forecast in your Amazon S3 bucket. To get the status, use the -// DescribeForecastExportJob operation. +// an Identity and Access Management (IAM) role that Amazon Forecast can assume to +// access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles. +// For more information, see howitworks-forecast. To get a list of all your +// forecast export jobs, use the ListForecastExportJobs operation. The Status of +// the forecast export job must be ACTIVE before you can access the forecast in +// your Amazon S3 bucket. To get the status, use the DescribeForecastExportJob +// operation. func (c *Client) CreateForecastExportJob(ctx context.Context, params *CreateForecastExportJobInput, optFns ...func(*Options)) (*CreateForecastExportJobOutput, error) { if params == nil { params = &CreateForecastExportJobInput{} @@ -39,11 +39,11 @@ func (c *Client) CreateForecastExportJob(ctx context.Context, params *CreateFore type CreateForecastExportJobInput struct { - // The location where you want to save the forecast and an AWS Identity and Access + // The location where you want to save the forecast and an Identity and Access // Management (IAM) role that Amazon Forecast can assume to access the location. // The forecast must be exported to an Amazon S3 bucket. If encryption is used, - // Destination must include an AWS Key Management Service (KMS) key. The IAM role - // must allow Amazon Forecast permission to access the key. + // Destination must include an Key Management Service (KMS) key. The IAM role must + // allow Amazon Forecast permission to access the key. // // This member is required. Destination *types.DataDestination @@ -86,12 +86,12 @@ type CreateForecastExportJobInput struct { // keys and values are case sensitive. // // * Do not use aws:, AWS:, or any upper or - // lowercase combination of such as a prefix for keys as it is reserved for AWS - // use. You cannot edit or delete tag keys with this prefix. Values can have this - // prefix. If a tag value has aws as its prefix but the key does not, then Forecast - // considers it to be a user tag and will count against the limit of 50 tags. Tags - // with only the key prefix of aws do not count against your tags per resource - // limit. + // lowercase combination of such as a prefix for keys as it is reserved for Amazon + // Web Services use. You cannot edit or delete tag keys with this prefix. 
Values + // can have this prefix. If a tag value has aws as its prefix but the key does not, + // then Forecast considers it to be a user tag and will count against the limit of + // 50 tags. Tags with only the key prefix of aws do not count against your tags per + // resource limit. Tags []types.Tag noSmithyDocumentSerde diff --git a/service/forecast/api_op_CreatePredictor.go b/service/forecast/api_op_CreatePredictor.go index dd7b3ea4a92..a320af9aeae 100644 --- a/service/forecast/api_op_CreatePredictor.go +++ b/service/forecast/api_op_CreatePredictor.go @@ -113,15 +113,15 @@ type CreatePredictorInput struct { AlgorithmArn *string // The LatencyOptimized AutoML override strategy is only available in private beta. - // Contact AWS Support or your account manager to learn more about access - // privileges. Used to overide the default AutoML strategy, which is to optimize - // predictor accuracy. To apply an AutoML strategy that minimizes training time, - // use LatencyOptimized. This parameter is only valid for predictors trained using - // AutoML. + // Contact Amazon Web Services Support or your account manager to learn more about + // access privileges. Used to overide the default AutoML strategy, which is to + // optimize predictor accuracy. To apply an AutoML strategy that minimizes training + // time, use LatencyOptimized. This parameter is only valid for predictors trained + // using AutoML. AutoMLOverrideStrategy types.AutoMLOverrideStrategy - // An AWS Key Management Service (KMS) key and the AWS Identity and Access - // Management (IAM) role that Amazon Forecast can assume to access the key. + // An Key Management Service (KMS) key and the Identity and Access Management (IAM) + // role that Amazon Forecast can assume to access the key. EncryptionConfig *types.EncryptionConfig // Used to override the default evaluation parameters of the specified algorithm. @@ -196,11 +196,11 @@ type CreatePredictorInput struct { // case sensitive. // // * Do not use aws:, AWS:, or any upper or lowercase combination - // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or - // delete tag keys with this prefix. Values can have this prefix. If a tag value - // has aws as its prefix but the key does not, then Forecast considers it to be a - // user tag and will count against the limit of 50 tags. Tags with only the key - // prefix of aws do not count against your tags per resource limit. + // of such as a prefix for keys as it is reserved for Amazon Web Services use. You + // cannot edit or delete tag keys with this prefix. Values can have this prefix. If + // a tag value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags with only + // the key prefix of aws do not count against your tags per resource limit. Tags []types.Tag // The hyperparameters to override for model training. The hyperparameters that you diff --git a/service/forecast/api_op_CreatePredictorBacktestExportJob.go b/service/forecast/api_op_CreatePredictorBacktestExportJob.go index 258d7f692f8..26e5e3db594 100644 --- a/service/forecast/api_op_CreatePredictorBacktestExportJob.go +++ b/service/forecast/api_op_CreatePredictorBacktestExportJob.go @@ -16,8 +16,8 @@ import ( // Parquet files are exported to your specified S3 bucket. The export file names // will match the following conventions: __.csv The component is in Java SimpleDate // format (yyyy-MM-ddTHH-mm-ssZ). 
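The export operations above (forecast, explainability, and predictor backtest exports) all take the same DataDestination shape: an S3 path plus an IAM role, with an optional KMS key. A sketch for CreateForecastExportJob, assuming the forecast ARN, bucket path, and role already exist and using a placeholder job name:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
)

// exportForecast writes a generated forecast to an S3 location that the given
// IAM role can access.
func exportForecast(ctx context.Context, client *forecast.Client, forecastArn, s3Path, roleArn string) error {
	_, err := client.CreateForecastExportJob(ctx, &forecast.CreateForecastExportJobInput{
		ForecastExportJobName: aws.String("retail_demand_export"), // placeholder name
		ForecastArn:           aws.String(forecastArn),
		Destination: &types.DataDestination{
			S3Config: &types.S3Config{
				Path:    aws.String(s3Path),  // e.g. "s3://my-bucket/exports/" (placeholder)
				RoleArn: aws.String(roleArn), // role Forecast assumes to write to the bucket
			},
		},
	})
	return err
}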
You must specify a DataDestination object that -// includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) -// role that Amazon Forecast can assume to access the Amazon S3 bucket. For more +// includes an Amazon S3 bucket and an Identity and Access Management (IAM) role +// that Amazon Forecast can assume to access the Amazon S3 bucket. For more // information, see aws-forecast-iam-roles. The Status of the export job must be // ACTIVE before you can access the export in your Amazon S3 bucket. To get the // status, use the DescribePredictorBacktestExportJob operation. @@ -38,9 +38,9 @@ func (c *Client) CreatePredictorBacktestExportJob(ctx context.Context, params *C type CreatePredictorBacktestExportJobInput struct { - // The destination for an export job. Provide an S3 path, an AWS Identity and - // Access Management (IAM) role that allows Amazon Forecast to access the location, - // and an AWS Key Management Service (KMS) key (optional). + // The destination for an export job. Provide an S3 path, an Identity and Access + // Management (IAM) role that allows Amazon Forecast to access the location, and an + // Key Management Service (KMS) key (optional). // // This member is required. Destination *types.DataDestination diff --git a/service/forecast/api_op_CreateWhatIfAnalysis.go b/service/forecast/api_op_CreateWhatIfAnalysis.go index 20af2798d91..5ca8b4d2b36 100644 --- a/service/forecast/api_op_CreateWhatIfAnalysis.go +++ b/service/forecast/api_op_CreateWhatIfAnalysis.go @@ -19,10 +19,10 @@ import ( // you are a clothing retailer who is considering an end of season sale to clear // space for new styles. After creating a baseline forecast, you can use a what-if // analysis to investigate how different sales tactics might affect your goals. You -// could create a scenario where everything is given a 25% markdown and another -// where everything is given a fixed dollar markdown. You can create a scenario -// where the sale lasts for 1 week and another where the sale lasts for 1 month. -// Your what-if analysis enables you to compare many different scenarios against +// could create a scenario where everything is given a 25% markdown, and another +// where everything is given a fixed dollar markdown. You could create a scenario +// where the sale lasts for one week and another where the sale lasts for one +// month. With a what-if analysis, you can compare many different scenarios against // each other. Note that a what-if analysis is meant to display what the // forecasting model has learned and how it will behave in the scenarios that you // are evaluating. Do not blindly use the results of the what-if analysis to make diff --git a/service/forecast/api_op_CreateWhatIfForecastExport.go b/service/forecast/api_op_CreateWhatIfForecastExport.go index a5f764aa4ed..7a529930d45 100644 --- a/service/forecast/api_op_CreateWhatIfForecastExport.go +++ b/service/forecast/api_op_CreateWhatIfForecastExport.go @@ -15,13 +15,13 @@ import ( // Simple Storage Service (Amazon S3) bucket. The forecast file name will match the // following conventions: ≈__ The component is in Java SimpleDateFormat // (yyyy-MM-ddTHH-mm-ssZ). You must specify a DataDestination object that includes -// an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume -// to access the Amazon S3 bucket. For more information, see -// aws-forecast-iam-roles. For more information, see howitworks-forecast. 
To get a -// list of all your what-if forecast export jobs, use the ListWhatIfForecastExports -// operation. The Status of the forecast export job must be ACTIVE before you can -// access the forecast in your Amazon S3 bucket. To get the status, use the -// DescribeWhatIfForecastExport operation. +// an Identity and Access Management (IAM) role that Amazon Forecast can assume to +// access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles. +// For more information, see howitworks-forecast. To get a list of all your what-if +// forecast export jobs, use the ListWhatIfForecastExports operation. The Status of +// the forecast export job must be ACTIVE before you can access the forecast in +// your Amazon S3 bucket. To get the status, use the DescribeWhatIfForecastExport +// operation. func (c *Client) CreateWhatIfForecastExport(ctx context.Context, params *CreateWhatIfForecastExportInput, optFns ...func(*Options)) (*CreateWhatIfForecastExportOutput, error) { if params == nil { params = &CreateWhatIfForecastExportInput{} @@ -39,11 +39,11 @@ func (c *Client) CreateWhatIfForecastExport(ctx context.Context, params *CreateW type CreateWhatIfForecastExportInput struct { - // The location where you want to save the forecast and an AWS Identity and Access + // The location where you want to save the forecast and an Identity and Access // Management (IAM) role that Amazon Forecast can assume to access the location. // The forecast must be exported to an Amazon S3 bucket. If encryption is used, - // Destination must include an AWS Key Management Service (KMS) key. The IAM role - // must allow Amazon Forecast permission to access the key. + // Destination must include an Key Management Service (KMS) key. The IAM role must + // allow Amazon Forecast permission to access the key. // // This member is required. Destination *types.DataDestination diff --git a/service/forecast/api_op_DescribeAutoPredictor.go b/service/forecast/api_op_DescribeAutoPredictor.go index d838cc6ca4a..8f7bbb5f7fc 100644 --- a/service/forecast/api_op_DescribeAutoPredictor.go +++ b/service/forecast/api_op_DescribeAutoPredictor.go @@ -50,9 +50,9 @@ type DescribeAutoPredictorOutput struct { // the predictor. DatasetImportJobArns []string - // An AWS Key Management Service (KMS) key and an AWS Identity and Access - // Management (IAM) role that Amazon Forecast can assume to access the key. You can - // specify this optional object in the CreateDataset and CreatePredictor requests. + // An Key Management Service (KMS) key and an Identity and Access Management (IAM) + // role that Amazon Forecast can assume to access the key. You can specify this + // optional object in the CreateDataset and CreatePredictor requests. EncryptionConfig *types.EncryptionConfig // The estimated time remaining in minutes for the predictor training job to diff --git a/service/forecast/api_op_DescribeDataset.go b/service/forecast/api_op_DescribeDataset.go index d819caa54c4..7fb11f14c52 100644 --- a/service/forecast/api_op_DescribeDataset.go +++ b/service/forecast/api_op_DescribeDataset.go @@ -71,8 +71,8 @@ type DescribeDatasetOutput struct { // The domain associated with the dataset. Domain types.Domain - // The AWS Key Management Service (KMS) key and the AWS Identity and Access - // Management (IAM) role that Amazon Forecast can assume to access the key. + // The Key Management Service (KMS) key and the Identity and Access Management + // (IAM) role that Amazon Forecast can assume to access the key. 
EncryptionConfig *types.EncryptionConfig // When you create a dataset, LastModificationTime is the same as CreationTime. diff --git a/service/forecast/api_op_DescribeDatasetImportJob.go b/service/forecast/api_op_DescribeDatasetImportJob.go index d60f9d4d14a..ff172c520c1 100644 --- a/service/forecast/api_op_DescribeDatasetImportJob.go +++ b/service/forecast/api_op_DescribeDatasetImportJob.go @@ -64,9 +64,9 @@ type DescribeDatasetImportJobOutput struct { // The size of the dataset in gigabytes (GB) after the import job has finished. DataSize *float64 - // The location of the training data to import and an AWS Identity and Access + // The location of the training data to import and an Identity and Access // Management (IAM) role that Amazon Forecast can assume to access the data. If - // encryption is used, DataSource includes an AWS Key Management Service (KMS) key. + // encryption is used, DataSource includes an Key Management Service (KMS) key. DataSource *types.DataSource // The Amazon Resource Name (ARN) of the dataset that the training data was @@ -92,6 +92,9 @@ type DescribeDatasetImportJobOutput struct { // "CC_POSTALCODE". GeolocationFormat *string + // The import mode of the dataset import job, FULL or INCREMENTAL. + ImportMode types.ImportMode + // The last time the resource was modified. The timestamp depends on the status of // the job: // diff --git a/service/forecast/api_op_DescribeExplainability.go b/service/forecast/api_op_DescribeExplainability.go index bec50ae4f51..e8ec873e810 100644 --- a/service/forecast/api_op_DescribeExplainability.go +++ b/service/forecast/api_op_DescribeExplainability.go @@ -44,8 +44,8 @@ type DescribeExplainabilityOutput struct { // When the Explainability resource was created. CreationTime *time.Time - // The source of your data, an AWS Identity and Access Management (IAM) role that - // allows Amazon Forecast to access the data and, optionally, an AWS Key Management + // The source of your data, an Identity and Access Management (IAM) role that + // allows Amazon Forecast to access the data and, optionally, an Key Management // Service (KMS) key. DataSource *types.DataSource diff --git a/service/forecast/api_op_DescribeExplainabilityExport.go b/service/forecast/api_op_DescribeExplainabilityExport.go index 0170989829b..f05efccaa27 100644 --- a/service/forecast/api_op_DescribeExplainabilityExport.go +++ b/service/forecast/api_op_DescribeExplainabilityExport.go @@ -44,9 +44,9 @@ type DescribeExplainabilityExportOutput struct { // When the Explainability export was created. CreationTime *time.Time - // The destination for an export job. Provide an S3 path, an AWS Identity and - // Access Management (IAM) role that allows Amazon Forecast to access the location, - // and an AWS Key Management Service (KMS) key (optional). + // The destination for an export job. Provide an S3 path, an Identity and Access + // Management (IAM) role that allows Amazon Forecast to access the location, and an + // Key Management Service (KMS) key (optional). Destination *types.DataDestination // The Amazon Resource Name (ARN) of the Explainability export. diff --git a/service/forecast/api_op_DescribePredictor.go b/service/forecast/api_op_DescribePredictor.go index 0089424a43c..c82f19bd33d 100644 --- a/service/forecast/api_op_DescribePredictor.go +++ b/service/forecast/api_op_DescribePredictor.go @@ -66,8 +66,8 @@ type DescribePredictorOutput struct { AutoMLAlgorithmArns []string // The LatencyOptimized AutoML override strategy is only available in private beta. 
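DescribeDatasetImportJob now reports the ImportMode alongside the job status, which makes it easy to confirm whether a job ran as FULL or INCREMENTAL. A small sketch, assuming an existing import job ARN:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
)

// printImportMode prints the status and the newly exposed import mode of a
// dataset import job.
func printImportMode(ctx context.Context, client *forecast.Client, importJobArn string) error {
	out, err := client.DescribeDatasetImportJob(ctx, &forecast.DescribeDatasetImportJobInput{
		DatasetImportJobArn: aws.String(importJobArn),
	})
	if err != nil {
		return err
	}
	fmt.Printf("job %s: status=%s importMode=%s\n",
		aws.ToString(out.DatasetImportJobName), aws.ToString(out.Status), out.ImportMode)
	return nil
}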
- // Contact AWS Support or your account manager to learn more about access - // privileges. The AutoML strategy used to train the predictor. Unless + // Contact Amazon Web Services Support or your account manager to learn more about + // access privileges. The AutoML strategy used to train the predictor. Unless // LatencyOptimized is specified, the AutoML strategy optimizes predictor accuracy. // This parameter is only valid for predictors trained using AutoML. AutoMLOverrideStrategy types.AutoMLOverrideStrategy @@ -79,8 +79,8 @@ type DescribePredictorOutput struct { // the predictor. DatasetImportJobArns []string - // An AWS Key Management Service (KMS) key and the AWS Identity and Access - // Management (IAM) role that Amazon Forecast can assume to access the key. + // An Key Management Service (KMS) key and the Identity and Access Management (IAM) + // role that Amazon Forecast can assume to access the key. EncryptionConfig *types.EncryptionConfig // The estimated time remaining in minutes for the predictor training job to diff --git a/service/forecast/api_op_DescribePredictorBacktestExportJob.go b/service/forecast/api_op_DescribePredictorBacktestExportJob.go index d05b7a7046b..7b02c1e9562 100644 --- a/service/forecast/api_op_DescribePredictorBacktestExportJob.go +++ b/service/forecast/api_op_DescribePredictorBacktestExportJob.go @@ -55,9 +55,9 @@ type DescribePredictorBacktestExportJobOutput struct { // When the predictor backtest export job was created. CreationTime *time.Time - // The destination for an export job. Provide an S3 path, an AWS Identity and - // Access Management (IAM) role that allows Amazon Forecast to access the location, - // and an AWS Key Management Service (KMS) key (optional). + // The destination for an export job. Provide an S3 path, an Identity and Access + // Management (IAM) role that allows Amazon Forecast to access the location, and an + // Key Management Service (KMS) key (optional). Destination *types.DataDestination // The format of the exported data, CSV or PARQUET. diff --git a/service/forecast/api_op_DescribeWhatIfForecast.go b/service/forecast/api_op_DescribeWhatIfForecast.go index e1dac70989c..04e2fdc84c0 100644 --- a/service/forecast/api_op_DescribeWhatIfForecast.go +++ b/service/forecast/api_op_DescribeWhatIfForecast.go @@ -60,7 +60,7 @@ type DescribeWhatIfForecastOutput struct { EstimatedTimeRemainingInMinutes *int64 // The quantiles at which probabilistic forecasts are generated. You can specify up - // to 5 quantiles per what-if forecast in the CreateWhatIfForecast operation. If + // to five quantiles per what-if forecast in the CreateWhatIfForecast operation. If // you didn't specify quantiles, the default values are ["0.1", "0.5", "0.9"]. ForecastTypes []string diff --git a/service/forecast/api_op_DescribeWhatIfForecastExport.go b/service/forecast/api_op_DescribeWhatIfForecastExport.go index 27cb80e7642..cb0abfc9cfe 100644 --- a/service/forecast/api_op_DescribeWhatIfForecastExport.go +++ b/service/forecast/api_op_DescribeWhatIfForecastExport.go @@ -56,9 +56,9 @@ type DescribeWhatIfForecastExportOutput struct { // When the what-if forecast export was created. CreationTime *time.Time - // The destination for an export job. Provide an S3 path, an AWS Identity and - // Access Management (IAM) role that allows Amazon Forecast to access the location, - // and an AWS Key Management Service (KMS) key (optional). + // The destination for an export job. 
Provide an S3 path, an Identity and Access + // Management (IAM) role that allows Amazon Forecast to access the location, and an + // Key Management Service (KMS) key (optional). Destination *types.DataDestination // The approximate time remaining to complete the what-if forecast export, in diff --git a/service/forecast/api_op_GetAccuracyMetrics.go b/service/forecast/api_op_GetAccuracyMetrics.go index c905d627eba..ee803d5c5c1 100644 --- a/service/forecast/api_op_GetAccuracyMetrics.go +++ b/service/forecast/api_op_GetAccuracyMetrics.go @@ -54,8 +54,8 @@ type GetAccuracyMetricsInput struct { type GetAccuracyMetricsOutput struct { // The LatencyOptimized AutoML override strategy is only available in private beta. - // Contact AWS Support or your account manager to learn more about access - // privileges. The AutoML strategy used to train the predictor. Unless + // Contact Amazon Web Services Support or your account manager to learn more about + // access privileges. The AutoML strategy used to train the predictor. Unless // LatencyOptimized is specified, the AutoML strategy optimizes predictor accuracy. // This parameter is only valid for predictors trained using AutoML. AutoMLOverrideStrategy types.AutoMLOverrideStrategy diff --git a/service/forecast/api_op_TagResource.go b/service/forecast/api_op_TagResource.go index c9a3e44d833..1c78f129863 100644 --- a/service/forecast/api_op_TagResource.go +++ b/service/forecast/api_op_TagResource.go @@ -62,11 +62,11 @@ type TagResourceInput struct { // sensitive. // // * Do not use aws:, AWS:, or any upper or lowercase combination of - // such as a prefix for keys as it is reserved for AWS use. You cannot edit or - // delete tag keys with this prefix. Values can have this prefix. If a tag value - // has aws as its prefix but the key does not, then Forecast considers it to be a - // user tag and will count against the limit of 50 tags. Tags with only the key - // prefix of aws do not count against your tags per resource limit. + // such as a prefix for keys as it is reserved for Amazon Web Services use. You + // cannot edit or delete tag keys with this prefix. Values can have this prefix. If + // a tag value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags with only + // the key prefix of aws do not count against your tags per resource limit. // // This member is required. 
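Several of the Describe operations above note that the Status must be ACTIVE before the exported files are readable in S3. A polling sketch for DescribeWhatIfForecastExport follows; the status strings, the Message field, and the 30-second interval follow the usual Forecast status model and should be treated as assumptions.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
)

// waitForWhatIfExport polls the what-if forecast export until it becomes
// ACTIVE or fails.
func waitForWhatIfExport(ctx context.Context, client *forecast.Client, exportArn string) error {
	for {
		out, err := client.DescribeWhatIfForecastExport(ctx, &forecast.DescribeWhatIfForecastExportInput{
			WhatIfForecastExportArn: aws.String(exportArn),
		})
		if err != nil {
			return err
		}
		switch aws.ToString(out.Status) {
		case "ACTIVE":
			return nil // the export is now readable at its S3 destination
		case "CREATE_FAILED":
			return fmt.Errorf("what-if forecast export failed: %s", aws.ToString(out.Message))
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(30 * time.Second): // still CREATE_PENDING or CREATE_IN_PROGRESS
		}
	}
}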
Tags []types.Tag diff --git a/service/forecast/deserializers.go b/service/forecast/deserializers.go index ed02b2c1d59..b99fa7ad3f4 100644 --- a/service/forecast/deserializers.go +++ b/service/forecast/deserializers.go @@ -8219,6 +8219,15 @@ func awsAwsjson11_deserializeDocumentDatasetImportJobSummary(v **types.DatasetIm return err } + case "ImportMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImportMode to be of type string, got %T instead", value) + } + sv.ImportMode = types.ImportMode(jtv) + } + case "LastModificationTime": if value != nil { switch jtv := value.(type) { @@ -14440,6 +14449,15 @@ func awsAwsjson11_deserializeOpDocumentDescribeDatasetImportJobOutput(v **Descri sv.GeolocationFormat = ptr.String(jtv) } + case "ImportMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImportMode to be of type string, got %T instead", value) + } + sv.ImportMode = types.ImportMode(jtv) + } + case "LastModificationTime": if value != nil { switch jtv := value.(type) { diff --git a/service/forecast/serializers.go b/service/forecast/serializers.go index 5711822ca06..57ff3abcc61 100644 --- a/service/forecast/serializers.go +++ b/service/forecast/serializers.go @@ -4576,6 +4576,11 @@ func awsAwsjson11_serializeOpDocumentCreateDatasetImportJobInput(v *CreateDatase ok.String(*v.GeolocationFormat) } + if len(v.ImportMode) > 0 { + ok := object.Key("ImportMode") + ok.String(string(v.ImportMode)) + } + if v.Tags != nil { ok := object.Key("Tags") if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { diff --git a/service/forecast/types/enums.go b/service/forecast/types/enums.go index d68861adace..527e53fa462 100644 --- a/service/forecast/types/enums.go +++ b/service/forecast/types/enums.go @@ -194,6 +194,24 @@ func (FilterConditionString) Values() []FilterConditionString { } } +type ImportMode string + +// Enum values for ImportMode +const ( + ImportModeFull ImportMode = "FULL" + ImportModeIncremental ImportMode = "INCREMENTAL" +) + +// Values returns all known values for ImportMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (ImportMode) Values() []ImportMode { + return []ImportMode{ + "FULL", + "INCREMENTAL", + } +} + type Month string // Enum values for Month diff --git a/service/forecast/types/types.go b/service/forecast/types/types.go index 16505267595..c3efe412556 100644 --- a/service/forecast/types/types.go +++ b/service/forecast/types/types.go @@ -378,9 +378,9 @@ type DataConfig struct { noSmithyDocumentSerde } -// The destination for an export job. Provide an S3 path, an AWS Identity and -// Access Management (IAM) role that allows Amazon Forecast to access the location, -// and an AWS Key Management Service (KMS) key (optional). +// The destination for an export job. Provide an S3 path, an Identity and Access +// Management (IAM) role that allows Amazon Forecast to access the location, and an +// Key Management Service (KMS) key (optional). type DataDestination struct { // The path to an Amazon Simple Storage Service (Amazon S3) bucket along with the @@ -430,10 +430,10 @@ type DatasetImportJobSummary struct { // When the dataset import job was created. 
CreationTime *time.Time - // The location of the training data to import and an AWS Identity and Access + // The location of the training data to import and an Identity and Access // Management (IAM) role that Amazon Forecast can assume to access the data. The // training data must be stored in an Amazon S3 bucket. If encryption is used, - // DataSource includes an AWS Key Management Service (KMS) key. + // DataSource includes an Key Management Service (KMS) key. DataSource *DataSource // The Amazon Resource Name (ARN) of the dataset import job. @@ -442,6 +442,9 @@ type DatasetImportJobSummary struct { // The name of the dataset import job. DatasetImportJobName *string + // The import mode of the dataset import job, FULL or INCREMENTAL. + ImportMode ImportMode + // The last time the resource was modified. The timestamp depends on the status of // the job: // @@ -511,8 +514,8 @@ type DatasetSummary struct { noSmithyDocumentSerde } -// The source of your data, an AWS Identity and Access Management (IAM) role that -// allows Amazon Forecast to access the data and, optionally, an AWS Key Management +// The source of your data, an Identity and Access Management (IAM) role that +// allows Amazon Forecast to access the data and, optionally, an Key Management // Service (KMS) key. type DataSource struct { @@ -525,9 +528,9 @@ type DataSource struct { noSmithyDocumentSerde } -// An AWS Key Management Service (KMS) key and an AWS Identity and Access -// Management (IAM) role that Amazon Forecast can assume to access the key. You can -// specify this optional object in the CreateDataset and CreatePredictor requests. +// An Key Management Service (KMS) key and an Identity and Access Management (IAM) +// role that Amazon Forecast can assume to access the key. You can specify this +// optional object in the CreateDataset and CreatePredictor requests. type EncryptionConfig struct { // The Amazon Resource Name (ARN) of the KMS key. @@ -535,9 +538,9 @@ type EncryptionConfig struct { // This member is required. KMSKeyArn *string - // The ARN of the IAM role that Amazon Forecast can assume to access the AWS KMS - // key. Passing a role across AWS accounts is not allowed. If you pass a role that - // isn't in your account, you get an InvalidInputException error. + // The ARN of the IAM role that Amazon Forecast can assume to access the KMS key. + // Passing a role across Amazon Web Services accounts is not allowed. If you pass a + // role that isn't in your account, you get an InvalidInputException error. // // This member is required. RoleArn *string @@ -641,9 +644,9 @@ type ExplainabilityExportSummary struct { // When the Explainability was created. CreationTime *time.Time - // The destination for an export job. Provide an S3 path, an AWS Identity and - // Access Management (IAM) role that allows Amazon Forecast to access the location, - // and an AWS Key Management Service (KMS) key (optional). + // The destination for an export job. Provide an S3 path, an Identity and Access + // Management (IAM) role that allows Amazon Forecast to access the location, and an + // Key Management Service (KMS) key (optional). Destination *DataDestination // The Amazon Resource Name (ARN) of the Explainability export. @@ -818,13 +821,32 @@ type Featurization struct { // operation twice by specifying different featurization configurations. type FeaturizationConfig struct { - // The frequency of predictions in a forecast. 
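Because DatasetImportJobSummary also carries the new ImportMode, existing ListDatasetImportJobs results can be filtered by it. A sketch using the generated paginator; the paginator constructor name follows the SDK's usual List* pattern and is an assumption here.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/forecast"
	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
)

// countIncrementalImports walks every dataset import job and counts the ones
// that appended data rather than replacing it.
func countIncrementalImports(ctx context.Context, client *forecast.Client) (int, error) {
	n := 0
	p := forecast.NewListDatasetImportJobsPaginator(client, &forecast.ListDatasetImportJobsInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return 0, err
		}
		for _, job := range page.DatasetImportJobs {
			if job.ImportMode == types.ImportModeIncremental {
				n++
			}
		}
	}
	return n, nil
}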
Valid intervals are Y (Year), M - // (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), - // 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, "Y" - // indicates every year and "5min" indicates every five minutes. The frequency must - // be greater than or equal to the TARGET_TIME_SERIES dataset frequency. When a + // The frequency of predictions in a forecast. Valid intervals are an integer + // followed by Y (Year), M (Month), W (Week), D (Day), H (Hour), and min (Minute). + // For example, "1D" indicates every day and "15min" indicates every 15 minutes. + // You cannot specify a value that would overlap with the next larger frequency. + // That means, for example, you cannot specify a frequency of 60 minutes, because + // that is equivalent to 1 hour. The valid values for each frequency are the + // following: + // + // * Minute - 1-59 + // + // * Hour - 1-23 + // + // * Day - 1-6 + // + // * Week - 1-4 + // + // * Month - + // 1-11 + // + // * Year - 1 + // + // Thus, if you want every other week forecasts, specify "2W". + // Or, if you want quarterly forecasts, you specify "3M". The frequency must be + // greater than or equal to the TARGET_TIME_SERIES dataset frequency. When a // RELATED_TIME_SERIES dataset is provided, the frequency must be equal to the - // RELATED_TIME_SERIES dataset frequency. + // TARGET_TIME_SERIES dataset frequency. // // This member is required. ForecastFrequency *string @@ -1291,9 +1313,9 @@ type PredictorBacktestExportJobSummary struct { // When the predictor backtest export job was created. CreationTime *time.Time - // The destination for an export job. Provide an S3 path, an AWS Identity and - // Access Management (IAM) role that allows Amazon Forecast to access the location, - // and an AWS Key Management Service (KMS) key (optional). + // The destination for an export job. Provide an S3 path, an Identity and Access + // Management (IAM) role that allows Amazon Forecast to access the location, and an + // Key Management Service (KMS) key (optional). Destination *DataDestination // The last time the resource was modified. The timestamp depends on the status of @@ -1516,9 +1538,9 @@ type ReferencePredictorSummary struct { } // The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, -// and an AWS Identity and Access Management (IAM) role that Amazon Forecast can -// assume to access the file(s). Optionally, includes an AWS Key Management Service -// (KMS) key. This object is part of the DataSource object that is submitted in the +// and an Identity and Access Management (IAM) role that Amazon Forecast can assume +// to access the file(s). Optionally, includes an Key Management Service (KMS) key. +// This object is part of the DataSource object that is submitted in the // CreateDatasetImportJob request, and part of the DataDestination object. type S3Config struct { @@ -1528,16 +1550,16 @@ type S3Config struct { // This member is required. Path *string - // The ARN of the AWS Identity and Access Management (IAM) role that Amazon - // Forecast can assume to access the Amazon S3 bucket or files. If you provide a - // value for the KMSKeyArn key, the role must allow access to the key. Passing a - // role across AWS accounts is not allowed. If you pass a role that isn't in your - // account, you get an InvalidInputException error. + // The ARN of the Identity and Access Management (IAM) role that Amazon Forecast + // can assume to access the Amazon S3 bucket or files. 
If you provide a value for + // the KMSKeyArn key, the role must allow access to the key. Passing a role across + // Amazon Web Services accounts is not allowed. If you pass a role that isn't in + // your account, you get an InvalidInputException error. // // This member is required. RoleArn *string - // The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key. + // The Amazon Resource Name (ARN) of an Key Management Service (KMS) key. KMSKeyArn *string noSmithyDocumentSerde @@ -1828,11 +1850,11 @@ type SupplementaryFeature struct { // case sensitive. // // * Do not use aws:, AWS:, or any upper or lowercase combination -// of such as a prefix for keys as it is reserved for AWS use. You cannot edit or -// delete tag keys with this prefix. Values can have this prefix. If a tag value -// has aws as its prefix but the key does not, then Forecast considers it to be a -// user tag and will count against the limit of 50 tags. Tags with only the key -// prefix of aws do not count against your tags per resource limit. +// of such as a prefix for keys as it is reserved for Amazon Web Services use. You +// cannot edit or delete tag keys with this prefix. Values can have this prefix. If +// a tag value has aws as its prefix but the key does not, then Forecast considers +// it to be a user tag and will count against the limit of 50 tags. Tags with only +// the key prefix of aws do not count against your tags per resource limit. type Tag struct { // One part of a key-value pair that makes up a tag. A key is a general label that @@ -1936,8 +1958,8 @@ type TimeSeriesCondition struct { // to create forecasts. type TimeSeriesIdentifiers struct { - // The source of your data, an AWS Identity and Access Management (IAM) role that - // allows Amazon Forecast to access the data and, optionally, an AWS Key Management + // The source of your data, an Identity and Access Management (IAM) role that + // allows Amazon Forecast to access the data and, optionally, an Key Management // Service (KMS) key. DataSource *DataSource @@ -1959,9 +1981,9 @@ type TimeSeriesIdentifiers struct { type TimeSeriesReplacementsDataSource struct { // The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, - // and an AWS Identity and Access Management (IAM) role that Amazon Forecast can - // assume to access the file(s). Optionally, includes an AWS Key Management Service - // (KMS) key. This object is part of the DataSource object that is submitted in the + // and an Identity and Access Management (IAM) role that Amazon Forecast can assume + // to access the file(s). Optionally, includes an Key Management Service (KMS) key. + // This object is part of the DataSource object that is submitted in the // CreateDatasetImportJob request, and part of the DataDestination object. // // This member is required. diff --git a/service/iam/api_op_CreateOpenIDConnectProvider.go b/service/iam/api_op_CreateOpenIDConnectProvider.go index 18008ad8aa2..ba109179bf0 100644 --- a/service/iam/api_op_CreateOpenIDConnectProvider.go +++ b/service/iam/api_op_CreateOpenIDConnectProvider.go @@ -39,12 +39,12 @@ import ( // Services. Amazon Web Services secures communication with some OIDC identity // providers (IdPs) through our library of trusted certificate authorities (CAs) // instead of using a certificate thumbprint to verify your IdP server certificate. -// These OIDC IdPs include Google, and those that use an Amazon S3 bucket to host a -// JSON Web Key Set (JWKS) endpoint. 
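// A small sketch of tagging a Forecast resource under the key rules above: user-defined
// keys must not start with aws: (in any casing), although values may. The tag key/value
// and the predictor ARN are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
)

func tagPredictor(ctx context.Context, client *forecast.Client, predictorArn string) error {
	_, err := client.TagResource(ctx, &forecast.TagResourceInput{
		ResourceArn: aws.String(predictorArn),
		Tags: []types.Tag{
			// Counts toward the limit of 50 user tags per resource.
			{Key: aws.String("team"), Value: aws.String("demand-planning")},
			// A key such as "aws:reserved" would be rejected: the aws: prefix is reserved.
		},
	})
	return err
}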
In these cases, your legacy thumbprint remains -// in your configuration, but is no longer used for validation. The trust for the -// OIDC provider is derived from the IAM provider that this operation creates. -// Therefore, it is best to limit access to the CreateOpenIDConnectProvider -// operation to highly privileged users. +// These OIDC IdPs include Google, Auth0, and those that use an Amazon S3 bucket to +// host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint +// remains in your configuration, but is no longer used for validation. The trust +// for the OIDC provider is derived from the IAM provider that this operation +// creates. Therefore, it is best to limit access to the +// CreateOpenIDConnectProvider operation to highly privileged users. func (c *Client) CreateOpenIDConnectProvider(ctx context.Context, params *CreateOpenIDConnectProviderInput, optFns ...func(*Options)) (*CreateOpenIDConnectProviderOutput, error) { if params == nil { params = &CreateOpenIDConnectProviderInput{} diff --git a/service/iam/api_op_CreateRole.go b/service/iam/api_op_CreateRole.go index 65224dbfffb..e4e22ba4577 100644 --- a/service/iam/api_op_CreateRole.go +++ b/service/iam/api_op_CreateRole.go @@ -95,7 +95,16 @@ type CreateRoleInput struct { // digits, and upper and lowercased letters. Path *string - // The ARN of the policy that is used to set the permissions boundary for the role. + // The ARN of the managed policy that is used to set the permissions boundary for + // the role. A permissions boundary policy defines the maximum permissions that + // identity-based policies can grant to an entity, but does not grant permissions. + // Permissions boundaries do not define the maximum permissions that a + // resource-based policy can grant to an entity. To learn more, see Permissions + // boundaries for IAM entities + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. For more information about policy types, see Policy types + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types) + // in the IAM User Guide. PermissionsBoundary *string // A list of tags that you want to attach to the new role. Each tag consists of a diff --git a/service/iam/api_op_CreateUser.go b/service/iam/api_op_CreateUser.go index 48bc3a60c43..69cd683b85c 100644 --- a/service/iam/api_op_CreateUser.go +++ b/service/iam/api_op_CreateUser.go @@ -51,7 +51,16 @@ type CreateUserInput struct { // digits, and upper and lowercased letters. Path *string - // The ARN of the policy that is used to set the permissions boundary for the user. + // The ARN of the managed policy that is used to set the permissions boundary for + // the user. A permissions boundary policy defines the maximum permissions that + // identity-based policies can grant to an entity, but does not grant permissions. + // Permissions boundaries do not define the maximum permissions that a + // resource-based policy can grant to an entity. To learn more, see Permissions + // boundaries for IAM entities + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. For more information about policy types, see Policy types + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types) + // in the IAM User Guide. PermissionsBoundary *string // A list of tags that you want to attach to the new user. 
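// A minimal sketch of creating a role with the managed-policy permissions boundary
// described above; the role name, trust policy, and boundary ARN are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := iam.NewFromConfig(cfg)

	trustPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"lambda.amazonaws.com"},"Action":"sts:AssumeRole"}]}`

	_, err = client.CreateRole(ctx, &iam.CreateRoleInput{
		RoleName:                 aws.String("app-worker"), // hypothetical name
		AssumeRolePolicyDocument: aws.String(trustPolicy),
		// The boundary caps what identity-based policies can grant to this role;
		// it does not itself grant any permissions.
		PermissionsBoundary: aws.String("arn:aws:iam::111122223333:policy/WorkloadBoundary"), // hypothetical policy ARN
	})
	if err != nil {
		log.Fatal(err)
	}
}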
Each tag consists of a diff --git a/service/iam/api_op_DeleteRole.go b/service/iam/api_op_DeleteRole.go index ebfcd098f9f..6282db633d7 100644 --- a/service/iam/api_op_DeleteRole.go +++ b/service/iam/api_op_DeleteRole.go @@ -10,12 +10,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the specified role. The role must not have any policies attached. For -// more information about roles, see Working with roles -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). Make -// sure that you do not have any Amazon EC2 instances running with the role you are -// about to delete. Deleting a role or instance profile that is associated with a -// running instance will break any applications running on the instance. +// Deletes the specified role. Unlike the Amazon Web Services Management Console, +// when you delete a role programmatically, you must delete the items attached to +// the role manually, or the deletion fails. For more information, see Deleting an +// IAM role +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_manage_delete.html#roles-managingrole-deleting-cli). +// Before attempting to delete a role, remove the following attached items: +// +// * +// Inline policies (DeleteRolePolicy) +// +// * Attached managed policies +// (DetachRolePolicy) +// +// * Instance profile (RemoveRoleFromInstanceProfile) +// +// * +// Optional – Delete instance profile after detaching from role for resource clean +// up (DeleteInstanceProfile) +// +// Make sure that you do not have any Amazon EC2 +// instances running with the role you are about to delete. Deleting a role or +// instance profile that is associated with a running instance will break any +// applications running on the instance. func (c *Client) DeleteRole(ctx context.Context, params *DeleteRoleInput, optFns ...func(*Options)) (*DeleteRoleOutput, error) { if params == nil { params = &DeleteRoleInput{} diff --git a/service/iam/api_op_PutRolePermissionsBoundary.go b/service/iam/api_op_PutRolePermissionsBoundary.go index e4df4cc017f..e099376dde5 100644 --- a/service/iam/api_op_PutRolePermissionsBoundary.go +++ b/service/iam/api_op_PutRolePermissionsBoundary.go @@ -38,7 +38,16 @@ func (c *Client) PutRolePermissionsBoundary(ctx context.Context, params *PutRole type PutRolePermissionsBoundaryInput struct { - // The ARN of the policy that is used to set the permissions boundary for the role. + // The ARN of the managed policy that is used to set the permissions boundary for + // the role. A permissions boundary policy defines the maximum permissions that + // identity-based policies can grant to an entity, but does not grant permissions. + // Permissions boundaries do not define the maximum permissions that a + // resource-based policy can grant to an entity. To learn more, see Permissions + // boundaries for IAM entities + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. For more information about policy types, see Policy types + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types) + // in the IAM User Guide. // // This member is required. 
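// A sketch of the programmatic cleanup order described above before calling DeleteRole:
// detach managed policies, delete inline policies, remove the role from instance profiles,
// then delete the role. Pagination and instance-profile deletion are omitted for brevity.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func deleteRoleCompletely(ctx context.Context, client *iam.Client, roleName string) error {
	// Detach managed policies.
	attached, err := client.ListAttachedRolePolicies(ctx, &iam.ListAttachedRolePoliciesInput{RoleName: aws.String(roleName)})
	if err != nil {
		return err
	}
	for _, p := range attached.AttachedPolicies {
		if _, err := client.DetachRolePolicy(ctx, &iam.DetachRolePolicyInput{RoleName: aws.String(roleName), PolicyArn: p.PolicyArn}); err != nil {
			return err
		}
	}

	// Delete inline policies.
	inline, err := client.ListRolePolicies(ctx, &iam.ListRolePoliciesInput{RoleName: aws.String(roleName)})
	if err != nil {
		return err
	}
	for _, name := range inline.PolicyNames {
		if _, err := client.DeleteRolePolicy(ctx, &iam.DeleteRolePolicyInput{RoleName: aws.String(roleName), PolicyName: aws.String(name)}); err != nil {
			return err
		}
	}

	// Remove the role from any instance profiles (optionally delete the profiles afterwards).
	profiles, err := client.ListInstanceProfilesForRole(ctx, &iam.ListInstanceProfilesForRoleInput{RoleName: aws.String(roleName)})
	if err != nil {
		return err
	}
	for _, ip := range profiles.InstanceProfiles {
		if _, err := client.RemoveRoleFromInstanceProfile(ctx, &iam.RemoveRoleFromInstanceProfileInput{
			InstanceProfileName: ip.InstanceProfileName,
			RoleName:            aws.String(roleName),
		}); err != nil {
			return err
		}
	}

	_, err = client.DeleteRole(ctx, &iam.DeleteRoleInput{RoleName: aws.String(roleName)})
	return err
}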
PermissionsBoundary *string diff --git a/service/iam/api_op_PutUserPermissionsBoundary.go b/service/iam/api_op_PutUserPermissionsBoundary.go index 8091a820214..e5503b934bb 100644 --- a/service/iam/api_op_PutUserPermissionsBoundary.go +++ b/service/iam/api_op_PutUserPermissionsBoundary.go @@ -37,7 +37,16 @@ func (c *Client) PutUserPermissionsBoundary(ctx context.Context, params *PutUser type PutUserPermissionsBoundaryInput struct { - // The ARN of the policy that is used to set the permissions boundary for the user. + // The ARN of the managed policy that is used to set the permissions boundary for + // the user. A permissions boundary policy defines the maximum permissions that + // identity-based policies can grant to an entity, but does not grant permissions. + // Permissions boundaries do not define the maximum permissions that a + // resource-based policy can grant to an entity. To learn more, see Permissions + // boundaries for IAM entities + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. For more information about policy types, see Policy types + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types) + // in the IAM User Guide. // // This member is required. PermissionsBoundary *string diff --git a/service/iam/api_op_SimulateCustomPolicy.go b/service/iam/api_op_SimulateCustomPolicy.go index 77cfc88203f..57e71f8815c 100644 --- a/service/iam/api_op_SimulateCustomPolicy.go +++ b/service/iam/api_op_SimulateCustomPolicy.go @@ -25,6 +25,11 @@ import ( // policy to evaluate context keys. To get the list of context keys that the // policies require for correct simulation, use GetContextKeysForCustomPolicy. If // the output is long, you can use MaxItems and Marker parameters to paginate the +// results. The IAM policy simulator evaluates statements in the identity-based +// policy and the inputs that you provide during simulation. The policy simulator +// results can differ from your live Amazon Web Services environment. We recommend +// that you check your policies against your live Amazon Web Services environment +// after testing using the policy simulator to confirm that you have the desired // results. For more information about using the policy simulator, see Testing IAM // policies with the IAM policy simulator // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_testing-policies.html)in @@ -151,7 +156,8 @@ type SimulateCustomPolicyInput struct { // invalid input error. For more information about ARNs, see Amazon Resource Names // (ARNs) // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. + // the Amazon Web Services General Reference. Simulation of resource-based policies + // isn't supported for IAM roles. ResourceArns []string // Specifies the type of simulation to run. Different API operations that support @@ -216,6 +222,9 @@ type SimulateCustomPolicyInput struct { // // * The special characters // tab (\u0009), line feed (\u000A), and carriage return (\u000D) + // + // Simulation of + // resource-based policies isn't supported for IAM roles. 
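// A minimal sketch of attaching the permissions boundary described above to an existing
// role; swapping Role for User gives the equivalent PutUserPermissionsBoundary call.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func setBoundary(ctx context.Context, client *iam.Client, roleName, boundaryPolicyArn string) error {
	_, err := client.PutRolePermissionsBoundary(ctx, &iam.PutRolePermissionsBoundaryInput{
		RoleName:            aws.String(roleName),
		PermissionsBoundary: aws.String(boundaryPolicyArn), // ARN of a managed policy
	})
	return err
}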
ResourcePolicy *string noSmithyDocumentSerde diff --git a/service/iam/api_op_SimulatePrincipalPolicy.go b/service/iam/api_op_SimulatePrincipalPolicy.go index 7c05d8f022c..75a011c89e5 100644 --- a/service/iam/api_op_SimulatePrincipalPolicy.go +++ b/service/iam/api_op_SimulatePrincipalPolicy.go @@ -21,19 +21,25 @@ import ( // additional policies specified as strings to include in the simulation. If you // want to simulate only policies specified as strings, use SimulateCustomPolicy // instead. You can also optionally include one resource-based policy to be -// evaluated with each of the resources included in the simulation. The simulation -// does not perform the API operations; it only checks the authorization to -// determine if the simulated policies allow or deny the operations. Note: This -// operation discloses information about the permissions granted to other users. If -// you do not want users to see other user's permissions, then consider allowing -// them to use SimulateCustomPolicy instead. Context keys are variables maintained -// by Amazon Web Services and its services that provide details about the context -// of an API query request. You can use the Condition element of an IAM policy to -// evaluate context keys. To get the list of context keys that the policies require -// for correct simulation, use GetContextKeysForPrincipalPolicy. If the output is -// long, you can use the MaxItems and Marker parameters to paginate the results. -// For more information about using the policy simulator, see Testing IAM policies -// with the IAM policy simulator +// evaluated with each of the resources included in the simulation for IAM users +// only. The simulation does not perform the API operations; it only checks the +// authorization to determine if the simulated policies allow or deny the +// operations. Note: This operation discloses information about the permissions +// granted to other users. If you do not want users to see other user's +// permissions, then consider allowing them to use SimulateCustomPolicy instead. +// Context keys are variables maintained by Amazon Web Services and its services +// that provide details about the context of an API query request. You can use the +// Condition element of an IAM policy to evaluate context keys. To get the list of +// context keys that the policies require for correct simulation, use +// GetContextKeysForPrincipalPolicy. If the output is long, you can use the +// MaxItems and Marker parameters to paginate the results. The IAM policy simulator +// evaluates statements in the identity-based policy and the inputs that you +// provide during simulation. The policy simulator results can differ from your +// live Amazon Web Services environment. We recommend that you check your policies +// against your live Amazon Web Services environment after testing using the policy +// simulator to confirm that you have the desired results. For more information +// about using the policy simulator, see Testing IAM policies with the IAM policy +// simulator // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_testing-policies.html)in // the IAM User Guide. func (c *Client) SimulatePrincipalPolicy(ctx context.Context, params *SimulatePrincipalPolicyInput, optFns ...func(*Options)) (*SimulatePrincipalPolicyOutput, error) { @@ -170,7 +176,8 @@ type SimulatePrincipalPolicyInput struct { // ResourcePolicy parameter. 
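// A sketch of simulating a principal's attached policies for one action and resource,
// keeping in mind the caveat above that simulator results can differ from the live
// environment. The bucket ARN is a hypothetical placeholder.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func simulateGetObject(ctx context.Context, client *iam.Client, principalArn string) error {
	out, err := client.SimulatePrincipalPolicy(ctx, &iam.SimulatePrincipalPolicyInput{
		PolicySourceArn: aws.String(principalArn),
		ActionNames:     []string{"s3:GetObject"},
		ResourceArns:    []string{"arn:aws:s3:::my-bucket/*"}, // hypothetical bucket
	})
	if err != nil {
		return err
	}
	for _, r := range out.EvaluationResults {
		// EvalDecision is "allowed", "explicitDeny", or "implicitDeny".
		fmt.Printf("%s on %s: %s\n", aws.ToString(r.EvalActionName), aws.ToString(r.EvalResourceName), r.EvalDecision)
	}
	return nil
}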
For more information about ARNs, see Amazon Resource // Names (ARNs) // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. + // the Amazon Web Services General Reference. Simulation of resource-based policies + // isn't supported for IAM roles. ResourceArns []string // Specifies the type of simulation to run. Different API operations that support @@ -232,6 +239,9 @@ type SimulatePrincipalPolicyInput struct { // // * The special characters // tab (\u0009), line feed (\u000A), and carriage return (\u000D) + // + // Simulation of + // resource-based policies isn't supported for IAM roles. ResourcePolicy *string noSmithyDocumentSerde diff --git a/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go b/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go index d6b3bca7452..1309b26e60a 100644 --- a/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go +++ b/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go @@ -20,9 +20,9 @@ import ( // the certificate thumbprint is updated. Amazon Web Services secures communication // with some OIDC identity providers (IdPs) through our library of trusted // certificate authorities (CAs) instead of using a certificate thumbprint to -// verify your IdP server certificate. These OIDC IdPs include Google, and those -// that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In -// these cases, your legacy thumbprint remains in your configuration, but is no +// verify your IdP server certificate. These OIDC IdPs include Google, Auth0, and +// those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. +// In these cases, your legacy thumbprint remains in your configuration, but is no // longer used for validation. Trust for the OIDC provider is derived from the // provider certificate and is validated by the thumbprint. 
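// A minimal sketch of rotating an OIDC provider thumbprint; as noted above, for Google,
// Auth0, and S3-hosted JWKS IdPs the legacy thumbprint stays in the configuration but is
// no longer used for validation.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func rotateThumbprint(ctx context.Context, client *iam.Client, providerArn, newThumbprint string) error {
	_, err := client.UpdateOpenIDConnectProviderThumbprint(ctx, &iam.UpdateOpenIDConnectProviderThumbprintInput{
		OpenIDConnectProviderArn: aws.String(providerArn),
		ThumbprintList:           []string{newThumbprint}, // replaces the existing thumbprint list
	})
	return err
}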
Therefore, it is best // to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly diff --git a/service/kendraranking/internal/endpoints/endpoints.go b/service/kendraranking/internal/endpoints/endpoints.go index 8d5135e3944..35abb87ed4e 100644 --- a/service/kendraranking/internal/endpoints/endpoints.go +++ b/service/kendraranking/internal/endpoints/endpoints.go @@ -176,14 +176,14 @@ var defaultPartitions = endpoints.Partitions{ Hostname: "kendra-ranking.ap-southeast-3.api.aws", }, endpoints.EndpointKey{ - Region: "ca-central-1", + Region: "ap-southeast-4", }: endpoints.Endpoint{ - Hostname: "kendra-ranking.ca-central-1.api.aws", + Hostname: "kendra-ranking.ap-southeast-4.api.aws", }, endpoints.EndpointKey{ - Region: "eu-central-1", + Region: "ca-central-1", }: endpoints.Endpoint{ - Hostname: "kendra-ranking.eu-central-1.api.aws", + Hostname: "kendra-ranking.ca-central-1.api.aws", }, endpoints.EndpointKey{ Region: "eu-central-2", @@ -210,11 +210,6 @@ var defaultPartitions = endpoints.Partitions{ }: endpoints.Endpoint{ Hostname: "kendra-ranking.eu-west-1.api.aws", }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "kendra-ranking.eu-west-2.api.aws", - }, endpoints.EndpointKey{ Region: "eu-west-3", }: endpoints.Endpoint{ diff --git a/service/kinesis/internal/endpoints/endpoints.go b/service/kinesis/internal/endpoints/endpoints.go index 80b0d88bd02..5c7e1078db6 100644 --- a/service/kinesis/internal/endpoints/endpoints.go +++ b/service/kinesis/internal/endpoints/endpoints.go @@ -411,6 +411,24 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.AwsUsGov, IsRegionalized: true, Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-gov-east-1", }: endpoints.Endpoint{ @@ -419,6 +437,15 @@ var defaultPartitions = endpoints.Partitions{ Region: "us-gov-east-1", }, }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, endpoints.EndpointKey{ Region: "us-gov-west-1", }: endpoints.Endpoint{ @@ -427,6 +454,15 @@ var defaultPartitions = endpoints.Partitions{ Region: "us-gov-west-1", }, }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, } diff --git a/service/mediatailor/api_op_CreateProgram.go b/service/mediatailor/api_op_CreateProgram.go index 60cb752ebb9..651a3dc9b3e 100644 --- a/service/mediatailor/api_op_CreateProgram.go +++ b/service/mediatailor/api_op_CreateProgram.go @@ -76,9 +76,15 @@ type CreateProgramOutput struct { // The name to assign to the channel for this program. ChannelName *string + // The clip range configuration settings. + ClipRange *types.ClipRange + // The time the program was created. 
CreationTime *time.Time + // The duration of the live program in milliseconds. + DurationMillis int64 + // The name of the LiveSource for this Program. LiveSourceName *string diff --git a/service/mediatailor/api_op_DescribeProgram.go b/service/mediatailor/api_op_DescribeProgram.go index 78692253312..8765daa293e 100644 --- a/service/mediatailor/api_op_DescribeProgram.go +++ b/service/mediatailor/api_op_DescribeProgram.go @@ -57,9 +57,15 @@ type DescribeProgramOutput struct { // The name of the channel that the program belongs to. ChannelName *string + // The clip range configuration settings. + ClipRange *types.ClipRange + // The timestamp of when the program was created. CreationTime *time.Time + // The duration of the live program in milliseconds. + DurationMillis *int64 + // The name of the LiveSource for this Program. LiveSourceName *string diff --git a/service/mediatailor/api_op_UpdateProgram.go b/service/mediatailor/api_op_UpdateProgram.go new file mode 100644 index 00000000000..00dc7d096cb --- /dev/null +++ b/service/mediatailor/api_op_UpdateProgram.go @@ -0,0 +1,165 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package mediatailor + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/mediatailor/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Updates a program within a channel. +func (c *Client) UpdateProgram(ctx context.Context, params *UpdateProgramInput, optFns ...func(*Options)) (*UpdateProgramOutput, error) { + if params == nil { + params = &UpdateProgramInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateProgram", params, optFns, c.addOperationUpdateProgramMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateProgramOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateProgramInput struct { + + // The name of the channel for this Program. + // + // This member is required. + ChannelName *string + + // The name of the Program. + // + // This member is required. + ProgramName *string + + // The schedule configuration settings. + // + // This member is required. + ScheduleConfiguration *types.UpdateProgramScheduleConfiguration + + // The ad break configuration settings. + AdBreaks []types.AdBreak + + noSmithyDocumentSerde +} + +type UpdateProgramOutput struct { + + // The ad break configuration settings. + AdBreaks []types.AdBreak + + // The ARN to assign to the program. + Arn *string + + // The name to assign to the channel for this program. + ChannelName *string + + // The clip range configuration settings. + ClipRange *types.ClipRange + + // The time the program was created. + CreationTime *time.Time + + // The duration of the live program in milliseconds. + DurationMillis int64 + + // The name of the LiveSource for this Program. + LiveSourceName *string + + // The name to assign to this program. + ProgramName *string + + // The scheduled start time for this Program. + ScheduledStartTime *time.Time + + // The name to assign to the source location for this program. + SourceLocationName *string + + // The name that's used to refer to a VOD source. + VodSourceName *string + + // Metadata pertaining to the operation's result. 
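// A sketch of the new UpdateProgram operation added in this patch: it reschedules a
// program and clips the end of its VOD source via ClipRange.EndOffsetMillis. The channel
// and program names, offsets, and start time are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/mediatailor"
	"github.com/aws/aws-sdk-go-v2/service/mediatailor/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := mediatailor.NewFromConfig(cfg)

	out, err := client.UpdateProgram(ctx, &mediatailor.UpdateProgramInput{
		ChannelName: aws.String("my-channel"), // hypothetical channel
		ProgramName: aws.String("my-program"), // hypothetical program
		ScheduleConfiguration: &types.UpdateProgramScheduleConfiguration{
			// Keep only the first 30 minutes of the VOD source.
			ClipRange: &types.ClipRange{EndOffsetMillis: 30 * 60 * 1000},
			Transition: &types.UpdateProgramTransition{
				ScheduledStartTimeMillis: 1_700_000_000_000, // epoch milliseconds
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("program duration (ms):", out.DurationMillis)
}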
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateProgramMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateProgram{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateProgram{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateProgramValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateProgram(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateProgram(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "mediatailor", + OperationName: "UpdateProgram", + } +} diff --git a/service/mediatailor/deserializers.go b/service/mediatailor/deserializers.go index 4f1eb221896..d03975702cb 100644 --- a/service/mediatailor/deserializers.go +++ b/service/mediatailor/deserializers.go @@ -1111,6 +1111,11 @@ func awsRestjson1_deserializeOpDocumentCreateProgramOutput(v **CreateProgramOutp sv.ChannelName = ptr.String(jtv) } + case "ClipRange": + if err := awsRestjson1_deserializeDocumentClipRange(&sv.ClipRange, value); err != nil { + return err + } + case "CreationTime": if value != nil { switch jtv := value.(type) { @@ -1127,6 +1132,19 @@ func awsRestjson1_deserializeOpDocumentCreateProgramOutput(v **CreateProgramOutp } } + case "DurationMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DurationMillis = i64 + } + case "LiveSourceName": if value != nil { jtv, ok := value.(string) @@ -2847,6 +2865,11 @@ func awsRestjson1_deserializeOpDocumentDescribeProgramOutput(v **DescribeProgram sv.ChannelName = ptr.String(jtv) } + case "ClipRange": + if err := awsRestjson1_deserializeDocumentClipRange(&sv.ClipRange, value); err != nil { + return err + } + case "CreationTime": if value != nil { switch jtv := value.(type) { @@ 
-2863,6 +2886,19 @@ func awsRestjson1_deserializeOpDocumentDescribeProgramOutput(v **DescribeProgram } } + case "DurationMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DurationMillis = ptr.Int64(i64) + } + case "LiveSourceName": if value != nil { jtv, ok := value.(string) @@ -6455,6 +6491,254 @@ func awsRestjson1_deserializeOpDocumentUpdateLiveSourceOutput(v **UpdateLiveSour return nil } +type awsRestjson1_deserializeOpUpdateProgram struct { +} + +func (*awsRestjson1_deserializeOpUpdateProgram) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUpdateProgram) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorUpdateProgram(response, &metadata) + } + output := &UpdateProgramOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentUpdateProgramOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUpdateProgram(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + default: 
+ genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentUpdateProgramOutput(v **UpdateProgramOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateProgramOutput + if *v == nil { + sv = &UpdateProgramOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AdBreaks": + if err := awsRestjson1_deserializeDocument__listOfAdBreak(&sv.AdBreaks, value); err != nil { + return err + } + + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "ChannelName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ChannelName = ptr.String(jtv) + } + + case "ClipRange": + if err := awsRestjson1_deserializeDocumentClipRange(&sv.ClipRange, value); err != nil { + return err + } + + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected __timestampUnix to be a JSON Number, got %T instead", value) + + } + } + + case "DurationMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DurationMillis = i64 + } + + case "LiveSourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.LiveSourceName = ptr.String(jtv) + } + + case "ProgramName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ProgramName = ptr.String(jtv) + } + + case "ScheduledStartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ScheduledStartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected __timestampUnix to be a JSON Number, got %T instead", value) + + } + } + + case "SourceLocationName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.SourceLocationName = ptr.String(jtv) + } + + case "VodSourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.VodSourceName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpUpdateSourceLocation struct { } @@ -7989,6 +8273,50 @@ func awsRestjson1_deserializeDocumentChannel(v **types.Channel, value interface{ return nil } +func awsRestjson1_deserializeDocumentClipRange(v **types.ClipRange, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil 
+ } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ClipRange + if *v == nil { + sv = &types.ClipRange{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EndOffsetMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.EndOffsetMillis = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentConfigurationAliasesResponse(v *map[string]map[string]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/mediatailor/generated.json b/service/mediatailor/generated.json index f1ba723646a..4bebe78b1d9 100644 --- a/service/mediatailor/generated.json +++ b/service/mediatailor/generated.json @@ -49,6 +49,7 @@ "api_op_UntagResource.go", "api_op_UpdateChannel.go", "api_op_UpdateLiveSource.go", + "api_op_UpdateProgram.go", "api_op_UpdateSourceLocation.go", "api_op_UpdateVodSource.go", "deserializers.go", diff --git a/service/mediatailor/serializers.go b/service/mediatailor/serializers.go index 3e2ec99fcdb..8badd34ec3b 100644 --- a/service/mediatailor/serializers.go +++ b/service/mediatailor/serializers.go @@ -3055,6 +3055,105 @@ func awsRestjson1_serializeOpDocumentUpdateLiveSourceInput(v *UpdateLiveSourceIn return nil } +type awsRestjson1_serializeOpUpdateProgram struct { +} + +func (*awsRestjson1_serializeOpUpdateProgram) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUpdateProgram) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateProgramInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/channel/{ChannelName}/program/{ProgramName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsUpdateProgramInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentUpdateProgramInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, 
in) +} +func awsRestjson1_serializeOpHttpBindingsUpdateProgramInput(v *UpdateProgramInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ChannelName == nil || len(*v.ChannelName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelName must not be empty")} + } + if v.ChannelName != nil { + if err := encoder.SetURI("ChannelName").String(*v.ChannelName); err != nil { + return err + } + } + + if v.ProgramName == nil || len(*v.ProgramName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ProgramName must not be empty")} + } + if v.ProgramName != nil { + if err := encoder.SetURI("ProgramName").String(*v.ProgramName); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentUpdateProgramInput(v *UpdateProgramInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AdBreaks != nil { + ok := object.Key("AdBreaks") + if err := awsRestjson1_serializeDocument__listOfAdBreak(v.AdBreaks, ok); err != nil { + return err + } + } + + if v.ScheduleConfiguration != nil { + ok := object.Key("ScheduleConfiguration") + if err := awsRestjson1_serializeDocumentUpdateProgramScheduleConfiguration(v.ScheduleConfiguration, ok); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpUpdateSourceLocation struct { } @@ -3438,6 +3537,18 @@ func awsRestjson1_serializeDocumentCdnConfiguration(v *types.CdnConfiguration, v return nil } +func awsRestjson1_serializeDocumentClipRange(v *types.ClipRange, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("EndOffsetMillis") + ok.Long(v.EndOffsetMillis) + } + + return nil +} + func awsRestjson1_serializeDocumentConfigurationAliasesRequest(v map[string]map[string]string, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3707,6 +3818,13 @@ func awsRestjson1_serializeDocumentScheduleConfiguration(v *types.ScheduleConfig object := value.Object() defer object.Close() + if v.ClipRange != nil { + ok := object.Key("ClipRange") + if err := awsRestjson1_serializeDocumentClipRange(v.ClipRange, ok); err != nil { + return err + } + } + if v.Transition != nil { ok := object.Key("Transition") if err := awsRestjson1_serializeDocumentTransition(v.Transition, ok); err != nil { @@ -3905,3 +4023,41 @@ func awsRestjson1_serializeDocumentTransition(v *types.Transition, value smithyj return nil } + +func awsRestjson1_serializeDocumentUpdateProgramScheduleConfiguration(v *types.UpdateProgramScheduleConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClipRange != nil { + ok := object.Key("ClipRange") + if err := awsRestjson1_serializeDocumentClipRange(v.ClipRange, ok); err != nil { + return err + } + } + + if v.Transition != nil { + ok := object.Key("Transition") + if err := awsRestjson1_serializeDocumentUpdateProgramTransition(v.Transition, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentUpdateProgramTransition(v *types.UpdateProgramTransition, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DurationMillis != 0 { + ok := object.Key("DurationMillis") + ok.Long(v.DurationMillis) + } + + if v.ScheduledStartTimeMillis != 0 { + ok := object.Key("ScheduledStartTimeMillis") + ok.Long(v.ScheduledStartTimeMillis) + } + + return nil +} diff --git 
a/service/mediatailor/types/types.go b/service/mediatailor/types/types.go index 8ff08ee277e..5f525301921 100644 --- a/service/mediatailor/types/types.go +++ b/service/mediatailor/types/types.go @@ -267,6 +267,18 @@ type Channel struct { noSmithyDocumentSerde } +// Clip range configuration for the VOD source associated with the program. +type ClipRange struct { + + // The end offset of the clip range, in milliseconds, starting from the beginning + // of the VOD source associated with the program. + // + // This member is required. + EndOffsetMillis int64 + + noSmithyDocumentSerde +} + // The configuration for DASH content. type DashConfiguration struct { @@ -781,6 +793,9 @@ type ScheduleConfiguration struct { // This member is required. Transition *Transition + // Program clip range configuration. + ClipRange *ClipRange + noSmithyDocumentSerde } @@ -1058,6 +1073,30 @@ type Transition struct { noSmithyDocumentSerde } +// Schedule configuration parameters. +type UpdateProgramScheduleConfiguration struct { + + // Program clip range configuration. + ClipRange *ClipRange + + // Program transition configuration. + Transition *UpdateProgramTransition + + noSmithyDocumentSerde +} + +// Program transition configuration. +type UpdateProgramTransition struct { + + // The duration of the live program in seconds. + DurationMillis int64 + + // The date and time that the program is scheduled to start, in epoch milliseconds. + ScheduledStartTimeMillis int64 + + noSmithyDocumentSerde +} + // VOD source configuration parameters. type VodSource struct { diff --git a/service/mediatailor/validators.go b/service/mediatailor/validators.go index e89b04d9d35..d29f3a8bded 100644 --- a/service/mediatailor/validators.go +++ b/service/mediatailor/validators.go @@ -770,6 +770,26 @@ func (m *validateOpUpdateLiveSource) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpUpdateProgram struct { +} + +func (*validateOpUpdateProgram) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateProgram) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateProgramInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateProgramInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpUpdateSourceLocation struct { } @@ -962,6 +982,10 @@ func addOpUpdateLiveSourceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateLiveSource{}, middleware.After) } +func addOpUpdateProgramValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateProgram{}, middleware.After) +} + func addOpUpdateSourceLocationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateSourceLocation{}, middleware.After) } @@ -1005,6 +1029,18 @@ func validateAvailMatchingCriteria(v *types.AvailMatchingCriteria) error { } } +func validateClipRange(v *types.ClipRange) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ClipRange"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateHttpConfiguration(v *types.HttpConfiguration) error { if v == nil { return nil @@ -1140,6 +1176,11 
@@ func validateScheduleConfiguration(v *types.ScheduleConfiguration) error { invalidParams.AddNested("Transition", err.(smithy.InvalidParamsError)) } } + if v.ClipRange != nil { + if err := validateClipRange(v.ClipRange); err != nil { + invalidParams.AddNested("ClipRange", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -1165,6 +1206,23 @@ func validateTransition(v *types.Transition) error { } } +func validateUpdateProgramScheduleConfiguration(v *types.UpdateProgramScheduleConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateProgramScheduleConfiguration"} + if v.ClipRange != nil { + if err := validateClipRange(v.ClipRange); err != nil { + invalidParams.AddNested("ClipRange", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpConfigureLogsForChannelInput(v *ConfigureLogsForChannelInput) error { if v == nil { return nil @@ -1855,6 +1913,31 @@ func validateOpUpdateLiveSourceInput(v *UpdateLiveSourceInput) error { } } +func validateOpUpdateProgramInput(v *UpdateProgramInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateProgramInput"} + if v.ChannelName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelName")) + } + if v.ProgramName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ProgramName")) + } + if v.ScheduleConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ScheduleConfiguration")) + } else if v.ScheduleConfiguration != nil { + if err := validateUpdateProgramScheduleConfiguration(v.ScheduleConfiguration); err != nil { + invalidParams.AddNested("ScheduleConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpUpdateSourceLocationInput(v *UpdateSourceLocationInput) error { if v == nil { return nil diff --git a/service/ram/internal/endpoints/endpoints.go b/service/ram/internal/endpoints/endpoints.go index 3ae52cba8f3..fa3cb3bb037 100644 --- a/service/ram/internal/endpoints/endpoints.go +++ b/service/ram/internal/endpoints/endpoints.go @@ -431,6 +431,24 @@ var defaultPartitions = endpoints.Partitions{ Region: "us-gov-east-1", }, }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-gov-west-1", }: endpoints.Endpoint{ @@ -439,6 +457,24 @@ var defaultPartitions = endpoints.Partitions{ Region: "us-gov-west-1", }, }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, }, }, } diff --git a/service/sns/api_op_CreateTopic.go 
b/service/sns/api_op_CreateTopic.go index 4f9121362d6..41062992723 100644 --- a/service/sns/api_op_CreateTopic.go +++ b/service/sns/api_op_CreateTopic.go @@ -66,13 +66,6 @@ type CreateTopicInput struct { // confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By // default, SignatureVersion is set to 1. // - // * TracingConfig – Tracing mode of an - // Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic - // passes through the tracing header it receives from an Amazon SNS publisher to - // its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to - // topic owner account if the sampled flag in the tracing header is true. This is - // only supported on standard topics. - // // The following attribute applies only to // server-side encryption // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): diff --git a/service/sns/api_op_GetTopicAttributes.go b/service/sns/api_op_GetTopicAttributes.go index 0fcb54ef39d..99c647f0aad 100644 --- a/service/sns/api_op_GetTopicAttributes.go +++ b/service/sns/api_op_GetTopicAttributes.go @@ -83,13 +83,6 @@ type GetTopicAttributesOutput struct { // * TopicArn – The // topic's ARN. // - // * TracingConfig – Tracing mode of an Amazon SNS topic. By default - // TracingConfig is set to PassThrough, and the topic passes through the tracing - // header it receives from an Amazon SNS publisher to its subscriptions. If set to - // Active, Amazon SNS will vend X-Ray segment data to topic owner account if the - // sampled flag in the tracing header is true. This is only supported on standard - // topics. - // // The following attribute applies only to server-side-encryption // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // diff --git a/service/sns/api_op_SetTopicAttributes.go b/service/sns/api_op_SetTopicAttributes.go index fbe8efc1e68..90a645cc8c9 100644 --- a/service/sns/api_op_SetTopicAttributes.go +++ b/service/sns/api_op_SetTopicAttributes.go @@ -36,6 +36,10 @@ type SetTopicAttributesInput struct { // names, descriptions, and values of the special request parameters that the // SetTopicAttributes action uses: // + // * ApplicationSuccessFeedbackRoleArn – Indicates + // failed message delivery status for an Amazon SNS topic that is subscribed to a + // platform application endpoint. + // // * DeliveryPolicy – The policy that defines how // Amazon SNS retries failed deliveries to HTTP/S endpoints. // @@ -46,15 +50,93 @@ type SetTopicAttributesInput struct { // that defines who can access your topic. By default, only the topic owner can // publish or subscribe to the topic. // - // * TracingConfig – Tracing mode of an Amazon - // SNS topic. By default TracingConfig is set to PassThrough, and the topic passes - // through the tracing header it receives from an Amazon SNS publisher to its - // subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to - // topic owner account if the sampled flag in the tracing header is true. This is - // only supported on standard topics. + // * HTTP + // + // * HTTPSuccessFeedbackRoleArn – + // Indicates successful message delivery status for an Amazon SNS topic that is + // subscribed to an HTTP endpoint. + // + // * HTTPSuccessFeedbackSampleRate – Indicates + // percentage of successful messages to sample for an Amazon SNS topic that is + // subscribed to an HTTP endpoint. 
+ // + // * HTTPFailureFeedbackRoleArn – Indicates failed + // message delivery status for an Amazon SNS topic that is subscribed to an HTTP + // endpoint. + // + // * Amazon Kinesis Data Firehose + // + // * FirehoseSuccessFeedbackRoleArn – + // Indicates successful message delivery status for an Amazon SNS topic that is + // subscribed to an Amazon Kinesis Data Firehose endpoint. + // + // * + // FirehoseSuccessFeedbackSampleRate – Indicates percentage of successful messages + // to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data + // Firehose endpoint. + // + // * FirehoseFailureFeedbackRoleArn – Indicates failed message + // delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis + // Data Firehose endpoint. + // + // * Lambda + // + // * LambdaSuccessFeedbackRoleArn – Indicates + // successful message delivery status for an Amazon SNS topic that is subscribed to + // an Lambda endpoint. + // + // * LambdaSuccessFeedbackSampleRate – Indicates percentage of + // successful messages to sample for an Amazon SNS topic that is subscribed to an + // Lambda endpoint. + // + // * LambdaFailureFeedbackRoleArn – Indicates failed message + // delivery status for an Amazon SNS topic that is subscribed to an Lambda + // endpoint. + // + // * Platform application endpoint + // + // * ApplicationSuccessFeedbackRoleArn + // – Indicates successful message delivery status for an Amazon SNS topic that is + // subscribed to an Amazon Web Services application endpoint. + // + // * + // ApplicationSuccessFeedbackSampleRate – Indicates percentage of successful + // messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web + // Services application endpoint. + // + // * ApplicationFailureFeedbackRoleArn – Indicates + // failed message delivery status for an Amazon SNS topic that is subscribed to an + // Amazon Web Services application endpoint. + // + // In addition to being able to + // configure topic attributes for message delivery status of notification messages + // sent to Amazon SNS application endpoints, you can also configure application + // attributes for the delivery status of push notification messages sent to push + // notification services. For example, For more information, see Using Amazon SNS + // Application Attributes for Message Delivery Status + // (https://docs.aws.amazon.com/sns/latest/dg/sns-msg-status.html). + // + // * Amazon + // SQS + // + // * SQSSuccessFeedbackRoleArn – Indicates successful message delivery status + // for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. + // + // * + // SQSSuccessFeedbackSampleRate – Indicates percentage of successful messages to + // sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. + // + // * + // SQSFailureFeedbackRoleArn – Indicates failed message delivery status for an + // Amazon SNS topic that is subscribed to an Amazon SQS endpoint. // - // The following attribute applies only to - // server-side-encryption + // The + // SuccessFeedbackRoleArn and FailureFeedbackRoleArn attributes are used to give + // Amazon SNS write access to use CloudWatch Logs on your behalf. The + // SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage + // (0-100) of successfully delivered messages. After you configure the + // FailureFeedbackRoleArn attribute, then all failed message deliveries generate + // CloudWatch Logs. 
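// A minimal sketch of setting the delivery-status attributes listed above for Amazon SQS
// subscriptions; SetTopicAttributes takes one attribute name/value pair per call.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sns"
)

func enableSQSDeliveryLogging(ctx context.Context, client *sns.Client, topicArn, roleArn string) error {
	// Give Amazon SNS a role it can use to write delivery status to CloudWatch Logs.
	if _, err := client.SetTopicAttributes(ctx, &sns.SetTopicAttributesInput{
		TopicArn:       aws.String(topicArn),
		AttributeName:  aws.String("SQSSuccessFeedbackRoleArn"),
		AttributeValue: aws.String(roleArn),
	}); err != nil {
		return err
	}
	// Sample 100% of successfully delivered messages.
	_, err := client.SetTopicAttributes(ctx, &sns.SetTopicAttributesInput{
		TopicArn:       aws.String(topicArn),
		AttributeName:  aws.String("SQSSuccessFeedbackSampleRate"),
		AttributeValue: aws.String("100"),
	})
	return err
}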
The following attribute applies only to server-side-encryption // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // // * diff --git a/service/swf/internal/endpoints/endpoints.go b/service/swf/internal/endpoints/endpoints.go index 536364b1605..3b6dd3368d8 100644 --- a/service/swf/internal/endpoints/endpoints.go +++ b/service/swf/internal/endpoints/endpoints.go @@ -419,6 +419,24 @@ var defaultPartitions = endpoints.Partitions{ Region: "us-gov-east-1", }, }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-gov-west-1", }: endpoints.Endpoint{ @@ -427,6 +445,24 @@ var defaultPartitions = endpoints.Partitions{ Region: "us-gov-west-1", }, }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, }, }, }
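// The endpoint tables above add FIPS variants for Kinesis, RAM, and SWF in the GovCloud
// regions; a minimal sketch of opting into them through the shared config resolver, shown
// here for SWF under the assumption that the default endpoint resolver is in use.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/swf"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion("us-gov-west-1"),
		// Resolve the FIPS variant endpoints added in this patch.
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
	)
	if err != nil {
		log.Fatal(err)
	}

	client := swf.NewFromConfig(cfg)
	_ = client // use the client as usual; requests go to the region's FIPS endpoint
}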