From 88040cbb296d55c810e88a0e8a7142b7ab16d52c Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Wed, 3 Apr 2024 18:14:05 +0000 Subject: [PATCH] Regenerated Clients --- .../006be38f829c4e5f8bbb59662f289d07.json | 8 + .../298358f1d33543eab8eb627b8dd4c6db.json | 8 + .../2fa6c8428f334e309fe74ed93ef5647a.json | 8 + .../41575353444b40ffbf474f4155544f00.json | 8 + .../4d9d917250344ea6b53a91d966d7be38.json | 8 + .../9212250e0b5c4098b89c21eefaecbb6b.json | 8 + .../bacaec2741ba412cb1ee264305b519ce.json | 8 + .../caac2ffbd3cc4b05bb68f3d8ffc88e01.json | 8 + .../e9ec33ce1a7b42e8994dd2048b63fa37.json | 8 + .../efd6b87f62c54980ab138cf3974355ec.json | 8 + .../api_op_CreateAudienceModel.go | 6 +- .../api_op_CreateConfiguredAudienceModel.go | 8 +- .../api_op_CreateTrainingDataset.go | 6 +- .../api_op_GetAudienceGenerationJob.go | 3 +- .../cleanroomsml/api_op_GetAudienceModel.go | 3 - .../api_op_StartAudienceGenerationJob.go | 6 +- service/cleanroomsml/api_op_TagResource.go | 6 +- service/cleanroomsml/deserializers.go | 160 +-- service/cleanroomsml/types/enums.go | 22 - service/cleanroomsml/types/errors.go | 3 +- service/cleanroomsml/types/types.go | 39 +- service/cloudformation/api_op_ActivateType.go | 2 +- .../api_op_BatchDescribeTypeConfigurations.go | 2 +- service/cloudformation/api_op_DescribeType.go | 2 +- service/cloudformation/api_op_RegisterType.go | 2 +- .../api_op_SetTypeConfiguration.go | 2 +- service/cloudformation/deserializers.go | 13 + service/cloudformation/types/enums.go | 26 + service/cloudformation/types/types.go | 13 +- service/datazone/api_op_CreateAsset.go | 3 + .../datazone/api_op_CreateAssetRevision.go | 3 + .../api_op_DeleteTimeSeriesDataPoints.go | 188 +++ service/datazone/api_op_GetAsset.go | 3 + .../datazone/api_op_GetTimeSeriesDataPoint.go | 172 +++ .../api_op_ListTimeSeriesDataPoints.go | 276 ++++ .../api_op_PostTimeSeriesDataPoints.go | 202 +++ service/datazone/deserializers.go | 1111 +++++++++++++++-- service/datazone/generated.json | 4 + service/datazone/serializers.go | 453 +++++++ service/datazone/snapshot_test.go | 96 ++ service/datazone/types/enums.go | 22 +- service/datazone/types/types.go | 107 ++ service/datazone/validators.go | 237 ++++ .../docdb/api_op_SwitchoverGlobalCluster.go | 155 +++ service/docdb/deserializers.go | 159 +++ service/docdb/generated.json | 1 + service/docdb/serializers.go | 81 ++ service/docdb/snapshot_test.go | 24 + service/docdb/validators.go | 42 + .../api_op_CreateMissionProfile.go | 8 +- .../groundstation/api_op_DescribeContact.go | 12 + .../api_op_UpdateMissionProfile.go | 8 +- service/groundstation/deserializers.go | 64 + service/groundstation/types/types.go | 14 + service/lambda/api_op_CreateFunction.go | 2 +- .../api_op_UpdateFunctionConfiguration.go | 2 +- service/lambda/types/enums.go | 2 + service/medialive/deserializers.go | 239 ++++ service/medialive/serializers.go | 111 ++ service/medialive/types/enums.go | 155 +++ service/medialive/types/types.go | 65 + service/medialive/validators.go | 20 + .../api_op_GetDICOMImportJob.go | 7 +- .../medicalimaging/api_op_SearchImageSets.go | 3 + service/medicalimaging/deserializers.go | 94 ++ service/medicalimaging/doc.go | 32 +- service/medicalimaging/serializers.go | 32 + service/medicalimaging/types/enums.go | 38 + service/medicalimaging/types/types.go | 57 +- .../types/types_exported_test.go | 7 + service/medicalimaging/validators.go | 23 + service/transfer/api_op_CreateConnector.go | 3 + service/transfer/api_op_CreateServer.go | 2 +- 
.../transfer/api_op_DescribeSecurityPolicy.go | 11 +- .../transfer/api_op_ListSecurityPolicies.go | 7 +- service/transfer/api_op_UpdateConnector.go | 3 + service/transfer/api_op_UpdateServer.go | 2 +- service/transfer/deserializers.go | 64 + service/transfer/serializers.go | 10 + service/transfer/types/enums.go | 36 + service/transfer/types/types.go | 46 +- .../internal/endpoints/endpoints.go | 75 ++ 82 files changed, 4663 insertions(+), 334 deletions(-) create mode 100644 .changelog/006be38f829c4e5f8bbb59662f289d07.json create mode 100644 .changelog/298358f1d33543eab8eb627b8dd4c6db.json create mode 100644 .changelog/2fa6c8428f334e309fe74ed93ef5647a.json create mode 100644 .changelog/41575353444b40ffbf474f4155544f00.json create mode 100644 .changelog/4d9d917250344ea6b53a91d966d7be38.json create mode 100644 .changelog/9212250e0b5c4098b89c21eefaecbb6b.json create mode 100644 .changelog/bacaec2741ba412cb1ee264305b519ce.json create mode 100644 .changelog/caac2ffbd3cc4b05bb68f3d8ffc88e01.json create mode 100644 .changelog/e9ec33ce1a7b42e8994dd2048b63fa37.json create mode 100644 .changelog/efd6b87f62c54980ab138cf3974355ec.json create mode 100644 service/datazone/api_op_DeleteTimeSeriesDataPoints.go create mode 100644 service/datazone/api_op_GetTimeSeriesDataPoint.go create mode 100644 service/datazone/api_op_ListTimeSeriesDataPoints.go create mode 100644 service/datazone/api_op_PostTimeSeriesDataPoints.go create mode 100644 service/docdb/api_op_SwitchoverGlobalCluster.go diff --git a/.changelog/006be38f829c4e5f8bbb59662f289d07.json b/.changelog/006be38f829c4e5f8bbb59662f289d07.json new file mode 100644 index 00000000000..9a0f216f026 --- /dev/null +++ b/.changelog/006be38f829c4e5f8bbb59662f289d07.json @@ -0,0 +1,8 @@ +{ + "id": "006be38f-829c-4e5f-8bbb-59662f289d07", + "type": "feature", + "description": "This release would return a new field - PolicyAction in cloudformation's existed DescribeChangeSetResponse, showing actions we are going to apply on the physical resource (e.g., Delete, Retain) according to the user's template", + "modules": [ + "service/cloudformation" + ] +} \ No newline at end of file diff --git a/.changelog/298358f1d33543eab8eb627b8dd4c6db.json b/.changelog/298358f1d33543eab8eb627b8dd4c6db.json new file mode 100644 index 00000000000..f8ffa4f0dec --- /dev/null +++ b/.changelog/298358f1d33543eab8eb627b8dd4c6db.json @@ -0,0 +1,8 @@ +{ + "id": "298358f1-d335-43ea-b8eb-627b8dd4c6db", + "type": "feature", + "description": "This release adds visibilityStartTime and visibilityEndTime to DescribeContact and ListContacts responses.", + "modules": [ + "service/groundstation" + ] +} \ No newline at end of file diff --git a/.changelog/2fa6c8428f334e309fe74ed93ef5647a.json b/.changelog/2fa6c8428f334e309fe74ed93ef5647a.json new file mode 100644 index 00000000000..67e9923b1f9 --- /dev/null +++ b/.changelog/2fa6c8428f334e309fe74ed93ef5647a.json @@ -0,0 +1,8 @@ +{ + "id": "2fa6c842-8f33-4e30-9fe7-4ed93ef5647a", + "type": "feature", + "description": "Add Ruby 3.3 (ruby3.3) support to AWS Lambda", + "modules": [ + "service/lambda" + ] +} \ No newline at end of file diff --git a/.changelog/41575353444b40ffbf474f4155544f00.json b/.changelog/41575353444b40ffbf474f4155544f00.json new file mode 100644 index 00000000000..1cc0d8d6b2d --- /dev/null +++ b/.changelog/41575353444b40ffbf474f4155544f00.json @@ -0,0 +1,8 @@ +{ + "id": "41575353-444b-40ff-bf47-4f4155544f00", + "type": "release", + "description": "New AWS service client module", + "modules": [ + "internal/protocoltest/smithyrpcv2cbor" + ] +} \ 
No newline at end of file diff --git a/.changelog/4d9d917250344ea6b53a91d966d7be38.json b/.changelog/4d9d917250344ea6b53a91d966d7be38.json new file mode 100644 index 00000000000..806466e79c7 --- /dev/null +++ b/.changelog/4d9d917250344ea6b53a91d966d7be38.json @@ -0,0 +1,8 @@ +{ + "id": "4d9d9172-5034-4ea6-b53a-91d966d7be38", + "type": "feature", + "description": "This release supports the feature of dataQuality to enrich asset with dataQualityResult in Amazon DataZone.", + "modules": [ + "service/datazone" + ] +} \ No newline at end of file diff --git a/.changelog/9212250e0b5c4098b89c21eefaecbb6b.json b/.changelog/9212250e0b5c4098b89c21eefaecbb6b.json new file mode 100644 index 00000000000..e01ecedd6bd --- /dev/null +++ b/.changelog/9212250e0b5c4098b89c21eefaecbb6b.json @@ -0,0 +1,8 @@ +{ + "id": "9212250e-0b5c-4098-b89c-21eefaecbb6b", + "type": "feature", + "description": "The release includes a public SDK for AWS Clean Rooms ML APIs, making them globally available to developers worldwide.", + "modules": [ + "service/cleanroomsml" + ] +} \ No newline at end of file diff --git a/.changelog/bacaec2741ba412cb1ee264305b519ce.json b/.changelog/bacaec2741ba412cb1ee264305b519ce.json new file mode 100644 index 00000000000..8bfe07f3abd --- /dev/null +++ b/.changelog/bacaec2741ba412cb1ee264305b519ce.json @@ -0,0 +1,8 @@ +{ + "id": "bacaec27-41ba-412c-b1ee-264305b519ce", + "type": "feature", + "description": "This release adds Global Cluster Switchover capability which enables you to change your global cluster's primary AWS Region, the region that serves writes, while preserving the replication between all regions in the global cluster.", + "modules": [ + "service/docdb" + ] +} \ No newline at end of file diff --git a/.changelog/caac2ffbd3cc4b05bb68f3d8ffc88e01.json b/.changelog/caac2ffbd3cc4b05bb68f3d8ffc88e01.json new file mode 100644 index 00000000000..dbc60caf68b --- /dev/null +++ b/.changelog/caac2ffbd3cc4b05bb68f3d8ffc88e01.json @@ -0,0 +1,8 @@ +{ + "id": "caac2ffb-d3cc-4b05-bb68-f3d8ffc88e01", + "type": "feature", + "description": "SearchImageSets API now supports following enhancements - Additional support for searching on UpdatedAt and SeriesInstanceUID - Support for searching existing filters between dates/times - Support for sorting the search result by Ascending/Descending - Additional parameters returned in the response", + "modules": [ + "service/medicalimaging" + ] +} \ No newline at end of file diff --git a/.changelog/e9ec33ce1a7b42e8994dd2048b63fa37.json b/.changelog/e9ec33ce1a7b42e8994dd2048b63fa37.json new file mode 100644 index 00000000000..42dda60e96d --- /dev/null +++ b/.changelog/e9ec33ce1a7b42e8994dd2048b63fa37.json @@ -0,0 +1,8 @@ +{ + "id": "e9ec33ce-1a7b-42e8-994d-d2048b63fa37", + "type": "feature", + "description": "Add ability to specify Security Policies for SFTP Connectors", + "modules": [ + "service/transfer" + ] +} \ No newline at end of file diff --git a/.changelog/efd6b87f62c54980ab138cf3974355ec.json b/.changelog/efd6b87f62c54980ab138cf3974355ec.json new file mode 100644 index 00000000000..4b365899a70 --- /dev/null +++ b/.changelog/efd6b87f62c54980ab138cf3974355ec.json @@ -0,0 +1,8 @@ +{ + "id": "efd6b87f-62c5-4980-ab13-8cf3974355ec", + "type": "feature", + "description": "Cmaf Ingest outputs are now supported in Media Live", + "modules": [ + "service/medialive" + ] +} \ No newline at end of file diff --git a/service/cleanroomsml/api_op_CreateAudienceModel.go b/service/cleanroomsml/api_op_CreateAudienceModel.go index 80373109ad8..2a4eadfa579 100644 --- 
a/service/cleanroomsml/api_op_CreateAudienceModel.go +++ b/service/cleanroomsml/api_op_CreateAudienceModel.go @@ -66,9 +66,9 @@ type CreateAudienceModelInput struct { // - Do not use aws:, AWS:, or any upper or lowercase combination of such as a // prefix for keys as it is reserved for AWS use. You cannot edit or delete tag // keys with this prefix. Values can have this prefix. If a tag value has aws as - // its prefix but the key does not, then Forecast considers it to be a user tag and - // will count against the limit of 50 tags. Tags with only the key prefix of aws do - // not count against your tags per resource limit. + // its prefix but the key does not, then Clean Rooms ML considers it to be a user + // tag and will count against the limit of 50 tags. Tags with only the key prefix + // of aws do not count against your tags per resource limit. Tags map[string]string // The end date and time of the training window. diff --git a/service/cleanroomsml/api_op_CreateConfiguredAudienceModel.go b/service/cleanroomsml/api_op_CreateConfiguredAudienceModel.go index 90ebd4be002..5eb8834ac49 100644 --- a/service/cleanroomsml/api_op_CreateConfiguredAudienceModel.go +++ b/service/cleanroomsml/api_op_CreateConfiguredAudienceModel.go @@ -76,7 +76,7 @@ type CreateConfiguredAudienceModelInput struct { Description *string // The minimum number of users from the seed audience that must match with users - // in the training data of the audience model. + // in the training data of the audience model. The default value is 500. MinMatchingSeedSize *int32 // The optional metadata that you apply to the resource to help you categorize and @@ -95,9 +95,9 @@ type CreateConfiguredAudienceModelInput struct { // - Do not use aws:, AWS:, or any upper or lowercase combination of such as a // prefix for keys as it is reserved for AWS use. You cannot edit or delete tag // keys with this prefix. Values can have this prefix. If a tag value has aws as - // its prefix but the key does not, then Forecast considers it to be a user tag and - // will count against the limit of 50 tags. Tags with only the key prefix of aws do - // not count against your tags per resource limit. + // its prefix but the key does not, then Clean Rooms ML considers it to be a user + // tag and will count against the limit of 50 tags. Tags with only the key prefix + // of aws do not count against your tags per resource limit. Tags map[string]string noSmithyDocumentSerde diff --git a/service/cleanroomsml/api_op_CreateTrainingDataset.go b/service/cleanroomsml/api_op_CreateTrainingDataset.go index c045d57b7b5..221d37d7777 100644 --- a/service/cleanroomsml/api_op_CreateTrainingDataset.go +++ b/service/cleanroomsml/api_op_CreateTrainingDataset.go @@ -11,9 +11,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Defines the information necessary to create a training dataset, or seed -// audience. In Clean Rooms ML, the TrainingDataset is metadata that points to a -// Glue table, which is read only during AudienceModel creation. +// Defines the information necessary to create a training dataset. In Clean Rooms +// ML, the TrainingDataset is metadata that points to a Glue table, which is read +// only during AudienceModel creation. 
func (c *Client) CreateTrainingDataset(ctx context.Context, params *CreateTrainingDatasetInput, optFns ...func(*Options)) (*CreateTrainingDatasetOutput, error) { if params == nil { params = &CreateTrainingDatasetInput{} diff --git a/service/cleanroomsml/api_op_GetAudienceGenerationJob.go b/service/cleanroomsml/api_op_GetAudienceGenerationJob.go index 0f6863cf774..9e621100d96 100644 --- a/service/cleanroomsml/api_op_GetAudienceGenerationJob.go +++ b/service/cleanroomsml/api_op_GetAudienceGenerationJob.go @@ -87,7 +87,8 @@ type GetAudienceGenerationJobOutput struct { // the seed. IncludeSeedInOutput *bool - // The relevance scores for different audience sizes. + // The relevance scores for different audience sizes and the recall score of the + // generated audience. Metrics *types.AudienceQualityMetrics // The seed audience that was used for this audience generation job. This field diff --git a/service/cleanroomsml/api_op_GetAudienceModel.go b/service/cleanroomsml/api_op_GetAudienceModel.go index 679da78eb52..6ae136bbabf 100644 --- a/service/cleanroomsml/api_op_GetAudienceModel.go +++ b/service/cleanroomsml/api_op_GetAudienceModel.go @@ -77,9 +77,6 @@ type GetAudienceModelOutput struct { // The KMS key ARN used for the audience model. KmsKeyArn *string - // Accuracy metrics for the model. - Metrics []types.AudienceModelMetric - // Details about the status of the audience model. StatusDetails *types.StatusDetails diff --git a/service/cleanroomsml/api_op_StartAudienceGenerationJob.go b/service/cleanroomsml/api_op_StartAudienceGenerationJob.go index 2870679b2bd..8d1194e5460 100644 --- a/service/cleanroomsml/api_op_StartAudienceGenerationJob.go +++ b/service/cleanroomsml/api_op_StartAudienceGenerationJob.go @@ -70,9 +70,9 @@ type StartAudienceGenerationJobInput struct { // - Do not use aws:, AWS:, or any upper or lowercase combination of such as a // prefix for keys as it is reserved for AWS use. You cannot edit or delete tag // keys with this prefix. Values can have this prefix. If a tag value has aws as - // its prefix but the key does not, then Forecast considers it to be a user tag and - // will count against the limit of 50 tags. Tags with only the key prefix of aws do - // not count against your tags per resource limit. + // its prefix but the key does not, then Clean Rooms ML considers it to be a user + // tag and will count against the limit of 50 tags. Tags with only the key prefix + // of aws do not count against your tags per resource limit. Tags map[string]string noSmithyDocumentSerde diff --git a/service/cleanroomsml/api_op_TagResource.go b/service/cleanroomsml/api_op_TagResource.go index 088115695e5..6d19eafe150 100644 --- a/service/cleanroomsml/api_op_TagResource.go +++ b/service/cleanroomsml/api_op_TagResource.go @@ -49,9 +49,9 @@ type TagResourceInput struct { // - Do not use aws:, AWS:, or any upper or lowercase combination of such as a // prefix for keys as it is reserved for AWS use. You cannot edit or delete tag // keys with this prefix. Values can have this prefix. If a tag value has aws as - // its prefix but the key does not, then Forecast considers it to be a user tag and - // will count against the limit of 50 tags. Tags with only the key prefix of aws do - // not count against your tags per resource limit. + // its prefix but the key does not, then Clean Rooms considers it to be a user tag + // and will count against the limit of 50 tags. Tags with only the key prefix of + // aws do not count against your tags per resource limit. // // This member is required. 
Tags map[string]string diff --git a/service/cleanroomsml/deserializers.go b/service/cleanroomsml/deserializers.go index ef6161f078a..37c5c0b9d08 100644 --- a/service/cleanroomsml/deserializers.go +++ b/service/cleanroomsml/deserializers.go @@ -1436,11 +1436,6 @@ func awsRestjson1_deserializeOpDocumentGetAudienceModelOutput(v **GetAudienceMod sv.KmsKeyArn = ptr.String(jtv) } - case "metrics": - if err := awsRestjson1_deserializeDocumentAudienceModelMetrics(&sv.Metrics, value); err != nil { - return err - } - case "name": if value != nil { jtv, ok := value.(string) @@ -4542,127 +4537,6 @@ func awsRestjson1_deserializeDocumentAudienceModelList(v *[]types.AudienceModelS return nil } -func awsRestjson1_deserializeDocumentAudienceModelMetric(v **types.AudienceModelMetric, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AudienceModelMetric - if *v == nil { - sv = &types.AudienceModelMetric{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "forTopKItemPredictions": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ForTopKItemPredictions = ptr.Int32(int32(i64)) - } - - case "type": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AudienceModelMetricType to be of type string, got %T instead", value) - } - sv.Type = types.AudienceModelMetricType(jtv) - } - - case "value": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.Value = ptr.Float64(f64) - - case string: - var f64 float64 - switch { - case strings.EqualFold(jtv, "NaN"): - f64 = math.NaN() - - case strings.EqualFold(jtv, "Infinity"): - f64 = math.Inf(1) - - case strings.EqualFold(jtv, "-Infinity"): - f64 = math.Inf(-1) - - default: - return fmt.Errorf("unknown JSON number value: %s", jtv) - - } - sv.Value = ptr.Float64(f64) - - default: - return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) - - } - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAudienceModelMetrics(v *[]types.AudienceModelMetric, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.AudienceModelMetric - if *v == nil { - cv = []types.AudienceModelMetric{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.AudienceModelMetric - destAddr := &col - if err := awsRestjson1_deserializeDocumentAudienceModelMetric(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - func awsRestjson1_deserializeDocumentAudienceModelSummary(v **types.AudienceModelSummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -4787,6 +4661,40 @@ func awsRestjson1_deserializeDocumentAudienceQualityMetrics(v **types.AudienceQu for key, value := range shape { switch key { + case "recallMetric": + if value != nil { + switch jtv := 
value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.RecallMetric = ptr.Float64(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.RecallMetric = ptr.Float64(f64) + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + + } + } + case "relevanceMetrics": if err := awsRestjson1_deserializeDocumentRelevanceMetrics(&sv.RelevanceMetrics, value); err != nil { return err diff --git a/service/cleanroomsml/types/enums.go b/service/cleanroomsml/types/enums.go index 206af12f736..19dd49df025 100644 --- a/service/cleanroomsml/types/enums.go +++ b/service/cleanroomsml/types/enums.go @@ -52,28 +52,6 @@ func (AudienceGenerationJobStatus) Values() []AudienceGenerationJobStatus { } } -type AudienceModelMetricType string - -// Enum values for AudienceModelMetricType -const ( - AudienceModelMetricTypeNormalizedDiscountedCumulativeGain AudienceModelMetricType = "NORMALIZED_DISCOUNTED_CUMULATIVE_GAIN" - AudienceModelMetricTypeMeanReciprocalRank AudienceModelMetricType = "MEAN_RECIPROCAL_RANK" - AudienceModelMetricTypePrecision AudienceModelMetricType = "PRECISION" - AudienceModelMetricTypeRecall AudienceModelMetricType = "RECALL" -) - -// Values returns all known values for AudienceModelMetricType. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (AudienceModelMetricType) Values() []AudienceModelMetricType { - return []AudienceModelMetricType{ - "NORMALIZED_DISCOUNTED_CUMULATIVE_GAIN", - "MEAN_RECIPROCAL_RANK", - "PRECISION", - "RECALL", - } -} - type AudienceModelStatus string // Enum values for AudienceModelStatus diff --git a/service/cleanroomsml/types/errors.go b/service/cleanroomsml/types/errors.go index e806f3b822b..ae57e21b825 100644 --- a/service/cleanroomsml/types/errors.go +++ b/service/cleanroomsml/types/errors.go @@ -33,7 +33,8 @@ func (e *AccessDeniedException) ErrorCode() string { } func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// A resource with that name already exists in this region. +// You can't complete this action because another resource depends on this +// resource. type ConflictException struct { Message *string diff --git a/service/cleanroomsml/types/types.go b/service/cleanroomsml/types/types.go index aa93ec3d755..e64ebdcc56a 100644 --- a/service/cleanroomsml/types/types.go +++ b/service/cleanroomsml/types/types.go @@ -64,12 +64,16 @@ type AudienceExportJobSummary struct { noSmithyDocumentSerde } -// Defines the Amazon S3 bucket where the training data for the configured +// Defines the Amazon S3 bucket where the seed audience for the generating // audience is stored. type AudienceGenerationJobDataSource struct { - // The Amazon S3 bucket where the training data for the configured audience is - // stored. + // Defines the Amazon S3 bucket where the seed audience for the generating + // audience is stored. A valid data source is a JSON line file in the following + // format: {"user_id": "111111"} + // {"user_id": "222222"} + // + // ... // // This member is required. 
DataSource *S3ConfigMap @@ -129,27 +133,6 @@ type AudienceGenerationJobSummary struct { noSmithyDocumentSerde } -// The audience model metrics. -type AudienceModelMetric struct { - - // The number of users that were used to generate these model metrics. - // - // This member is required. - ForTopKItemPredictions *int32 - - // The audience model metric. - // - // This member is required. - Type AudienceModelMetricType - - // The value of the audience model metric - // - // This member is required. - Value *float64 - - noSmithyDocumentSerde -} - // Information about the audience model. type AudienceModelSummary struct { @@ -198,6 +181,14 @@ type AudienceQualityMetrics struct { // This member is required. RelevanceMetrics []RelevanceMetric + // The recall score of the generated audience. Recall is the percentage of the + // most similar users (by default, the most similar 20%) from a sample of the + // training data that are included in the seed audience by the audience generation + // job. Values range from 0-1, larger values indicate a better audience. A recall + // value approximately equal to the maximum bin size indicates that the audience + // model is equivalent to random selection. + RecallMetric *float64 + noSmithyDocumentSerde } diff --git a/service/cloudformation/api_op_ActivateType.go b/service/cloudformation/api_op_ActivateType.go index a123c6b1c5d..16ebb75f018 100644 --- a/service/cloudformation/api_op_ActivateType.go +++ b/service/cloudformation/api_op_ActivateType.go @@ -16,7 +16,7 @@ import ( // in the CloudFormation User Guide. Once you have activated a public third-party // extension in your account and Region, use SetTypeConfiguration (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_SetTypeConfiguration.html) // to specify configuration properties for the extension. For more information, see -// Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-register.html#registry-set-configuration) +// Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-private.html#registry-set-configuration) // in the CloudFormation User Guide. func (c *Client) ActivateType(ctx context.Context, params *ActivateTypeInput, optFns ...func(*Options)) (*ActivateTypeOutput, error) { if params == nil { diff --git a/service/cloudformation/api_op_BatchDescribeTypeConfigurations.go b/service/cloudformation/api_op_BatchDescribeTypeConfigurations.go index 2e469fff020..3667dfd1457 100644 --- a/service/cloudformation/api_op_BatchDescribeTypeConfigurations.go +++ b/service/cloudformation/api_op_BatchDescribeTypeConfigurations.go @@ -13,7 +13,7 @@ import ( // Returns configuration data for the specified CloudFormation extensions, from // the CloudFormation registry for the account and Region. For more information, -// see Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-register.html#registry-set-configuration) +// see Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-private.html#registry-set-configuration) // in the CloudFormation User Guide. 
func (c *Client) BatchDescribeTypeConfigurations(ctx context.Context, params *BatchDescribeTypeConfigurationsInput, optFns ...func(*Options)) (*BatchDescribeTypeConfigurationsOutput, error) { if params == nil { diff --git a/service/cloudformation/api_op_DescribeType.go b/service/cloudformation/api_op_DescribeType.go index d121aecdc50..5c2b367be91 100644 --- a/service/cloudformation/api_op_DescribeType.go +++ b/service/cloudformation/api_op_DescribeType.go @@ -77,7 +77,7 @@ type DescribeTypeOutput struct { // A JSON string that represent the current configuration data for the extension // in this account and Region. To set the configuration data for an extension, use // SetTypeConfiguration (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_SetTypeConfiguration.html) - // . For more information, see Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-register.html#registry-set-configuration) + // . For more information, see Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-private.html#registry-set-configuration) // in the CloudFormation User Guide. ConfigurationSchema *string diff --git a/service/cloudformation/api_op_RegisterType.go b/service/cloudformation/api_op_RegisterType.go index 2ca65028388..af3f4998c43 100644 --- a/service/cloudformation/api_op_RegisterType.go +++ b/service/cloudformation/api_op_RegisterType.go @@ -28,7 +28,7 @@ import ( // to monitor the progress of the registration request. Once you have registered a // private extension in your account and Region, use SetTypeConfiguration (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_SetTypeConfiguration.html) // to specify configuration properties for the extension. For more information, see -// Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-register.html#registry-set-configuration) +// Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-private.html#registry-set-configuration) // in the CloudFormation User Guide. func (c *Client) RegisterType(ctx context.Context, params *RegisterTypeInput, optFns ...func(*Options)) (*RegisterTypeOutput, error) { if params == nil { diff --git a/service/cloudformation/api_op_SetTypeConfiguration.go b/service/cloudformation/api_op_SetTypeConfiguration.go index 296d3935cf6..6f8e3c23c4a 100644 --- a/service/cloudformation/api_op_SetTypeConfiguration.go +++ b/service/cloudformation/api_op_SetTypeConfiguration.go @@ -14,7 +14,7 @@ import ( // Specifies the configuration data for a registered CloudFormation extension, in // the given account and Region. To view the current configuration data for an // extension, refer to the ConfigurationSchema element of DescribeType (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeType.html) -// . For more information, see Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-register.html#registry-set-configuration) +// . For more information, see Configuring extensions at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-private.html#registry-set-configuration) // in the CloudFormation User Guide. 
It's strongly recommended that you use dynamic // references to restrict sensitive configuration definitions, such as third-party // credentials. For more details on dynamic references, see Using dynamic diff --git a/service/cloudformation/deserializers.go b/service/cloudformation/deserializers.go index 3c59ccf4c10..6d43180ce75 100644 --- a/service/cloudformation/deserializers.go +++ b/service/cloudformation/deserializers.go @@ -14271,6 +14271,19 @@ func awsAwsquery_deserializeDocumentResourceChange(v **types.ResourceChange, dec sv.PhysicalResourceId = ptr.String(xtv) } + case strings.EqualFold("PolicyAction", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PolicyAction = types.PolicyAction(xtv) + } + case strings.EqualFold("Replacement", t.Name.Local): val, err := decoder.Value() if err != nil { diff --git a/service/cloudformation/types/enums.go b/service/cloudformation/types/enums.go index b26dd48cfce..b7aa47e9290 100644 --- a/service/cloudformation/types/enums.go +++ b/service/cloudformation/types/enums.go @@ -707,6 +707,32 @@ func (PermissionModels) Values() []PermissionModels { } } +type PolicyAction string + +// Enum values for PolicyAction +const ( + PolicyActionDelete PolicyAction = "Delete" + PolicyActionRetain PolicyAction = "Retain" + PolicyActionSnapshot PolicyAction = "Snapshot" + PolicyActionReplaceAndDelete PolicyAction = "ReplaceAndDelete" + PolicyActionReplaceAndRetain PolicyAction = "ReplaceAndRetain" + PolicyActionReplaceAndSnapshot PolicyAction = "ReplaceAndSnapshot" +) + +// Values returns all known values for PolicyAction. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (PolicyAction) Values() []PolicyAction { + return []PolicyAction{ + "Delete", + "Retain", + "Snapshot", + "ReplaceAndDelete", + "ReplaceAndRetain", + "ReplaceAndSnapshot", + } +} + type ProvisioningType string // Enum values for ProvisioningType diff --git a/service/cloudformation/types/types.go b/service/cloudformation/types/types.go index f46afa7a0dd..f31e59aed12 100644 --- a/service/cloudformation/types/types.go +++ b/service/cloudformation/types/types.go @@ -558,6 +558,17 @@ type ResourceChange struct { // have physical IDs because they haven't been created. PhysicalResourceId *string + // The action that will be taken on the physical resource when the change set is + // executed. + // - Delete The resource will be deleted. + // - Retain The resource will be retained. + // - Snapshot The resource will have a snapshot taken. + // - ReplaceAndDelete The resource will be replaced and then deleted. + // - ReplaceAndRetain The resource will be replaced and then retained. + // - ReplaceAndSnapshot The resource will be replaced and then have a snapshot + // taken. + PolicyAction PolicyAction + // For the Modify action, indicates whether CloudFormation will replace the // resource by creating a new one and deleting the old one. This value depends on // the value of the RequiresRecreation property in the ResourceTargetDefinition @@ -2417,7 +2428,7 @@ type TemplateSummaryConfig struct { // Detailed information concerning the specification of a CloudFormation extension // in a given account and Region. 
For more information, see Configuring extensions -// at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-register.html#registry-set-configuration) +// at the account level (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/registry-private.html#registry-set-configuration) // in the CloudFormation User Guide. type TypeConfigurationDetails struct { diff --git a/service/datazone/api_op_CreateAsset.go b/service/datazone/api_op_CreateAsset.go index 6c224e7996b..f355d79e8be 100644 --- a/service/datazone/api_op_CreateAsset.go +++ b/service/datazone/api_op_CreateAsset.go @@ -139,6 +139,9 @@ type CreateAssetOutput struct { // The glossary terms that are attached to the created asset. GlossaryTerms []string + // The latest data point that was imported into the time series form for the asset. + LatestTimeSeriesDataPointFormsOutput []types.TimeSeriesDataPointSummaryFormOutput + // The details of an asset published in an Amazon DataZone catalog. Listing *types.AssetListingDetails diff --git a/service/datazone/api_op_CreateAssetRevision.go b/service/datazone/api_op_CreateAssetRevision.go index 73d4be00212..33d25c2f932 100644 --- a/service/datazone/api_op_CreateAssetRevision.go +++ b/service/datazone/api_op_CreateAssetRevision.go @@ -132,6 +132,9 @@ type CreateAssetRevisionOutput struct { // The glossary terms that were attached to the asset as part of asset revision. GlossaryTerms []string + // The latest data point that was imported into the time series form for the asset. + LatestTimeSeriesDataPointFormsOutput []types.TimeSeriesDataPointSummaryFormOutput + // The details of an asset published in an Amazon DataZone catalog. Listing *types.AssetListingDetails diff --git a/service/datazone/api_op_DeleteTimeSeriesDataPoints.go b/service/datazone/api_op_DeleteTimeSeriesDataPoints.go new file mode 100644 index 00000000000..2c18e51ee8e --- /dev/null +++ b/service/datazone/api_op_DeleteTimeSeriesDataPoints.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package datazone + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/datazone/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified time series form for the specified asset. +func (c *Client) DeleteTimeSeriesDataPoints(ctx context.Context, params *DeleteTimeSeriesDataPointsInput, optFns ...func(*Options)) (*DeleteTimeSeriesDataPointsOutput, error) { + if params == nil { + params = &DeleteTimeSeriesDataPointsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteTimeSeriesDataPoints", params, optFns, c.addOperationDeleteTimeSeriesDataPointsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteTimeSeriesDataPointsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteTimeSeriesDataPointsInput struct { + + // The ID of the Amazon DataZone domain that houses the asset for which you want + // to delete a time series form. + // + // This member is required. + DomainIdentifier *string + + // The ID of the asset for which you want to delete a time series form. + // + // This member is required. + EntityIdentifier *string + + // The type of the asset for which you want to delete a time series form. + // + // This member is required. + EntityType types.TimeSeriesEntityType + + // The name of the time series form that you want to delete. 
+ // + // This member is required. + FormName *string + + // A unique, case-sensitive identifier to ensure idempotency of the request. This + // field is automatically populated if not provided. + ClientToken *string + + noSmithyDocumentSerde +} + +type DeleteTimeSeriesDataPointsOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteTimeSeriesDataPointsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteTimeSeriesDataPoints{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteTimeSeriesDataPoints{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteTimeSeriesDataPoints"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addIdempotencyToken_opDeleteTimeSeriesDataPointsMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteTimeSeriesDataPointsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteTimeSeriesDataPoints(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpDeleteTimeSeriesDataPoints struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpDeleteTimeSeriesDataPoints) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpDeleteTimeSeriesDataPoints) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := 
in.Parameters.(*DeleteTimeSeriesDataPointsInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *DeleteTimeSeriesDataPointsInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opDeleteTimeSeriesDataPointsMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpDeleteTimeSeriesDataPoints{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opDeleteTimeSeriesDataPoints(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteTimeSeriesDataPoints", + } +} diff --git a/service/datazone/api_op_GetAsset.go b/service/datazone/api_op_GetAsset.go index c570a145b5d..6043c73f0e8 100644 --- a/service/datazone/api_op_GetAsset.go +++ b/service/datazone/api_op_GetAsset.go @@ -109,6 +109,9 @@ type GetAssetOutput struct { // The business glossary terms attached to the asset. GlossaryTerms []string + // The latest data point that was imported into the time series form for the asset. + LatestTimeSeriesDataPointFormsOutput []types.TimeSeriesDataPointSummaryFormOutput + // The listing of the asset. Listing *types.AssetListingDetails diff --git a/service/datazone/api_op_GetTimeSeriesDataPoint.go b/service/datazone/api_op_GetTimeSeriesDataPoint.go new file mode 100644 index 00000000000..9279149daf8 --- /dev/null +++ b/service/datazone/api_op_GetTimeSeriesDataPoint.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package datazone + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/datazone/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the existing data point for the asset. +func (c *Client) GetTimeSeriesDataPoint(ctx context.Context, params *GetTimeSeriesDataPointInput, optFns ...func(*Options)) (*GetTimeSeriesDataPointOutput, error) { + if params == nil { + params = &GetTimeSeriesDataPointInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetTimeSeriesDataPoint", params, optFns, c.addOperationGetTimeSeriesDataPointMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetTimeSeriesDataPointOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetTimeSeriesDataPointInput struct { + + // The ID of the Amazon DataZone domain that houses the asset for which you want + // to get the data point. + // + // This member is required. + DomainIdentifier *string + + // The ID of the asset for which you want to get the data point. + // + // This member is required. + EntityIdentifier *string + + // The type of the asset for which you want to get the data point. + // + // This member is required. + EntityType types.TimeSeriesEntityType + + // The name of the time series form that houses the data point that you want to + // get. + // + // This member is required. + FormName *string + + // The ID of the data point that you want to get. + // + // This member is required. 
+ Identifier *string + + noSmithyDocumentSerde +} + +type GetTimeSeriesDataPointOutput struct { + + // The ID of the Amazon DataZone domain that houses the asset data point that you + // want to get. + DomainId *string + + // The ID of the asset for which you want to get the data point. + EntityId *string + + // The type of the asset for which you want to get the data point. + EntityType types.TimeSeriesEntityType + + // The time series form that houses the data point that you want to get. + Form *types.TimeSeriesDataPointFormOutput + + // The name of the time series form that houses the data point that you want to + // get. + FormName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetTimeSeriesDataPointMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetTimeSeriesDataPoint{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetTimeSeriesDataPoint{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetTimeSeriesDataPoint"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpGetTimeSeriesDataPointValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetTimeSeriesDataPoint(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetTimeSeriesDataPoint(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetTimeSeriesDataPoint", + } +} diff --git a/service/datazone/api_op_ListTimeSeriesDataPoints.go b/service/datazone/api_op_ListTimeSeriesDataPoints.go new file mode 100644 index 
00000000000..386e6063ccd --- /dev/null +++ b/service/datazone/api_op_ListTimeSeriesDataPoints.go @@ -0,0 +1,276 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package datazone + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/datazone/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Lists time series data points. +func (c *Client) ListTimeSeriesDataPoints(ctx context.Context, params *ListTimeSeriesDataPointsInput, optFns ...func(*Options)) (*ListTimeSeriesDataPointsOutput, error) { + if params == nil { + params = &ListTimeSeriesDataPointsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTimeSeriesDataPoints", params, optFns, c.addOperationListTimeSeriesDataPointsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTimeSeriesDataPointsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTimeSeriesDataPointsInput struct { + + // The ID of the Amazon DataZone domain that houses the assets for which you want + // to list time series data points. + // + // This member is required. + DomainIdentifier *string + + // The ID of the asset for which you want to list data points. + // + // This member is required. + EntityIdentifier *string + + // The type of the asset for which you want to list data points. + // + // This member is required. + EntityType types.TimeSeriesEntityType + + // The name of the time series data points form. + // + // This member is required. + FormName *string + + // The timestamp at which the data points that you wanted to list ended. + EndedAt *time.Time + + // The maximum number of data points to return in a single call to + // ListTimeSeriesDataPoints. When the number of data points to be listed is greater + // than the value of MaxResults, the response contains a NextToken value that you + // can use in a subsequent call to ListTimeSeriesDataPoints to list the next set of + // data points. + MaxResults *int32 + + // When the number of data points is greater than the default value for the + // MaxResults parameter, or if you explicitly specify a value for MaxResults that + // is less than the number of data points, the response includes a pagination token + // named NextToken. You can specify this NextToken value in a subsequent call to + // ListTimeSeriesDataPoints to list the next set of data points. + NextToken *string + + // The timestamp at which the data points that you want to list started. + StartedAt *time.Time + + noSmithyDocumentSerde +} + +type ListTimeSeriesDataPointsOutput struct { + + // The results of the ListTimeSeriesDataPoints action. + Items []types.TimeSeriesDataPointSummaryFormOutput + + // When the number of data points is greater than the default value for the + // MaxResults parameter, or if you explicitly specify a value for MaxResults that + // is less than the number of data points, the response includes a pagination token + // named NextToken. You can specify this NextToken value in a subsequent call to + // ListTimeSeriesDataPoints to list the next set of data points. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTimeSeriesDataPointsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListTimeSeriesDataPoints{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTimeSeriesDataPoints{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTimeSeriesDataPoints"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpListTimeSeriesDataPointsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTimeSeriesDataPoints(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListTimeSeriesDataPointsAPIClient is a client that implements the +// ListTimeSeriesDataPoints operation. +type ListTimeSeriesDataPointsAPIClient interface { + ListTimeSeriesDataPoints(context.Context, *ListTimeSeriesDataPointsInput, ...func(*Options)) (*ListTimeSeriesDataPointsOutput, error) +} + +var _ ListTimeSeriesDataPointsAPIClient = (*Client)(nil) + +// ListTimeSeriesDataPointsPaginatorOptions is the paginator options for +// ListTimeSeriesDataPoints +type ListTimeSeriesDataPointsPaginatorOptions struct { + // The maximum number of data points to return in a single call to + // ListTimeSeriesDataPoints. When the number of data points to be listed is greater + // than the value of MaxResults, the response contains a NextToken value that you + // can use in a subsequent call to ListTimeSeriesDataPoints to list the next set of + // data points. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListTimeSeriesDataPointsPaginator is a paginator for ListTimeSeriesDataPoints +type ListTimeSeriesDataPointsPaginator struct { + options ListTimeSeriesDataPointsPaginatorOptions + client ListTimeSeriesDataPointsAPIClient + params *ListTimeSeriesDataPointsInput + nextToken *string + firstPage bool +} + +// NewListTimeSeriesDataPointsPaginator returns a new +// ListTimeSeriesDataPointsPaginator +func NewListTimeSeriesDataPointsPaginator(client ListTimeSeriesDataPointsAPIClient, params *ListTimeSeriesDataPointsInput, optFns ...func(*ListTimeSeriesDataPointsPaginatorOptions)) *ListTimeSeriesDataPointsPaginator { + if params == nil { + params = &ListTimeSeriesDataPointsInput{} + } + + options := ListTimeSeriesDataPointsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTimeSeriesDataPointsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTimeSeriesDataPointsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListTimeSeriesDataPoints page. +func (p *ListTimeSeriesDataPointsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTimeSeriesDataPointsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListTimeSeriesDataPoints(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListTimeSeriesDataPoints(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTimeSeriesDataPoints", + } +} diff --git a/service/datazone/api_op_PostTimeSeriesDataPoints.go b/service/datazone/api_op_PostTimeSeriesDataPoints.go new file mode 100644 index 00000000000..d76829b8c47 --- /dev/null +++ b/service/datazone/api_op_PostTimeSeriesDataPoints.go @@ -0,0 +1,202 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package datazone + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/datazone/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Posts time series data points to Amazon DataZone for the specified asset. 
+func (c *Client) PostTimeSeriesDataPoints(ctx context.Context, params *PostTimeSeriesDataPointsInput, optFns ...func(*Options)) (*PostTimeSeriesDataPointsOutput, error) { + if params == nil { + params = &PostTimeSeriesDataPointsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PostTimeSeriesDataPoints", params, optFns, c.addOperationPostTimeSeriesDataPointsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PostTimeSeriesDataPointsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PostTimeSeriesDataPointsInput struct { + + // The ID of the Amazon DataZone domain in which you want to post time series data + // points. + // + // This member is required. + DomainIdentifier *string + + // The ID of the asset for which you want to post time series data points. + // + // This member is required. + EntityIdentifier *string + + // The type of the asset for which you want to post data points. + // + // This member is required. + EntityType types.TimeSeriesEntityType + + // The forms that contain the data points that you want to post. + // + // This member is required. + Forms []types.TimeSeriesDataPointFormInput + + // A unique, case-sensitive identifier that is provided to ensure the idempotency + // of the request. + ClientToken *string + + noSmithyDocumentSerde +} + +type PostTimeSeriesDataPointsOutput struct { + + // The ID of the Amazon DataZone domain in which you want to post time series data + // points. + DomainId *string + + // The ID of the asset for which you want to post time series data points. + EntityId *string + + // The type of the asset for which you want to post data points. + EntityType types.TimeSeriesEntityType + + // The forms that contain the data points that you have posted. + Forms []types.TimeSeriesDataPointFormOutput + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPostTimeSeriesDataPointsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpPostTimeSeriesDataPoints{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpPostTimeSeriesDataPoints{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PostTimeSeriesDataPoints"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addIdempotencyToken_opPostTimeSeriesDataPointsMiddleware(stack, options); err != nil { + return err + } + if err = addOpPostTimeSeriesDataPointsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPostTimeSeriesDataPoints(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpPostTimeSeriesDataPoints struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpPostTimeSeriesDataPoints) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpPostTimeSeriesDataPoints) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*PostTimeSeriesDataPointsInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *PostTimeSeriesDataPointsInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func 
addIdempotencyToken_opPostTimeSeriesDataPointsMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpPostTimeSeriesDataPoints{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opPostTimeSeriesDataPoints(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PostTimeSeriesDataPoints", + } +} diff --git a/service/datazone/deserializers.go b/service/datazone/deserializers.go index 5da67bae146..c9812863d7f 100644 --- a/service/datazone/deserializers.go +++ b/service/datazone/deserializers.go @@ -1097,6 +1097,11 @@ func awsRestjson1_deserializeOpDocumentCreateAssetOutput(v **CreateAssetOutput, sv.Id = ptr.String(jtv) } + case "latestTimeSeriesDataPointFormsOutput": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(&sv.LatestTimeSeriesDataPointFormsOutput, value); err != nil { + return err + } + case "listing": if err := awsRestjson1_deserializeDocumentAssetListingDetails(&sv.Listing, value); err != nil { return err @@ -1413,6 +1418,11 @@ func awsRestjson1_deserializeOpDocumentCreateAssetRevisionOutput(v **CreateAsset sv.Id = ptr.String(jtv) } + case "latestTimeSeriesDataPointFormsOutput": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(&sv.LatestTimeSeriesDataPointFormsOutput, value); err != nil { + return err + } + case "listing": if err := awsRestjson1_deserializeDocumentAssetListingDetails(&sv.Listing, value); err != nil { return err @@ -7455,6 +7465,104 @@ func awsRestjson1_deserializeOpErrorDeleteSubscriptionTarget(response *smithyhtt } } +type awsRestjson1_deserializeOpDeleteTimeSeriesDataPoints struct { +} + +func (*awsRestjson1_deserializeOpDeleteTimeSeriesDataPoints) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteTimeSeriesDataPoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteTimeSeriesDataPoints(response, &metadata) + } + output := &DeleteTimeSeriesDataPointsOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteTimeSeriesDataPoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + 
jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestjson1_deserializeOpGetAsset struct { } @@ -7699,6 +7807,11 @@ func awsRestjson1_deserializeOpDocumentGetAssetOutput(v **GetAssetOutput, value sv.Id = ptr.String(jtv) } + case "latestTimeSeriesDataPointFormsOutput": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(&sv.LatestTimeSeriesDataPointFormsOutput, value); err != nil { + return err + } + case "listing": if err := awsRestjson1_deserializeDocumentAssetListingDetails(&sv.Listing, value); err != nil { return err @@ -13012,14 +13125,14 @@ func awsRestjson1_deserializeOpDocumentGetSubscriptionTargetOutput(v **GetSubscr return nil } -type awsRestjson1_deserializeOpGetUserProfile struct { +type awsRestjson1_deserializeOpGetTimeSeriesDataPoint struct { } -func (*awsRestjson1_deserializeOpGetUserProfile) ID() string { +func (*awsRestjson1_deserializeOpGetTimeSeriesDataPoint) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetUserProfile) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetTimeSeriesDataPoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13033,9 +13146,9 @@ func (m *awsRestjson1_deserializeOpGetUserProfile) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetUserProfile(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetTimeSeriesDataPoint(response, &metadata) } - output := &GetUserProfileOutput{} + output := &GetTimeSeriesDataPointOutput{} out.Result = output var buff [1024]byte @@ -13056,7 +13169,7 @@ func (m *awsRestjson1_deserializeOpGetUserProfile) HandleDeserialize(ctx context return out, metadata, err } - err = 
awsRestjson1_deserializeOpDocumentGetUserProfileOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentGetTimeSeriesDataPointOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13069,7 +13182,7 @@ func (m *awsRestjson1_deserializeOpGetUserProfile) HandleDeserialize(ctx context return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetUserProfile(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetTimeSeriesDataPoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13138,7 +13251,7 @@ func awsRestjson1_deserializeOpErrorGetUserProfile(response *smithyhttp.Response } } -func awsRestjson1_deserializeOpDocumentGetUserProfileOutput(v **GetUserProfileOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetTimeSeriesDataPointOutput(v **GetTimeSeriesDataPointOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13151,20 +13264,15 @@ func awsRestjson1_deserializeOpDocumentGetUserProfileOutput(v **GetUserProfileOu return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetUserProfileOutput + var sv *GetTimeSeriesDataPointOutput if *v == nil { - sv = &GetUserProfileOutput{} + sv = &GetTimeSeriesDataPointOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "details": - if err := awsRestjson1_deserializeDocumentUserProfileDetails(&sv.Details, value); err != nil { - return err - } - case "domainId": if value != nil { jtv, ok := value.(string) @@ -13174,31 +13282,36 @@ func awsRestjson1_deserializeOpDocumentGetUserProfileOutput(v **GetUserProfileOu sv.DomainId = ptr.String(jtv) } - case "id": + case "entityId": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected UserProfileId to be of type string, got %T instead", value) + return fmt.Errorf("expected EntityId to be of type string, got %T instead", value) } - sv.Id = ptr.String(jtv) + sv.EntityId = ptr.String(jtv) } - case "status": + case "entityType": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected UserProfileStatus to be of type string, got %T instead", value) + return fmt.Errorf("expected TimeSeriesEntityType to be of type string, got %T instead", value) } - sv.Status = types.UserProfileStatus(jtv) + sv.EntityType = types.TimeSeriesEntityType(jtv) } - case "type": + case "form": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointFormOutput(&sv.Form, value); err != nil { + return err + } + + case "formName": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected UserProfileType to be of type string, got %T instead", value) + return fmt.Errorf("expected TimeSeriesFormName to be of type string, got %T instead", value) } - sv.Type = types.UserProfileType(jtv) + sv.FormName = ptr.String(jtv) } default: @@ -13210,14 +13323,14 @@ func awsRestjson1_deserializeOpDocumentGetUserProfileOutput(v **GetUserProfileOu return nil } -type awsRestjson1_deserializeOpListAssetRevisions struct { +type awsRestjson1_deserializeOpGetUserProfile struct { } -func (*awsRestjson1_deserializeOpListAssetRevisions) ID() string { +func (*awsRestjson1_deserializeOpGetUserProfile) ID() string { return "OperationDeserializer" } -func (m 
*awsRestjson1_deserializeOpListAssetRevisions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetUserProfile) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13231,9 +13344,9 @@ func (m *awsRestjson1_deserializeOpListAssetRevisions) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListAssetRevisions(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetUserProfile(response, &metadata) } - output := &ListAssetRevisionsOutput{} + output := &GetUserProfileOutput{} out.Result = output var buff [1024]byte @@ -13254,7 +13367,7 @@ func (m *awsRestjson1_deserializeOpListAssetRevisions) HandleDeserialize(ctx con return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListAssetRevisionsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentGetUserProfileOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13267,7 +13380,7 @@ func (m *awsRestjson1_deserializeOpListAssetRevisions) HandleDeserialize(ctx con return out, metadata, err } -func awsRestjson1_deserializeOpErrorListAssetRevisions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetUserProfile(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13336,7 +13449,7 @@ func awsRestjson1_deserializeOpErrorListAssetRevisions(response *smithyhttp.Resp } } -func awsRestjson1_deserializeOpDocumentListAssetRevisionsOutput(v **ListAssetRevisionsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetUserProfileOutput(v **GetUserProfileOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13349,27 +13462,54 @@ func awsRestjson1_deserializeOpDocumentListAssetRevisionsOutput(v **ListAssetRev return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListAssetRevisionsOutput + var sv *GetUserProfileOutput if *v == nil { - sv = &ListAssetRevisionsOutput{} + sv = &GetUserProfileOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "items": - if err := awsRestjson1_deserializeDocumentAssetRevisions(&sv.Items, value); err != nil { + case "details": + if err := awsRestjson1_deserializeDocumentUserProfileDetails(&sv.Details, value); err != nil { return err } - case "nextToken": + case "domainId": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected PaginationToken to be of type string, got %T instead", value) + return fmt.Errorf("expected DomainId to be of type string, got %T instead", value) } - sv.NextToken = ptr.String(jtv) + sv.DomainId = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserProfileId to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserProfileStatus to be of 
type string, got %T instead", value) + } + sv.Status = types.UserProfileStatus(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserProfileType to be of type string, got %T instead", value) + } + sv.Type = types.UserProfileType(jtv) } default: @@ -13381,14 +13521,14 @@ func awsRestjson1_deserializeOpDocumentListAssetRevisionsOutput(v **ListAssetRev return nil } -type awsRestjson1_deserializeOpListDataSourceRunActivities struct { +type awsRestjson1_deserializeOpListAssetRevisions struct { } -func (*awsRestjson1_deserializeOpListDataSourceRunActivities) ID() string { +func (*awsRestjson1_deserializeOpListAssetRevisions) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListDataSourceRunActivities) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListAssetRevisions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13402,9 +13542,9 @@ func (m *awsRestjson1_deserializeOpListDataSourceRunActivities) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListDataSourceRunActivities(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListAssetRevisions(response, &metadata) } - output := &ListDataSourceRunActivitiesOutput{} + output := &ListAssetRevisionsOutput{} out.Result = output var buff [1024]byte @@ -13425,7 +13565,7 @@ func (m *awsRestjson1_deserializeOpListDataSourceRunActivities) HandleDeserializ return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListDataSourceRunActivitiesOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListAssetRevisionsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13438,7 +13578,7 @@ func (m *awsRestjson1_deserializeOpListDataSourceRunActivities) HandleDeserializ return out, metadata, err } -func awsRestjson1_deserializeOpErrorListDataSourceRunActivities(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListAssetRevisions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13482,18 +13622,12 @@ func awsRestjson1_deserializeOpErrorListDataSourceRunActivities(response *smithy case strings.EqualFold("AccessDeniedException", errorCode): return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - case strings.EqualFold("ConflictException", errorCode): - return awsRestjson1_deserializeErrorConflictException(response, errorBody) - case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) case strings.EqualFold("ResourceNotFoundException", errorCode): return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ServiceQuotaExceededException", errorCode): - return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) - case 
strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -13513,7 +13647,7 @@ func awsRestjson1_deserializeOpErrorListDataSourceRunActivities(response *smithy } } -func awsRestjson1_deserializeOpDocumentListDataSourceRunActivitiesOutput(v **ListDataSourceRunActivitiesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListAssetRevisionsOutput(v **ListAssetRevisionsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13526,9 +13660,9 @@ func awsRestjson1_deserializeOpDocumentListDataSourceRunActivitiesOutput(v **Lis return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListDataSourceRunActivitiesOutput + var sv *ListAssetRevisionsOutput if *v == nil { - sv = &ListDataSourceRunActivitiesOutput{} + sv = &ListAssetRevisionsOutput{} } else { sv = *v } @@ -13536,7 +13670,7 @@ func awsRestjson1_deserializeOpDocumentListDataSourceRunActivitiesOutput(v **Lis for key, value := range shape { switch key { case "items": - if err := awsRestjson1_deserializeDocumentDataSourceRunActivities(&sv.Items, value); err != nil { + if err := awsRestjson1_deserializeDocumentAssetRevisions(&sv.Items, value); err != nil { return err } @@ -13558,14 +13692,14 @@ func awsRestjson1_deserializeOpDocumentListDataSourceRunActivitiesOutput(v **Lis return nil } -type awsRestjson1_deserializeOpListDataSourceRuns struct { +type awsRestjson1_deserializeOpListDataSourceRunActivities struct { } -func (*awsRestjson1_deserializeOpListDataSourceRuns) ID() string { +func (*awsRestjson1_deserializeOpListDataSourceRunActivities) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListDataSourceRuns) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListDataSourceRunActivities) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13579,9 +13713,9 @@ func (m *awsRestjson1_deserializeOpListDataSourceRuns) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListDataSourceRuns(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListDataSourceRunActivities(response, &metadata) } - output := &ListDataSourceRunsOutput{} + output := &ListDataSourceRunActivitiesOutput{} out.Result = output var buff [1024]byte @@ -13602,7 +13736,7 @@ func (m *awsRestjson1_deserializeOpListDataSourceRuns) HandleDeserialize(ctx con return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListDataSourceRunsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListDataSourceRunActivitiesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13615,7 +13749,7 @@ func (m *awsRestjson1_deserializeOpListDataSourceRuns) HandleDeserialize(ctx con return out, metadata, err } -func awsRestjson1_deserializeOpErrorListDataSourceRuns(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListDataSourceRunActivities(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { 
return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13690,7 +13824,7 @@ func awsRestjson1_deserializeOpErrorListDataSourceRuns(response *smithyhttp.Resp } } -func awsRestjson1_deserializeOpDocumentListDataSourceRunsOutput(v **ListDataSourceRunsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListDataSourceRunActivitiesOutput(v **ListDataSourceRunActivitiesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13703,9 +13837,9 @@ func awsRestjson1_deserializeOpDocumentListDataSourceRunsOutput(v **ListDataSour return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListDataSourceRunsOutput + var sv *ListDataSourceRunActivitiesOutput if *v == nil { - sv = &ListDataSourceRunsOutput{} + sv = &ListDataSourceRunActivitiesOutput{} } else { sv = *v } @@ -13713,7 +13847,7 @@ func awsRestjson1_deserializeOpDocumentListDataSourceRunsOutput(v **ListDataSour for key, value := range shape { switch key { case "items": - if err := awsRestjson1_deserializeDocumentDataSourceRunSummaries(&sv.Items, value); err != nil { + if err := awsRestjson1_deserializeDocumentDataSourceRunActivities(&sv.Items, value); err != nil { return err } @@ -13735,14 +13869,14 @@ func awsRestjson1_deserializeOpDocumentListDataSourceRunsOutput(v **ListDataSour return nil } -type awsRestjson1_deserializeOpListDataSources struct { +type awsRestjson1_deserializeOpListDataSourceRuns struct { } -func (*awsRestjson1_deserializeOpListDataSources) ID() string { +func (*awsRestjson1_deserializeOpListDataSourceRuns) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListDataSources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListDataSourceRuns) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13756,9 +13890,9 @@ func (m *awsRestjson1_deserializeOpListDataSources) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListDataSources(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListDataSourceRuns(response, &metadata) } - output := &ListDataSourcesOutput{} + output := &ListDataSourceRunsOutput{} out.Result = output var buff [1024]byte @@ -13779,7 +13913,7 @@ func (m *awsRestjson1_deserializeOpListDataSources) HandleDeserialize(ctx contex return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListDataSourcesOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListDataSourceRunsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13792,7 +13926,7 @@ func (m *awsRestjson1_deserializeOpListDataSources) HandleDeserialize(ctx contex return out, metadata, err } -func awsRestjson1_deserializeOpErrorListDataSources(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListDataSourceRuns(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ 
-13867,7 +14001,184 @@ func awsRestjson1_deserializeOpErrorListDataSources(response *smithyhttp.Respons } } -func awsRestjson1_deserializeOpDocumentListDataSourcesOutput(v **ListDataSourcesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListDataSourceRunsOutput(v **ListDataSourceRunsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListDataSourceRunsOutput + if *v == nil { + sv = &ListDataSourceRunsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "items": + if err := awsRestjson1_deserializeDocumentDataSourceRunSummaries(&sv.Items, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PaginationToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListDataSources struct { +} + +func (*awsRestjson1_deserializeOpListDataSources) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListDataSources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListDataSources(response, &metadata) + } + output := &ListDataSourcesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListDataSourcesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListDataSources(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + 
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListDataSourcesOutput(v **ListDataSourcesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -16278,10 +16589,376 @@ func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor sv = *v } - for key, value := range shape { - switch key { - case "tags": - if err := awsRestjson1_deserializeDocumentTags(&sv.Tags, value); err != nil { + for key, value := range shape { + switch key { + case "tags": + if err := awsRestjson1_deserializeDocumentTags(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTimeSeriesDataPoints struct { +} + +func (*awsRestjson1_deserializeOpListTimeSeriesDataPoints) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTimeSeriesDataPoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListTimeSeriesDataPoints(response, &metadata) + } + output := 
&ListTimeSeriesDataPointsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTimeSeriesDataPointsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListTimeSeriesDataPoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListTimeSeriesDataPointsOutput(v **ListTimeSeriesDataPointsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", 
value) + } + + var sv *ListTimeSeriesDataPointsOutput + if *v == nil { + sv = &ListTimeSeriesDataPointsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "items": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(&sv.Items, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PaginationToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpPostTimeSeriesDataPoints struct { +} + +func (*awsRestjson1_deserializeOpPostTimeSeriesDataPoints) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpPostTimeSeriesDataPoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorPostTimeSeriesDataPoints(response, &metadata) + } + output := &PostTimeSeriesDataPointsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentPostTimeSeriesDataPointsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorPostTimeSeriesDataPoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, 
io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentPostTimeSeriesDataPointsOutput(v **PostTimeSeriesDataPointsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PostTimeSeriesDataPointsOutput + if *v == nil { + sv = &PostTimeSeriesDataPointsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "domainId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DomainId to be of type string, got %T instead", value) + } + sv.DomainId = ptr.String(jtv) + } + + case "entityId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityId to be of type string, got %T instead", value) + } + sv.EntityId = ptr.String(jtv) + } + + case "entityType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TimeSeriesEntityType to be of type string, got %T instead", value) + } + sv.EntityType = types.TimeSeriesEntityType(jtv) + } + + case "forms": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointFormOutputList(&sv.Forms, value); err != nil { return err } @@ -22482,6 +23159,11 @@ func awsRestjson1_deserializeDocumentAssetItemAdditionalAttributes(v **types.Ass return err } + case "latestTimeSeriesDataPointFormsOutput": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(&sv.LatestTimeSeriesDataPointFormsOutput, value); err != nil { + return err + } + case "readOnlyFormsOutput": if err := awsRestjson1_deserializeDocumentFormOutputList(&sv.ReadOnlyFormsOutput, value); err != nil { return err @@ -22575,6 +23257,11 @@ func awsRestjson1_deserializeDocumentAssetListing(v **types.AssetListing, value return err } + case "latestTimeSeriesDataPointForms": + if err := 
awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(&sv.LatestTimeSeriesDataPointForms, value); err != nil { + return err + } + case "owningProjectId": if value != nil { jtv, ok := value.(string) @@ -22820,6 +23507,11 @@ func awsRestjson1_deserializeDocumentAssetListingItemAdditionalAttributes(v **ty sv.Forms = ptr.String(jtv) } + case "latestTimeSeriesDataPointForms": + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(&sv.LatestTimeSeriesDataPointForms, value); err != nil { + return err + } + default: _, _ = key, value @@ -26358,6 +27050,15 @@ func awsRestjson1_deserializeDocumentGlueRunConfigurationOutput(v **types.GlueRu sv.AccountId = ptr.String(jtv) } + case "autoImportDataQualityResult": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.AutoImportDataQualityResult = ptr.Bool(jtv) + } + case "dataAccessRole": if value != nil { jtv, ok := value.(string) @@ -30277,6 +30978,258 @@ func awsRestjson1_deserializeDocumentThrottlingException(v **types.ThrottlingExc return nil } +func awsRestjson1_deserializeDocumentTimeSeriesDataPointFormOutput(v **types.TimeSeriesDataPointFormOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TimeSeriesDataPointFormOutput + if *v == nil { + sv = &types.TimeSeriesDataPointFormOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "content": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Content = ptr.String(jtv) + } + + case "formName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TimeSeriesFormName to be of type string, got %T instead", value) + } + sv.FormName = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataPointIdentifier to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "timestamp": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Timestamp = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "typeIdentifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FormTypeIdentifier to be of type string, got %T instead", value) + } + sv.TypeIdentifier = ptr.String(jtv) + } + + case "typeRevision": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Revision to be of type string, got %T instead", value) + } + sv.TypeRevision = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTimeSeriesDataPointFormOutputList(v *[]types.TimeSeriesDataPointFormOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TimeSeriesDataPointFormOutput 
+ if *v == nil { + cv = []types.TimeSeriesDataPointFormOutput{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TimeSeriesDataPointFormOutput + destAddr := &col + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointFormOutput(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutput(v **types.TimeSeriesDataPointSummaryFormOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TimeSeriesDataPointSummaryFormOutput + if *v == nil { + sv = &types.TimeSeriesDataPointSummaryFormOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "contentSummary": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ContentSummary = ptr.String(jtv) + } + + case "formName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TimeSeriesFormName to be of type string, got %T instead", value) + } + sv.FormName = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataPointIdentifier to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "timestamp": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Timestamp = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "typeIdentifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FormTypeIdentifier to be of type string, got %T instead", value) + } + sv.TypeIdentifier = ptr.String(jtv) + } + + case "typeRevision": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Revision to be of type string, got %T instead", value) + } + sv.TypeRevision = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutputList(v *[]types.TimeSeriesDataPointSummaryFormOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TimeSeriesDataPointSummaryFormOutput + if *v == nil { + cv = []types.TimeSeriesDataPointSummaryFormOutput{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TimeSeriesDataPointSummaryFormOutput + destAddr := &col + if err := awsRestjson1_deserializeDocumentTimeSeriesDataPointSummaryFormOutput(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocumentTopic(v **types.Topic, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/datazone/generated.json b/service/datazone/generated.json index dde2bd43913..4ca316bf38e 100644 --- 
a/service/datazone/generated.json +++ b/service/datazone/generated.json @@ -46,6 +46,7 @@ "api_op_DeleteSubscriptionGrant.go", "api_op_DeleteSubscriptionRequest.go", "api_op_DeleteSubscriptionTarget.go", + "api_op_DeleteTimeSeriesDataPoints.go", "api_op_GetAsset.go", "api_op_GetAssetType.go", "api_op_GetDataSource.go", @@ -67,6 +68,7 @@ "api_op_GetSubscriptionGrant.go", "api_op_GetSubscriptionRequestDetails.go", "api_op_GetSubscriptionTarget.go", + "api_op_GetTimeSeriesDataPoint.go", "api_op_GetUserProfile.go", "api_op_ListAssetRevisions.go", "api_op_ListDataSourceRunActivities.go", @@ -86,6 +88,8 @@ "api_op_ListSubscriptionTargets.go", "api_op_ListSubscriptions.go", "api_op_ListTagsForResource.go", + "api_op_ListTimeSeriesDataPoints.go", + "api_op_PostTimeSeriesDataPoints.go", "api_op_PutEnvironmentBlueprintConfiguration.go", "api_op_RejectPredictions.go", "api_op_RejectSubscriptionRequest.go", diff --git a/service/datazone/serializers.go b/service/datazone/serializers.go index 930424c90cf..077fbbe9b83 100644 --- a/service/datazone/serializers.go +++ b/service/datazone/serializers.go @@ -3673,6 +3673,97 @@ func awsRestjson1_serializeOpHttpBindingsDeleteSubscriptionTargetInput(v *Delete return nil } +type awsRestjson1_serializeOpDeleteTimeSeriesDataPoints struct { +} + +func (*awsRestjson1_serializeOpDeleteTimeSeriesDataPoints) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteTimeSeriesDataPoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteTimeSeriesDataPointsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v2/domains/{domainIdentifier}/entities/{entityType}/{entityIdentifier}/time-series-data-points") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteTimeSeriesDataPointsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteTimeSeriesDataPointsInput(v *DeleteTimeSeriesDataPointsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ClientToken != nil { + encoder.SetQuery("clientToken").String(*v.ClientToken) + } + + if 
v.DomainIdentifier == nil || len(*v.DomainIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member domainIdentifier must not be empty")} + } + if v.DomainIdentifier != nil { + if err := encoder.SetURI("domainIdentifier").String(*v.DomainIdentifier); err != nil { + return err + } + } + + if v.EntityIdentifier == nil || len(*v.EntityIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityIdentifier must not be empty")} + } + if v.EntityIdentifier != nil { + if err := encoder.SetURI("entityIdentifier").String(*v.EntityIdentifier); err != nil { + return err + } + } + + if len(v.EntityType) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityType must not be empty")} + } + if len(v.EntityType) > 0 { + if err := encoder.SetURI("entityType").String(string(v.EntityType)); err != nil { + return err + } + } + + if v.FormName != nil { + encoder.SetQuery("formName").String(*v.FormName) + } + + return nil +} + type awsRestjson1_serializeOpGetAsset struct { } @@ -5234,6 +5325,102 @@ func awsRestjson1_serializeOpHttpBindingsGetSubscriptionTargetInput(v *GetSubscr return nil } +type awsRestjson1_serializeOpGetTimeSeriesDataPoint struct { +} + +func (*awsRestjson1_serializeOpGetTimeSeriesDataPoint) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetTimeSeriesDataPoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetTimeSeriesDataPointInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v2/domains/{domainIdentifier}/entities/{entityType}/{entityIdentifier}/time-series-data-points/{identifier}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetTimeSeriesDataPointInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetTimeSeriesDataPointInput(v *GetTimeSeriesDataPointInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.DomainIdentifier == nil || len(*v.DomainIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member domainIdentifier must not be empty")} + 
} + if v.DomainIdentifier != nil { + if err := encoder.SetURI("domainIdentifier").String(*v.DomainIdentifier); err != nil { + return err + } + } + + if v.EntityIdentifier == nil || len(*v.EntityIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityIdentifier must not be empty")} + } + if v.EntityIdentifier != nil { + if err := encoder.SetURI("entityIdentifier").String(*v.EntityIdentifier); err != nil { + return err + } + } + + if len(v.EntityType) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityType must not be empty")} + } + if len(v.EntityType) > 0 { + if err := encoder.SetURI("entityType").String(string(v.EntityType)); err != nil { + return err + } + } + + if v.FormName != nil { + encoder.SetQuery("formName").String(*v.FormName) + } + + if v.Identifier == nil || len(*v.Identifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member identifier must not be empty")} + } + if v.Identifier != nil { + if err := encoder.SetURI("identifier").String(*v.Identifier); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpGetUserProfile struct { } @@ -6880,6 +7067,222 @@ func awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(v *ListTagsFor return nil } +type awsRestjson1_serializeOpListTimeSeriesDataPoints struct { +} + +func (*awsRestjson1_serializeOpListTimeSeriesDataPoints) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTimeSeriesDataPoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTimeSeriesDataPointsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v2/domains/{domainIdentifier}/entities/{entityType}/{entityIdentifier}/time-series-data-points") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListTimeSeriesDataPointsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTimeSeriesDataPointsInput(v *ListTimeSeriesDataPointsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.DomainIdentifier == nil || 
len(*v.DomainIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member domainIdentifier must not be empty")} + } + if v.DomainIdentifier != nil { + if err := encoder.SetURI("domainIdentifier").String(*v.DomainIdentifier); err != nil { + return err + } + } + + if v.EndedAt != nil { + encoder.SetQuery("endedAt").String(smithytime.FormatDateTime(*v.EndedAt)) + } + + if v.EntityIdentifier == nil || len(*v.EntityIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityIdentifier must not be empty")} + } + if v.EntityIdentifier != nil { + if err := encoder.SetURI("entityIdentifier").String(*v.EntityIdentifier); err != nil { + return err + } + } + + if len(v.EntityType) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityType must not be empty")} + } + if len(v.EntityType) > 0 { + if err := encoder.SetURI("entityType").String(string(v.EntityType)); err != nil { + return err + } + } + + if v.FormName != nil { + encoder.SetQuery("formName").String(*v.FormName) + } + + if v.MaxResults != nil { + encoder.SetQuery("maxResults").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + if v.StartedAt != nil { + encoder.SetQuery("startedAt").String(smithytime.FormatDateTime(*v.StartedAt)) + } + + return nil +} + +type awsRestjson1_serializeOpPostTimeSeriesDataPoints struct { +} + +func (*awsRestjson1_serializeOpPostTimeSeriesDataPoints) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpPostTimeSeriesDataPoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PostTimeSeriesDataPointsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v2/domains/{domainIdentifier}/entities/{entityType}/{entityIdentifier}/time-series-data-points") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsPostTimeSeriesDataPointsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentPostTimeSeriesDataPointsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsPostTimeSeriesDataPointsInput(v *PostTimeSeriesDataPointsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.DomainIdentifier == nil || len(*v.DomainIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member domainIdentifier must not be empty")} + } + if v.DomainIdentifier != nil { + if err := encoder.SetURI("domainIdentifier").String(*v.DomainIdentifier); err != nil { + return err + } + } + + if v.EntityIdentifier == nil || len(*v.EntityIdentifier) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityIdentifier must not be empty")} + } + if v.EntityIdentifier != nil { + if err := encoder.SetURI("entityIdentifier").String(*v.EntityIdentifier); err != nil { + return err + } + } + + if len(v.EntityType) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member entityType must not be empty")} + } + if len(v.EntityType) > 0 { + if err := encoder.SetURI("entityType").String(string(v.EntityType)); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentPostTimeSeriesDataPointsInput(v *PostTimeSeriesDataPointsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientToken != nil { + ok := object.Key("clientToken") + ok.String(*v.ClientToken) + } + + if v.Forms != nil { + ok := object.Key("forms") + if err := awsRestjson1_serializeDocumentTimeSeriesDataPointFormInputList(v.Forms, ok); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpPutEnvironmentBlueprintConfiguration struct { } @@ -10028,6 +10431,11 @@ func awsRestjson1_serializeDocumentGlueRunConfigurationInput(v *types.GlueRunCon object := value.Object() defer object.Close() + if v.AutoImportDataQualityResult != nil { + ok := object.Key("autoImportDataQualityResult") + ok.Boolean(*v.AutoImportDataQualityResult) + } + if v.DataAccessRole != nil { ok := object.Key("dataAccessRole") ok.String(*v.DataAccessRole) @@ -10611,3 +11019,48 @@ func awsRestjson1_serializeDocumentTermRelations(v *types.TermRelations, value s return nil } + +func awsRestjson1_serializeDocumentTimeSeriesDataPointFormInput(v *types.TimeSeriesDataPointFormInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Content != nil { + ok := object.Key("content") + ok.String(*v.Content) + } + + if v.FormName != nil { + ok := object.Key("formName") + ok.String(*v.FormName) + } + + if v.Timestamp != nil { + ok := object.Key("timestamp") + ok.Double(smithytime.FormatEpochSeconds(*v.Timestamp)) + } + + if v.TypeIdentifier != nil { + ok := object.Key("typeIdentifier") + ok.String(*v.TypeIdentifier) + } + + if v.TypeRevision != nil { + ok := object.Key("typeRevision") + ok.String(*v.TypeRevision) + } + + return nil +} + +func awsRestjson1_serializeDocumentTimeSeriesDataPointFormInputList(v []types.TimeSeriesDataPointFormInput, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentTimeSeriesDataPointFormInput(&v[i], av); err != nil { + return err + } + } + return nil +} diff --git 
a/service/datazone/snapshot_test.go b/service/datazone/snapshot_test.go index 2c30f64956b..ea317605427 100644 --- a/service/datazone/snapshot_test.go +++ b/service/datazone/snapshot_test.go @@ -518,6 +518,18 @@ func TestCheckSnapshot_DeleteSubscriptionTarget(t *testing.T) { } } +func TestCheckSnapshot_DeleteTimeSeriesDataPoints(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteTimeSeriesDataPoints(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteTimeSeriesDataPoints") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_GetAsset(t *testing.T) { svc := New(Options{}) _, err := svc.GetAsset(context.Background(), nil, func(o *Options) { @@ -770,6 +782,18 @@ func TestCheckSnapshot_GetSubscriptionTarget(t *testing.T) { } } +func TestCheckSnapshot_GetTimeSeriesDataPoint(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetTimeSeriesDataPoint(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "GetTimeSeriesDataPoint") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_GetUserProfile(t *testing.T) { svc := New(Options{}) _, err := svc.GetUserProfile(context.Background(), nil, func(o *Options) { @@ -998,6 +1022,30 @@ func TestCheckSnapshot_ListTagsForResource(t *testing.T) { } } +func TestCheckSnapshot_ListTimeSeriesDataPoints(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTimeSeriesDataPoints(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListTimeSeriesDataPoints") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_PostTimeSeriesDataPoints(t *testing.T) { + svc := New(Options{}) + _, err := svc.PostTimeSeriesDataPoints(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "PostTimeSeriesDataPoints") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_PutEnvironmentBlueprintConfiguration(t *testing.T) { svc := New(Options{}) _, err := svc.PutEnvironmentBlueprintConfiguration(context.Background(), nil, func(o *Options) { @@ -1753,6 +1801,18 @@ func TestUpdateSnapshot_DeleteSubscriptionTarget(t *testing.T) { } } +func TestUpdateSnapshot_DeleteTimeSeriesDataPoints(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteTimeSeriesDataPoints(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteTimeSeriesDataPoints") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_GetAsset(t *testing.T) { svc := New(Options{}) _, err := svc.GetAsset(context.Background(), nil, func(o *Options) { @@ -2005,6 +2065,18 @@ func TestUpdateSnapshot_GetSubscriptionTarget(t *testing.T) { } } +func TestUpdateSnapshot_GetTimeSeriesDataPoint(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetTimeSeriesDataPoint(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "GetTimeSeriesDataPoint") + }) + 
}) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_GetUserProfile(t *testing.T) { svc := New(Options{}) _, err := svc.GetUserProfile(context.Background(), nil, func(o *Options) { @@ -2233,6 +2305,30 @@ func TestUpdateSnapshot_ListTagsForResource(t *testing.T) { } } +func TestUpdateSnapshot_ListTimeSeriesDataPoints(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTimeSeriesDataPoints(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListTimeSeriesDataPoints") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_PostTimeSeriesDataPoints(t *testing.T) { + svc := New(Options{}) + _, err := svc.PostTimeSeriesDataPoints(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "PostTimeSeriesDataPoints") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_PutEnvironmentBlueprintConfiguration(t *testing.T) { svc := New(Options{}) _, err := svc.PutEnvironmentBlueprintConfiguration(context.Background(), nil, func(o *Options) { diff --git a/service/datazone/types/enums.go b/service/datazone/types/enums.go index 6cb19b7c92a..a70fa86d589 100644 --- a/service/datazone/types/enums.go +++ b/service/datazone/types/enums.go @@ -652,7 +652,8 @@ type SearchOutputAdditionalAttribute string // Enum values for SearchOutputAdditionalAttribute const ( - SearchOutputAdditionalAttributeForms SearchOutputAdditionalAttribute = "FORMS" + SearchOutputAdditionalAttributeForms SearchOutputAdditionalAttribute = "FORMS" + SearchOutputAdditionalAttributeTimeSeriesDataPointForms SearchOutputAdditionalAttribute = "TIME_SERIES_DATA_POINT_FORMS" ) // Values returns all known values for SearchOutputAdditionalAttribute. Note that @@ -662,6 +663,7 @@ const ( func (SearchOutputAdditionalAttribute) Values() []SearchOutputAdditionalAttribute { return []SearchOutputAdditionalAttribute{ "FORMS", + "TIME_SERIES_DATA_POINT_FORMS", } } @@ -834,6 +836,24 @@ func (TaskStatus) Values() []TaskStatus { } } +type TimeSeriesEntityType string + +// Enum values for TimeSeriesEntityType +const ( + TimeSeriesEntityTypeAsset TimeSeriesEntityType = "ASSET" + TimeSeriesEntityTypeListing TimeSeriesEntityType = "LISTING" +) + +// Values returns all known values for TimeSeriesEntityType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (TimeSeriesEntityType) Values() []TimeSeriesEntityType { + return []TimeSeriesEntityType{ + "ASSET", + "LISTING", + } +} + type Timezone string // Enum values for Timezone diff --git a/service/datazone/types/types.go b/service/datazone/types/types.go index c640c27c821..374d1c85888 100644 --- a/service/datazone/types/types.go +++ b/service/datazone/types/types.go @@ -108,6 +108,10 @@ type AssetItemAdditionalAttributes struct { // The forms included in the additional attributes of an inventory asset. FormsOutput []FormOutput + // The latest time series data points forms included in the additional attributes + // of an asset. + LatestTimeSeriesDataPointFormsOutput []TimeSeriesDataPointSummaryFormOutput + // The read-only forms included in the additional attributes of an inventory asset. 
ReadOnlyFormsOutput []FormOutput @@ -136,6 +140,10 @@ type AssetListing struct { // The glossary terms attached to an asset published in an Amazon DataZone catalog. GlossaryTerms []DetailedGlossaryTerm + // The latest time series data points forms included in the additional attributes + // of an asset. + LatestTimeSeriesDataPointForms []TimeSeriesDataPointSummaryFormOutput + // The identifier of the project where an asset published in an Amazon DataZone // catalog exists. OwningProjectId *string @@ -211,6 +219,10 @@ type AssetListingItemAdditionalAttributes struct { // The metadata forms that form additional attributes of the metadata asset. Forms *string + // The latest time series data points forms included in the additional attributes + // of an asset. + LatestTimeSeriesDataPointForms []TimeSeriesDataPointSummaryFormOutput + noSmithyDocumentSerde } @@ -1299,6 +1311,10 @@ type GlueRunConfigurationInput struct { // This member is required. RelationalFilterConfigurations []RelationalFilterConfiguration + // Specifies whether to automatically import data quality metrics as part of the + // data source run. + AutoImportDataQualityResult *bool + // The data access role included in the configuration details of the Amazon Web // Services Glue data source. DataAccessRole *string @@ -1319,6 +1335,10 @@ type GlueRunConfigurationOutput struct { // Amazon Web Services Glue data source. AccountId *string + // Specifies whether to automatically import data quality metrics as part of the + // data source run. + AutoImportDataQualityResult *bool + // The data access role included in the configuration details of the Amazon Web // Services Glue data source. DataAccessRole *string @@ -2627,6 +2647,93 @@ type TermRelations struct { noSmithyDocumentSerde } +// The time series data points form. +type TimeSeriesDataPointFormInput struct { + + // The name of the time series data points form. + // + // This member is required. + FormName *string + + // The timestamp of the time series data points form. + // + // This member is required. + Timestamp *time.Time + + // The ID of the type of the time series data points form. + // + // This member is required. + TypeIdentifier *string + + // The content of the time series data points form. + Content *string + + // The revision type of the time series data points form. + TypeRevision *string + + noSmithyDocumentSerde +} + +// The time series data points form. +type TimeSeriesDataPointFormOutput struct { + + // The name of the time series data points form. + // + // This member is required. + FormName *string + + // The timestamp of the time series data points form. + // + // This member is required. + Timestamp *time.Time + + // The ID of the type of the time series data points form. + // + // This member is required. + TypeIdentifier *string + + // The content of the time series data points form. + Content *string + + // The ID of the time series data points form. + Id *string + + // The revision type of the time series data points form. + TypeRevision *string + + noSmithyDocumentSerde +} + +// The summary of the time series data points form. +type TimeSeriesDataPointSummaryFormOutput struct { + + // The name of the time series data points summary form. + // + // This member is required. + FormName *string + + // The timestamp of the time series data points summary form. + // + // This member is required. + Timestamp *time.Time + + // The type ID of the time series data points summary form. + // + // This member is required. 
+ TypeIdentifier *string + + // The content of the summary of the time series data points form. + ContentSummary *string + + // The ID of the time series data points summary form. + Id *string + + // The type revision of the time series data points summary form. + TypeRevision *string + + noSmithyDocumentSerde +} + // The topic of the notification. type Topic struct { diff --git a/service/datazone/validators.go b/service/datazone/validators.go index 0c7f19cf533..8b7308497a4 100644 --- a/service/datazone/validators.go +++ b/service/datazone/validators.go @@ -770,6 +770,26 @@ func (m *validateOpDeleteSubscriptionTarget) HandleInitialize(ctx context.Contex return next.HandleInitialize(ctx, in) } +type validateOpDeleteTimeSeriesDataPoints struct { +} + +func (*validateOpDeleteTimeSeriesDataPoints) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteTimeSeriesDataPoints) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteTimeSeriesDataPointsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteTimeSeriesDataPointsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetAsset struct { } @@ -1190,6 +1210,26 @@ func (m *validateOpGetSubscriptionTarget) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpGetTimeSeriesDataPoint struct { +} + +func (*validateOpGetTimeSeriesDataPoint) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetTimeSeriesDataPoint) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetTimeSeriesDataPointInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetTimeSeriesDataPointInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetUserProfile struct { } @@ -1550,6 +1590,46 @@ func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpListTimeSeriesDataPoints struct { +} + +func (*validateOpListTimeSeriesDataPoints) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTimeSeriesDataPoints) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTimeSeriesDataPointsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTimeSeriesDataPointsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPostTimeSeriesDataPoints struct { +} + +func (*validateOpPostTimeSeriesDataPoints) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPostTimeSeriesDataPoints) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + 
input, ok := in.Parameters.(*PostTimeSeriesDataPointsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPostTimeSeriesDataPointsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpPutEnvironmentBlueprintConfiguration struct { } @@ -2202,6 +2282,10 @@ func addOpDeleteSubscriptionTargetValidationMiddleware(stack *middleware.Stack) return stack.Initialize.Add(&validateOpDeleteSubscriptionTarget{}, middleware.After) } +func addOpDeleteTimeSeriesDataPointsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteTimeSeriesDataPoints{}, middleware.After) +} + func addOpGetAssetValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetAsset{}, middleware.After) } @@ -2286,6 +2370,10 @@ func addOpGetSubscriptionTargetValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpGetSubscriptionTarget{}, middleware.After) } +func addOpGetTimeSeriesDataPointValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetTimeSeriesDataPoint{}, middleware.After) +} + func addOpGetUserProfileValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetUserProfile{}, middleware.After) } @@ -2358,6 +2446,14 @@ func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) } +func addOpListTimeSeriesDataPointsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTimeSeriesDataPoints{}, middleware.After) +} + +func addOpPostTimeSeriesDataPointsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPostTimeSeriesDataPoints{}, middleware.After) +} + func addOpPutEnvironmentBlueprintConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpPutEnvironmentBlueprintConfiguration{}, middleware.After) } @@ -3075,6 +3171,44 @@ func validateSubscriptionTargetForms(v []types.SubscriptionTargetForm) error { } } +func validateTimeSeriesDataPointFormInput(v *types.TimeSeriesDataPointFormInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TimeSeriesDataPointFormInput"} + if v.FormName == nil { + invalidParams.Add(smithy.NewErrParamRequired("FormName")) + } + if v.TypeIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("TypeIdentifier")) + } + if v.Timestamp == nil { + invalidParams.Add(smithy.NewErrParamRequired("Timestamp")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTimeSeriesDataPointFormInputList(v []types.TimeSeriesDataPointFormInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TimeSeriesDataPointFormInputList"} + for i := range v { + if err := validateTimeSeriesDataPointFormInput(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpAcceptPredictionsInput(v *AcceptPredictionsInput) error { if v == nil { return nil @@ -3898,6 +4032,30 @@ func validateOpDeleteSubscriptionTargetInput(v *DeleteSubscriptionTargetInput) e } } +func 
validateOpDeleteTimeSeriesDataPointsInput(v *DeleteTimeSeriesDataPointsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteTimeSeriesDataPointsInput"} + if v.DomainIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DomainIdentifier")) + } + if v.EntityIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("EntityIdentifier")) + } + if len(v.EntityType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EntityType")) + } + if v.FormName == nil { + invalidParams.Add(smithy.NewErrParamRequired("FormName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetAssetInput(v *GetAssetInput) error { if v == nil { return nil @@ -4273,6 +4431,33 @@ func validateOpGetSubscriptionTargetInput(v *GetSubscriptionTargetInput) error { } } +func validateOpGetTimeSeriesDataPointInput(v *GetTimeSeriesDataPointInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetTimeSeriesDataPointInput"} + if v.DomainIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DomainIdentifier")) + } + if v.EntityIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("EntityIdentifier")) + } + if len(v.EntityType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EntityType")) + } + if v.Identifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("Identifier")) + } + if v.FormName == nil { + invalidParams.Add(smithy.NewErrParamRequired("FormName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetUserProfileInput(v *GetUserProfileInput) error { if v == nil { return nil @@ -4570,6 +4755,58 @@ func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { } } +func validateOpListTimeSeriesDataPointsInput(v *ListTimeSeriesDataPointsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTimeSeriesDataPointsInput"} + if v.DomainIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DomainIdentifier")) + } + if v.EntityIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("EntityIdentifier")) + } + if len(v.EntityType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EntityType")) + } + if v.FormName == nil { + invalidParams.Add(smithy.NewErrParamRequired("FormName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPostTimeSeriesDataPointsInput(v *PostTimeSeriesDataPointsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PostTimeSeriesDataPointsInput"} + if v.DomainIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DomainIdentifier")) + } + if v.EntityIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("EntityIdentifier")) + } + if len(v.EntityType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EntityType")) + } + if v.Forms == nil { + invalidParams.Add(smithy.NewErrParamRequired("Forms")) + } else if v.Forms != nil { + if err := validateTimeSeriesDataPointFormInputList(v.Forms); err != nil { + invalidParams.AddNested("Forms", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpPutEnvironmentBlueprintConfigurationInput(v *PutEnvironmentBlueprintConfigurationInput) error { if v == nil { return nil diff --git 
a/service/docdb/api_op_SwitchoverGlobalCluster.go b/service/docdb/api_op_SwitchoverGlobalCluster.go new file mode 100644 index 00000000000..a4f03e5775e --- /dev/null +++ b/service/docdb/api_op_SwitchoverGlobalCluster.go @@ -0,0 +1,155 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package docdb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/docdb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Switches over the specified secondary Amazon DocumentDB cluster to be the new +// primary Amazon DocumentDB cluster in the global database cluster. +func (c *Client) SwitchoverGlobalCluster(ctx context.Context, params *SwitchoverGlobalClusterInput, optFns ...func(*Options)) (*SwitchoverGlobalClusterOutput, error) { + if params == nil { + params = &SwitchoverGlobalClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "SwitchoverGlobalCluster", params, optFns, c.addOperationSwitchoverGlobalClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SwitchoverGlobalClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SwitchoverGlobalClusterInput struct { + + // The identifier of the Amazon DocumentDB global database cluster to switch over. + // The identifier is the unique key assigned by the user when the cluster is + // created. In other words, it's the name of the global cluster. This parameter + // isn’t case-sensitive. Constraints: + // - Must match the identifier of an existing global cluster (Amazon DocumentDB + // global database). + // - Minimum length of 1. Maximum length of 255. + // Pattern: [A-Za-z][0-9A-Za-z-:._]* + // + // This member is required. + GlobalClusterIdentifier *string + + // The identifier of the secondary Amazon DocumentDB cluster to promote to the new + // primary for the global database cluster. Use the Amazon Resource Name (ARN) for + // the identifier so that Amazon DocumentDB can locate the cluster in its Amazon + // Web Services region. Constraints: + // - Must match the identifier of an existing secondary cluster. + // - Minimum length of 1. Maximum length of 255. + // Pattern: [A-Za-z][0-9A-Za-z-:._]* + // + // This member is required. + TargetDbClusterIdentifier *string + + noSmithyDocumentSerde +} + +type SwitchoverGlobalClusterOutput struct { + + // A data type representing an Amazon DocumentDB global cluster. + GlobalCluster *types.GlobalCluster + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSwitchoverGlobalClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpSwitchoverGlobalCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpSwitchoverGlobalCluster{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "SwitchoverGlobalCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpSwitchoverGlobalClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSwitchoverGlobalCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSwitchoverGlobalCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "SwitchoverGlobalCluster", + } +} diff --git a/service/docdb/deserializers.go b/service/docdb/deserializers.go index d5578a54cb4..d1d0566b84c 100644 --- a/service/docdb/deserializers.go +++ b/service/docdb/deserializers.go @@ -6045,6 +6045,123 @@ func awsAwsquery_deserializeOpErrorStopDBCluster(response *smithyhttp.Response, } } +type awsAwsquery_deserializeOpSwitchoverGlobalCluster struct { +} + +func (*awsAwsquery_deserializeOpSwitchoverGlobalCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpSwitchoverGlobalCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorSwitchoverGlobalCluster(response, &metadata) + } + output := &SwitchoverGlobalClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("SwitchoverGlobalClusterResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentSwitchoverGlobalClusterOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorSwitchoverGlobalCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("DBClusterNotFoundFault", errorCode): + return awsAwsquery_deserializeErrorDBClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("GlobalClusterNotFoundFault", errorCode): + return awsAwsquery_deserializeErrorGlobalClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidDBClusterStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidDBClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidGlobalClusterStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidGlobalClusterStateFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + func awsAwsquery_deserializeErrorAuthorizationNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.AuthorizationNotFoundFault{} var buff [1024]byte @@ -19763,3 +19880,45 @@ func 
awsAwsquery_deserializeOpDocumentStopDBClusterOutput(v **StopDBClusterOutpu *v = sv return nil } + +func awsAwsquery_deserializeOpDocumentSwitchoverGlobalClusterOutput(v **SwitchoverGlobalClusterOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *SwitchoverGlobalClusterOutput + if *v == nil { + sv = &SwitchoverGlobalClusterOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("GlobalCluster", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentGlobalCluster(&sv.GlobalCluster, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} diff --git a/service/docdb/generated.json b/service/docdb/generated.json index 5f97e2c4ad3..15dbbd79d69 100644 --- a/service/docdb/generated.json +++ b/service/docdb/generated.json @@ -66,6 +66,7 @@ "api_op_RestoreDBClusterToPointInTime.go", "api_op_StartDBCluster.go", "api_op_StopDBCluster.go", + "api_op_SwitchoverGlobalCluster.go", "auth.go", "deserializers.go", "doc.go", diff --git a/service/docdb/serializers.go b/service/docdb/serializers.go index dbb6539c6ad..c1a3d675b5f 100644 --- a/service/docdb/serializers.go +++ b/service/docdb/serializers.go @@ -3407,6 +3407,70 @@ func (m *awsAwsquery_serializeOpStopDBCluster) HandleSerialize(ctx context.Conte return next.HandleSerialize(ctx, in) } + +type awsAwsquery_serializeOpSwitchoverGlobalCluster struct { +} + +func (*awsAwsquery_serializeOpSwitchoverGlobalCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpSwitchoverGlobalCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SwitchoverGlobalClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("SwitchoverGlobalCluster") + body.Key("Version").String("2014-10-31") + + if err := awsAwsquery_serializeOpDocumentSwitchoverGlobalClusterInput(input, bodyEncoder.Value); err != nil { 
+ return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} func awsAwsquery_serializeDocumentAttributeValueList(v []string, value query.Value) error { array := value.Array("AttributeValue") @@ -5378,3 +5442,20 @@ func awsAwsquery_serializeOpDocumentStopDBClusterInput(v *StopDBClusterInput, va return nil } + +func awsAwsquery_serializeOpDocumentSwitchoverGlobalClusterInput(v *SwitchoverGlobalClusterInput, value query.Value) error { + object := value.Object() + _ = object + + if v.GlobalClusterIdentifier != nil { + objectKey := object.Key("GlobalClusterIdentifier") + objectKey.String(*v.GlobalClusterIdentifier) + } + + if v.TargetDbClusterIdentifier != nil { + objectKey := object.Key("TargetDbClusterIdentifier") + objectKey.String(*v.TargetDbClusterIdentifier) + } + + return nil +} diff --git a/service/docdb/snapshot_test.go b/service/docdb/snapshot_test.go index 1926bb0b644..7ee9b104a1f 100644 --- a/service/docdb/snapshot_test.go +++ b/service/docdb/snapshot_test.go @@ -697,6 +697,18 @@ func TestCheckSnapshot_StopDBCluster(t *testing.T) { t.Fatal(err) } } + +func TestCheckSnapshot_SwitchoverGlobalCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.SwitchoverGlobalCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "SwitchoverGlobalCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} func TestUpdateSnapshot_AddSourceIdentifierToSubscription(t *testing.T) { svc := New(Options{}) _, err := svc.AddSourceIdentifierToSubscription(context.Background(), nil, func(o *Options) { @@ -1332,3 +1344,15 @@ func TestUpdateSnapshot_StopDBCluster(t *testing.T) { t.Fatal(err) } } + +func TestUpdateSnapshot_SwitchoverGlobalCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.SwitchoverGlobalCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "SwitchoverGlobalCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} diff --git a/service/docdb/validators.go b/service/docdb/validators.go index 2fc5228ab68..7cf10c3c8d5 100644 --- a/service/docdb/validators.go +++ b/service/docdb/validators.go @@ -1050,6 +1050,26 @@ func (m *validateOpStopDBCluster) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpSwitchoverGlobalCluster struct { +} + +func (*validateOpSwitchoverGlobalCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSwitchoverGlobalCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SwitchoverGlobalClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpSwitchoverGlobalClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + func addOpAddSourceIdentifierToSubscriptionValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpAddSourceIdentifierToSubscription{}, middleware.After) } @@ -1258,6 +1278,10 @@ func addOpStopDBClusterValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpStopDBCluster{}, middleware.After) } +func addOpSwitchoverGlobalClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSwitchoverGlobalCluster{}, middleware.After) +} + func validateFilter(v *types.Filter) error { if v == nil { return nil @@ -2194,3 +2218,21 @@ func validateOpStopDBClusterInput(v *StopDBClusterInput) error { return nil } } + +func validateOpSwitchoverGlobalClusterInput(v *SwitchoverGlobalClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SwitchoverGlobalClusterInput"} + if v.GlobalClusterIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("GlobalClusterIdentifier")) + } + if v.TargetDbClusterIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetDbClusterIdentifier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/groundstation/api_op_CreateMissionProfile.go b/service/groundstation/api_op_CreateMissionProfile.go index 0721aab6d6d..85fa165662d 100644 --- a/service/groundstation/api_op_CreateMissionProfile.go +++ b/service/groundstation/api_op_CreateMissionProfile.go @@ -53,12 +53,12 @@ type CreateMissionProfileInput struct { // This member is required. TrackingConfigArn *string - // Amount of time after a contact ends that you’d like to receive a CloudWatch - // event indicating the pass has finished. + // Amount of time after a contact ends that you’d like to receive a Ground Station + // Contact State Change event indicating the pass has finished. ContactPostPassDurationSeconds *int32 - // Amount of time prior to contact start you’d like to receive a CloudWatch event - // indicating an upcoming pass. + // Amount of time prior to contact start you’d like to receive a Ground Station + // Contact State Change event indicating an upcoming pass. ContactPrePassDurationSeconds *int32 // KMS key to use for encrypting streams. diff --git a/service/groundstation/api_op_DescribeContact.go b/service/groundstation/api_op_DescribeContact.go index 0410760a11f..32bdcf8be48 100644 --- a/service/groundstation/api_op_DescribeContact.go +++ b/service/groundstation/api_op_DescribeContact.go @@ -87,6 +87,18 @@ type DescribeContactOutput struct { // Tags assigned to a contact. Tags map[string]string + // Projected time in UTC your satellite will set below the receive mask (https://docs.aws.amazon.com/ground-station/latest/ug/site-masks.html) + // . This time is based on the satellite's current active ephemeris for future + // contacts and the ephemeris that was active during contact execution for + // completed contacts. + VisibilityEndTime *time.Time + + // Projected time in UTC your satellite will rise above the receive mask (https://docs.aws.amazon.com/ground-station/latest/ug/site-masks.html) + // . This time is based on the satellite's current active ephemeris for future + // contacts and the ephemeris that was active during contact execution for + // completed contacts. 
+ VisibilityStartTime *time.Time + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata diff --git a/service/groundstation/api_op_UpdateMissionProfile.go b/service/groundstation/api_op_UpdateMissionProfile.go index 4e8cf84c24a..d2c11e4096c 100644 --- a/service/groundstation/api_op_UpdateMissionProfile.go +++ b/service/groundstation/api_op_UpdateMissionProfile.go @@ -35,12 +35,12 @@ type UpdateMissionProfileInput struct { // This member is required. MissionProfileId *string - // Amount of time after a contact ends that you’d like to receive a CloudWatch - // event indicating the pass has finished. + // Amount of time after a contact ends that you’d like to receive a Ground Station + // Contact State Change event indicating the pass has finished. ContactPostPassDurationSeconds *int32 - // Amount of time after a contact ends that you’d like to receive a CloudWatch - // event indicating the pass has finished. + // Amount of time after a contact ends that you’d like to receive a Ground Station + // Contact State Change event indicating the pass has finished. ContactPrePassDurationSeconds *int32 // A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a diff --git a/service/groundstation/deserializers.go b/service/groundstation/deserializers.go index b0aaa17b1e2..3a26c80d41d 100644 --- a/service/groundstation/deserializers.go +++ b/service/groundstation/deserializers.go @@ -1753,6 +1753,38 @@ func awsRestjson1_deserializeOpDocumentDescribeContactOutput(v **DescribeContact return err } + case "visibilityEndTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.VisibilityEndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "visibilityStartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.VisibilityStartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + default: _, _ = key, value @@ -6533,6 +6565,38 @@ func awsRestjson1_deserializeDocumentContactData(v **types.ContactData, value in return err } + case "visibilityEndTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.VisibilityEndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "visibilityStartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.VisibilityStartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + default: _, _ = key, value diff --git a/service/groundstation/types/types.go b/service/groundstation/types/types.go index 41c1bcebcfc..ce4b70752a9 100644 --- a/service/groundstation/types/types.go +++ b/service/groundstation/types/types.go @@ -394,6 +394,20 @@ type ContactData struct { // Tags assigned to a contact. 
Tags map[string]string + // Projected time in UTC your satellite will set below the receive mask (https://docs.aws.amazon.com/ground-station/latest/ug/site-masks.html) + // . This time is based on the satellite's current active ephemeris for future + // contacts and the ephemeris that was active during contact execution for + // completed contacts. This field is not present for contacts with a SCHEDULING or + // SCHEDULED status. + VisibilityEndTime *time.Time + + // Projected time in UTC your satellite will rise above the receive mask (https://docs.aws.amazon.com/ground-station/latest/ug/site-masks.html) + // . This time is based on the satellite's current active ephemeris for future + // contacts and the ephemeris that was active during contact execution for + // completed contacts. This field is not present for contacts with a SCHEDULING or + // SCHEDULED status. + VisibilityStartTime *time.Time + noSmithyDocumentSerde } diff --git a/service/lambda/api_op_CreateFunction.go b/service/lambda/api_op_CreateFunction.go index 75efcac4e63..45176080c3d 100644 --- a/service/lambda/api_op_CreateFunction.go +++ b/service/lambda/api_op_CreateFunction.go @@ -132,7 +132,7 @@ type CreateFunctionInput struct { // . Handler *string - // Container image configuration values (https://docs.aws.amazon.com/lambda/latest/dg/configuration-images.html#configuration-images-settings) + // Container image configuration values (https://docs.aws.amazon.com/lambda/latest/dg/images-create.html#images-parms) // that override the values in the container image Dockerfile. ImageConfig *types.ImageConfig diff --git a/service/lambda/api_op_UpdateFunctionConfiguration.go b/service/lambda/api_op_UpdateFunctionConfiguration.go index 27aa55f6561..2efa1fe6949 100644 --- a/service/lambda/api_op_UpdateFunctionConfiguration.go +++ b/service/lambda/api_op_UpdateFunctionConfiguration.go @@ -79,7 +79,7 @@ type UpdateFunctionConfigurationInput struct { // . Handler *string - // Container image configuration values (https://docs.aws.amazon.com/lambda/latest/dg/images-parms.html) + // Container image configuration values (https://docs.aws.amazon.com/lambda/latest/dg/images-create.html#images-parms) // that override the values in the container image Docker file. 
ImageConfig *types.ImageConfig diff --git a/service/lambda/types/enums.go b/service/lambda/types/enums.go index bca4edbf294..1a240f6f5a0 100644 --- a/service/lambda/types/enums.go +++ b/service/lambda/types/enums.go @@ -412,6 +412,7 @@ const ( RuntimePython310 Runtime = "python3.10" RuntimeJava17 Runtime = "java17" RuntimeRuby32 Runtime = "ruby3.2" + RuntimeRuby33 Runtime = "ruby3.3" RuntimePython311 Runtime = "python3.11" RuntimeNodejs20x Runtime = "nodejs20.x" RuntimeProvidedal2023 Runtime = "provided.al2023" @@ -456,6 +457,7 @@ func (Runtime) Values() []Runtime { "python3.10", "java17", "ruby3.2", + "ruby3.3", "python3.11", "nodejs20.x", "provided.al2023", diff --git a/service/medialive/deserializers.go b/service/medialive/deserializers.go index 77012107101..ecac04486cf 100644 --- a/service/medialive/deserializers.go +++ b/service/medialive/deserializers.go @@ -12753,6 +12753,78 @@ func awsRestjson1_deserializeDocument__listOfColorCorrection(v *[]types.ColorCor return nil } +func awsRestjson1_deserializeDocument__listOfDashRoleAudio(v *[]types.DashRoleAudio, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DashRoleAudio + if *v == nil { + cv = []types.DashRoleAudio{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DashRoleAudio + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DashRoleAudio to be of type string, got %T instead", value) + } + col = types.DashRoleAudio(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocument__listOfDashRoleCaption(v *[]types.DashRoleCaption, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DashRoleCaption + if *v == nil { + cv = []types.DashRoleCaption{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DashRoleCaption + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DashRoleCaption to be of type string, got %T instead", value) + } + col = types.DashRoleCaption(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocument__listOfFailoverCondition(v *[]types.FailoverCondition, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14715,6 +14787,11 @@ func awsRestjson1_deserializeDocumentAudioDescription(v **types.AudioDescription for key, value := range shape { switch key { + case "audioDashRoles": + if err := awsRestjson1_deserializeDocument__listOfDashRoleAudio(&sv.AudioDashRoles, value); err != nil { + return err + } + case "audioNormalizationSettings": if err := awsRestjson1_deserializeDocumentAudioNormalizationSettings(&sv.AudioNormalizationSettings, value); err != nil { return err @@ -14757,6 +14834,15 @@ func awsRestjson1_deserializeDocumentAudioDescription(v **types.AudioDescription return err } + case "dvbDashAccessibility": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DvbDashAccessibility to be of type string, got %T instead", value) + } + sv.DvbDashAccessibility = types.DvbDashAccessibility(jtv) + } + case "languageCode": if 
value != nil { jtv, ok := value.(string) @@ -16191,6 +16277,11 @@ func awsRestjson1_deserializeDocumentCaptionDescription(v **types.CaptionDescrip sv.Accessibility = types.AccessibilityType(jtv) } + case "captionDashRoles": + if err := awsRestjson1_deserializeDocument__listOfDashRoleCaption(&sv.CaptionDashRoles, value); err != nil { + return err + } + case "captionSelectorName": if value != nil { jtv, ok := value.(string) @@ -16205,6 +16296,15 @@ func awsRestjson1_deserializeDocumentCaptionDescription(v **types.CaptionDescrip return err } + case "dvbDashAccessibility": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DvbDashAccessibility to be of type string, got %T instead", value) + } + sv.DvbDashAccessibility = types.DvbDashAccessibility(jtv) + } + case "languageCode": if value != nil { jtv, ok := value.(string) @@ -17070,6 +17170,135 @@ func awsRestjson1_deserializeDocumentChannelSummary(v **types.ChannelSummary, va return nil } +func awsRestjson1_deserializeDocumentCmafIngestGroupSettings(v **types.CmafIngestGroupSettings, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CmafIngestGroupSettings + if *v == nil { + sv = &types.CmafIngestGroupSettings{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "destination": + if err := awsRestjson1_deserializeDocumentOutputLocationRef(&sv.Destination, value); err != nil { + return err + } + + case "nielsenId3Behavior": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CmafNielsenId3Behavior to be of type string, got %T instead", value) + } + sv.NielsenId3Behavior = types.CmafNielsenId3Behavior(jtv) + } + + case "scte35Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Scte35Type to be of type string, got %T instead", value) + } + sv.Scte35Type = types.Scte35Type(jtv) + } + + case "segmentLength": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integerMin1 to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SegmentLength = ptr.Int32(int32(i64)) + } + + case "segmentLengthUnits": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CmafIngestSegmentLengthUnits to be of type string, got %T instead", value) + } + sv.SegmentLengthUnits = types.CmafIngestSegmentLengthUnits(jtv) + } + + case "sendDelayMs": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integerMin0Max2000 to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SendDelayMs = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentCmafIngestOutputSettings(v **types.CmafIngestOutputSettings, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CmafIngestOutputSettings + if *v == nil { + sv = &types.CmafIngestOutputSettings{} + } else { + sv = *v + } + + for key, value := range 
shape { + switch key { + case "nameModifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.NameModifier = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentColorCorrection(v **types.ColorCorrection, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -27374,6 +27603,11 @@ func awsRestjson1_deserializeDocumentOutputGroupSettings(v **types.OutputGroupSe return err } + case "cmafIngestGroupSettings": + if err := awsRestjson1_deserializeDocumentCmafIngestGroupSettings(&sv.CmafIngestGroupSettings, value); err != nil { + return err + } + case "frameCaptureGroupSettings": if err := awsRestjson1_deserializeDocumentFrameCaptureGroupSettings(&sv.FrameCaptureGroupSettings, value); err != nil { return err @@ -27526,6 +27760,11 @@ func awsRestjson1_deserializeDocumentOutputSettings(v **types.OutputSettings, va return err } + case "cmafIngestOutputSettings": + if err := awsRestjson1_deserializeDocumentCmafIngestOutputSettings(&sv.CmafIngestOutputSettings, value); err != nil { + return err + } + case "frameCaptureOutputSettings": if err := awsRestjson1_deserializeDocumentFrameCaptureOutputSettings(&sv.FrameCaptureOutputSettings, value); err != nil { return err diff --git a/service/medialive/serializers.go b/service/medialive/serializers.go index 6546d8630df..a0e645ce901 100644 --- a/service/medialive/serializers.go +++ b/service/medialive/serializers.go @@ -5378,6 +5378,28 @@ func awsRestjson1_serializeDocument__listOfColorCorrection(v []types.ColorCorrec return nil } +func awsRestjson1_serializeDocument__listOfDashRoleAudio(v []types.DashRoleAudio, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsRestjson1_serializeDocument__listOfDashRoleCaption(v []types.DashRoleCaption, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + func awsRestjson1_serializeDocument__listOfFailoverCondition(v []types.FailoverCondition, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -5999,6 +6021,13 @@ func awsRestjson1_serializeDocumentAudioDescription(v *types.AudioDescription, v object := value.Object() defer object.Close() + if v.AudioDashRoles != nil { + ok := object.Key("audioDashRoles") + if err := awsRestjson1_serializeDocument__listOfDashRoleAudio(v.AudioDashRoles, ok); err != nil { + return err + } + } + if v.AudioNormalizationSettings != nil { ok := object.Key("audioNormalizationSettings") if err := awsRestjson1_serializeDocumentAudioNormalizationSettings(v.AudioNormalizationSettings, ok); err != nil { @@ -6035,6 +6064,11 @@ func awsRestjson1_serializeDocumentAudioDescription(v *types.AudioDescription, v } } + if len(v.DvbDashAccessibility) > 0 { + ok := object.Key("dvbDashAccessibility") + ok.String(string(v.DvbDashAccessibility)) + } + if v.LanguageCode != nil { ok := object.Key("languageCode") ok.String(*v.LanguageCode) @@ -6562,6 +6596,13 @@ func awsRestjson1_serializeDocumentCaptionDescription(v *types.CaptionDescriptio ok.String(string(v.Accessibility)) } + if v.CaptionDashRoles != nil { + ok := object.Key("captionDashRoles") + if err := awsRestjson1_serializeDocument__listOfDashRoleCaption(v.CaptionDashRoles, 
ok); err != nil { + return err + } + } + if v.CaptionSelectorName != nil { ok := object.Key("captionSelectorName") ok.String(*v.CaptionSelectorName) @@ -6574,6 +6615,11 @@ func awsRestjson1_serializeDocumentCaptionDescription(v *types.CaptionDescriptio } } + if len(v.DvbDashAccessibility) > 0 { + ok := object.Key("dvbDashAccessibility") + ok.String(string(v.DvbDashAccessibility)) + } + if v.LanguageCode != nil { ok := object.Key("languageCode") ok.String(*v.LanguageCode) @@ -6883,6 +6929,57 @@ func awsRestjson1_serializeDocumentCdiInputSpecification(v *types.CdiInputSpecif return nil } +func awsRestjson1_serializeDocumentCmafIngestGroupSettings(v *types.CmafIngestGroupSettings, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Destination != nil { + ok := object.Key("destination") + if err := awsRestjson1_serializeDocumentOutputLocationRef(v.Destination, ok); err != nil { + return err + } + } + + if len(v.NielsenId3Behavior) > 0 { + ok := object.Key("nielsenId3Behavior") + ok.String(string(v.NielsenId3Behavior)) + } + + if len(v.Scte35Type) > 0 { + ok := object.Key("scte35Type") + ok.String(string(v.Scte35Type)) + } + + if v.SegmentLength != nil { + ok := object.Key("segmentLength") + ok.Integer(*v.SegmentLength) + } + + if len(v.SegmentLengthUnits) > 0 { + ok := object.Key("segmentLengthUnits") + ok.String(string(v.SegmentLengthUnits)) + } + + if v.SendDelayMs != nil { + ok := object.Key("sendDelayMs") + ok.Integer(*v.SendDelayMs) + } + + return nil +} + +func awsRestjson1_serializeDocumentCmafIngestOutputSettings(v *types.CmafIngestOutputSettings, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NameModifier != nil { + ok := object.Key("nameModifier") + ok.String(*v.NameModifier) + } + + return nil +} + func awsRestjson1_serializeDocumentColorCorrection(v *types.ColorCorrection, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -10702,6 +10799,13 @@ func awsRestjson1_serializeDocumentOutputGroupSettings(v *types.OutputGroupSetti } } + if v.CmafIngestGroupSettings != nil { + ok := object.Key("cmafIngestGroupSettings") + if err := awsRestjson1_serializeDocumentCmafIngestGroupSettings(v.CmafIngestGroupSettings, ok); err != nil { + return err + } + } + if v.FrameCaptureGroupSettings != nil { ok := object.Key("frameCaptureGroupSettings") if err := awsRestjson1_serializeDocumentFrameCaptureGroupSettings(v.FrameCaptureGroupSettings, ok); err != nil { @@ -10798,6 +10902,13 @@ func awsRestjson1_serializeDocumentOutputSettings(v *types.OutputSettings, value } } + if v.CmafIngestOutputSettings != nil { + ok := object.Key("cmafIngestOutputSettings") + if err := awsRestjson1_serializeDocumentCmafIngestOutputSettings(v.CmafIngestOutputSettings, ok); err != nil { + return err + } + } + if v.FrameCaptureOutputSettings != nil { ok := object.Key("frameCaptureOutputSettings") if err := awsRestjson1_serializeDocumentFrameCaptureOutputSettings(v.FrameCaptureOutputSettings, ok); err != nil { diff --git a/service/medialive/types/enums.go b/service/medialive/types/enums.go index d9db3e6d165..783af06de3d 100644 --- a/service/medialive/types/enums.go +++ b/service/medialive/types/enums.go @@ -769,6 +769,43 @@ func (ChannelState) Values() []ChannelState { } } +type CmafIngestSegmentLengthUnits string + +// Enum values for CmafIngestSegmentLengthUnits +const ( + CmafIngestSegmentLengthUnitsMilliseconds CmafIngestSegmentLengthUnits = "MILLISECONDS" + CmafIngestSegmentLengthUnitsSeconds 
CmafIngestSegmentLengthUnits = "SECONDS" +) + +// Values returns all known values for CmafIngestSegmentLengthUnits. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (CmafIngestSegmentLengthUnits) Values() []CmafIngestSegmentLengthUnits { + return []CmafIngestSegmentLengthUnits{ + "MILLISECONDS", + "SECONDS", + } +} + +type CmafNielsenId3Behavior string + +// Enum values for CmafNielsenId3Behavior +const ( + CmafNielsenId3BehaviorNoPassthrough CmafNielsenId3Behavior = "NO_PASSTHROUGH" + CmafNielsenId3BehaviorPassthrough CmafNielsenId3Behavior = "PASSTHROUGH" +) + +// Values returns all known values for CmafNielsenId3Behavior. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (CmafNielsenId3Behavior) Values() []CmafNielsenId3Behavior { + return []CmafNielsenId3Behavior{ + "NO_PASSTHROUGH", + "PASSTHROUGH", + } +} + type ColorSpace string // Enum values for ColorSpace @@ -807,6 +844,78 @@ func (ContentType) Values() []ContentType { } } +type DashRoleAudio string + +// Enum values for DashRoleAudio +const ( + DashRoleAudioAlternate DashRoleAudio = "ALTERNATE" + DashRoleAudioCommentary DashRoleAudio = "COMMENTARY" + DashRoleAudioDescription DashRoleAudio = "DESCRIPTION" + DashRoleAudioDub DashRoleAudio = "DUB" + DashRoleAudioEmergency DashRoleAudio = "EMERGENCY" + DashRoleAudioEnhancedAudioIntelligibility DashRoleAudio = "ENHANCED-AUDIO-INTELLIGIBILITY" + DashRoleAudioKaraoke DashRoleAudio = "KARAOKE" + DashRoleAudioMain DashRoleAudio = "MAIN" + DashRoleAudioSupplementary DashRoleAudio = "SUPPLEMENTARY" +) + +// Values returns all known values for DashRoleAudio. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DashRoleAudio) Values() []DashRoleAudio { + return []DashRoleAudio{ + "ALTERNATE", + "COMMENTARY", + "DESCRIPTION", + "DUB", + "EMERGENCY", + "ENHANCED-AUDIO-INTELLIGIBILITY", + "KARAOKE", + "MAIN", + "SUPPLEMENTARY", + } +} + +type DashRoleCaption string + +// Enum values for DashRoleCaption +const ( + DashRoleCaptionAlternate DashRoleCaption = "ALTERNATE" + DashRoleCaptionCaption DashRoleCaption = "CAPTION" + DashRoleCaptionCommentary DashRoleCaption = "COMMENTARY" + DashRoleCaptionDescription DashRoleCaption = "DESCRIPTION" + DashRoleCaptionDub DashRoleCaption = "DUB" + DashRoleCaptionEasyreader DashRoleCaption = "EASYREADER" + DashRoleCaptionEmergency DashRoleCaption = "EMERGENCY" + DashRoleCaptionForcedSubtitle DashRoleCaption = "FORCED-SUBTITLE" + DashRoleCaptionKaraoke DashRoleCaption = "KARAOKE" + DashRoleCaptionMain DashRoleCaption = "MAIN" + DashRoleCaptionMetadata DashRoleCaption = "METADATA" + DashRoleCaptionSubtitle DashRoleCaption = "SUBTITLE" + DashRoleCaptionSupplementary DashRoleCaption = "SUPPLEMENTARY" +) + +// Values returns all known values for DashRoleCaption. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (DashRoleCaption) Values() []DashRoleCaption { + return []DashRoleCaption{ + "ALTERNATE", + "CAPTION", + "COMMENTARY", + "DESCRIPTION", + "DUB", + "EASYREADER", + "EMERGENCY", + "FORCED-SUBTITLE", + "KARAOKE", + "MAIN", + "METADATA", + "SUBTITLE", + "SUPPLEMENTARY", + } +} + type DeviceSettingsSyncState string // Enum values for DeviceSettingsSyncState @@ -877,6 +986,34 @@ func (DolbyEProgramSelection) Values() []DolbyEProgramSelection { } } +type DvbDashAccessibility string + +// Enum values for DvbDashAccessibility +const ( + DvbDashAccessibilityDvbdash1VisuallyImpaired DvbDashAccessibility = "DVBDASH_1_VISUALLY_IMPAIRED" + DvbDashAccessibilityDvbdash2HardOfHearing DvbDashAccessibility = "DVBDASH_2_HARD_OF_HEARING" + DvbDashAccessibilityDvbdash3SupplementalCommentary DvbDashAccessibility = "DVBDASH_3_SUPPLEMENTAL_COMMENTARY" + DvbDashAccessibilityDvbdash4DirectorsCommentary DvbDashAccessibility = "DVBDASH_4_DIRECTORS_COMMENTARY" + DvbDashAccessibilityDvbdash5EducationalNotes DvbDashAccessibility = "DVBDASH_5_EDUCATIONAL_NOTES" + DvbDashAccessibilityDvbdash6MainProgram DvbDashAccessibility = "DVBDASH_6_MAIN_PROGRAM" + DvbDashAccessibilityDvbdash7CleanFeed DvbDashAccessibility = "DVBDASH_7_CLEAN_FEED" +) + +// Values returns all known values for DvbDashAccessibility. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DvbDashAccessibility) Values() []DvbDashAccessibility { + return []DvbDashAccessibility{ + "DVBDASH_1_VISUALLY_IMPAIRED", + "DVBDASH_2_HARD_OF_HEARING", + "DVBDASH_3_SUPPLEMENTAL_COMMENTARY", + "DVBDASH_4_DIRECTORS_COMMENTARY", + "DVBDASH_5_EDUCATIONAL_NOTES", + "DVBDASH_6_MAIN_PROGRAM", + "DVBDASH_7_CLEAN_FEED", + } +} + type DvbSdtOutputSdt string // Enum values for DvbSdtOutputSdt @@ -5108,6 +5245,24 @@ func (Scte35SpliceInsertWebDeliveryAllowedBehavior) Values() []Scte35SpliceInser } } +type Scte35Type string + +// Enum values for Scte35Type +const ( + Scte35TypeNone Scte35Type = "NONE" + Scte35TypeScte35WithoutSegmentation Scte35Type = "SCTE_35_WITHOUT_SEGMENTATION" +) + +// Values returns all known values for Scte35Type. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (Scte35Type) Values() []Scte35Type { + return []Scte35Type{ + "NONE", + "SCTE_35_WITHOUT_SEGMENTATION", + } +} + type Scte35WebDeliveryAllowedFlag string // Enum values for Scte35WebDeliveryAllowedFlag diff --git a/service/medialive/types/types.go b/service/medialive/types/types.go index c915e2464ed..f7586a5f88e 100644 --- a/service/medialive/types/types.go +++ b/service/medialive/types/types.go @@ -248,6 +248,10 @@ type AudioDescription struct { // This member is required. Name *string + // Identifies the DASH roles to assign to this audio output. Applies only when the + // audio output is configured for DVB DASH accessibility signaling. + AudioDashRoles []DashRoleAudio + // Advanced audio normalization settings. AudioNormalizationSettings *AudioNormalizationSettings @@ -270,6 +274,11 @@ type AudioDescription struct { // Audio codec settings. CodecSettings *AudioCodecSettings + // Identifies DVB DASH accessibility signaling in this audio output. Used in + // Microsoft Smooth Streaming outputs to signal accessibility information to + // packagers. 
+ DvbDashAccessibility DvbDashAccessibility + // RFC 5646 language code representing the language of the audio output track. // Only used if languageControlMode is useConfigured, or there is no ISO 639 // language code specified in the input. @@ -791,10 +800,19 @@ type CaptionDescription struct { // added to HLS output group and MediaPackage output group. Accessibility AccessibilityType + // Identifies the DASH roles to assign to this captions output. Applies only when + // the captions output is configured for DVB DASH accessibility signaling. + CaptionDashRoles []DashRoleCaption + // Additional settings for captions destination that depend on the destination // type. DestinationSettings *CaptionDestinationSettings + // Identifies DVB DASH accessibility signaling in this captions output. Used in + // Microsoft Smooth Streaming outputs to signal accessibility information to + // packagers. + DvbDashAccessibility DvbDashAccessibility + // ISO 639-2 three-digit code: http://www.loc.gov/standards/iso639-2/ LanguageCode *string @@ -1110,6 +1128,47 @@ type ChannelSummary struct { noSmithyDocumentSerde } +// Cmaf Ingest Group Settings +type CmafIngestGroupSettings struct { + + // A HTTP destination for the tracks + // + // This member is required. + Destination *OutputLocationRef + + // If set to passthrough, Nielsen inaudible tones for media tracking will be + // detected in the input audio and an equivalent ID3 tag will be inserted in the + // output. + NielsenId3Behavior CmafNielsenId3Behavior + + // Type of scte35 track to add. none or scte35WithoutSegmentation + Scte35Type Scte35Type + + // The nominal duration of segments. The units are specified in + // SegmentLengthUnits. The segments will end on the next keyframe after the + // specified duration, so the actual segment length might be longer, and it might + // be a fraction of the units. + SegmentLength *int32 + + // Time unit for segment length parameter. + SegmentLengthUnits CmafIngestSegmentLengthUnits + + // Number of milliseconds to delay the output from the second pipeline. + SendDelayMs *int32 + + noSmithyDocumentSerde +} + +// Cmaf Ingest Output Settings +type CmafIngestOutputSettings struct { + + // String concatenated to the end of the destination filename. Required for + // multiple outputs of the same type. + NameModifier *string + + noSmithyDocumentSerde +} + // Property of ColorCorrectionSettings. Used for custom color space conversion. // The object identifies one 3D LUT file and specifies the input/output color space // combination that the file will be used for. 
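The new CmafIngestGroupSettings and CmafIngestOutputSettings types above plug into the existing output group plumbing in the same way as the other group types. A rough sketch of how a caller might wire them together follows; the destination ref id, segment length, and name modifier are illustrative assumptions, not values taken from this change, and the Destination member is the only required field (see the validator added below).

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/medialive/types"
)

// cmafIngestOutputGroup sketches an output group that publishes CMAF segments
// to an HTTP ingest destination declared elsewhere in the channel request.
// All literal values here are hypothetical examples.
func cmafIngestOutputGroup() types.OutputGroup {
	return types.OutputGroup{
		Name: aws.String("cmaf-ingest"),
		OutputGroupSettings: &types.OutputGroupSettings{
			CmafIngestGroupSettings: &types.CmafIngestGroupSettings{
				// Destination references an entry in the channel's Destinations list.
				Destination:        &types.OutputLocationRef{DestinationRefId: aws.String("cmaf-ingest-destination")},
				NielsenId3Behavior: types.CmafNielsenId3BehaviorPassthrough,
				Scte35Type:         types.Scte35TypeScte35WithoutSegmentation,
				SegmentLength:      aws.Int32(4),
				SegmentLengthUnits: types.CmafIngestSegmentLengthUnitsSeconds,
			},
		},
		Outputs: []types.Output{{
			OutputSettings: &types.OutputSettings{
				CmafIngestOutputSettings: &types.CmafIngestOutputSettings{
					NameModifier: aws.String("-video"),
				},
			},
		}},
	}
}
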
@@ -4784,6 +4843,9 @@ type OutputGroupSettings struct { // Archive Group Settings ArchiveGroupSettings *ArchiveGroupSettings + // Cmaf Ingest Group Settings + CmafIngestGroupSettings *CmafIngestGroupSettings + // Frame Capture Group Settings FrameCaptureGroupSettings *FrameCaptureGroupSettings @@ -4835,6 +4897,9 @@ type OutputSettings struct { // Archive Output Settings ArchiveOutputSettings *ArchiveOutputSettings + // Cmaf Ingest Output Settings + CmafIngestOutputSettings *CmafIngestOutputSettings + // Frame Capture Output Settings FrameCaptureOutputSettings *FrameCaptureOutputSettings diff --git a/service/medialive/validators.go b/service/medialive/validators.go index a03a3d5f927..9371b90d0e6 100644 --- a/service/medialive/validators.go +++ b/service/medialive/validators.go @@ -2098,6 +2098,21 @@ func validateCaptionSelectorSettings(v *types.CaptionSelectorSettings) error { } } +func validateCmafIngestGroupSettings(v *types.CmafIngestGroupSettings) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CmafIngestGroupSettings"} + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateColorCorrection(v *types.ColorCorrection) error { if v == nil { return nil @@ -2978,6 +2993,11 @@ func validateOutputGroupSettings(v *types.OutputGroupSettings) error { invalidParams.AddNested("MsSmoothGroupSettings", err.(smithy.InvalidParamsError)) } } + if v.CmafIngestGroupSettings != nil { + if err := validateCmafIngestGroupSettings(v.CmafIngestGroupSettings); err != nil { + invalidParams.AddNested("CmafIngestGroupSettings", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { diff --git a/service/medicalimaging/api_op_GetDICOMImportJob.go b/service/medicalimaging/api_op_GetDICOMImportJob.go index 3205c0e79bc..625db9cab75 100644 --- a/service/medicalimaging/api_op_GetDICOMImportJob.go +++ b/service/medicalimaging/api_op_GetDICOMImportJob.go @@ -11,7 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Get the import job properties to learn more about the job or job progress. +// Get the import job properties to learn more about the job or job progress. The +// jobStatus refers to the execution of the import job. Therefore, an import job +// can return a jobStatus as COMPLETED even if validation issues are discovered +// during the import process. If a jobStatus returns as COMPLETED , we still +// recommend you review the output manifests written to S3, as they provide details +// on the success or failure of individual P10 object imports. func (c *Client) GetDICOMImportJob(ctx context.Context, params *GetDICOMImportJobInput, optFns ...func(*Options)) (*GetDICOMImportJobOutput, error) { if params == nil { params = &GetDICOMImportJobInput{} diff --git a/service/medicalimaging/api_op_SearchImageSets.go b/service/medicalimaging/api_op_SearchImageSets.go index 69bcc18abdf..857a511031f 100644 --- a/service/medicalimaging/api_op_SearchImageSets.go +++ b/service/medicalimaging/api_op_SearchImageSets.go @@ -63,6 +63,9 @@ type SearchImageSetsOutput struct { // The token for pagination results. NextToken *string + // The sort order for image set search results. + Sort *types.Sort + // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata diff --git a/service/medicalimaging/deserializers.go b/service/medicalimaging/deserializers.go index 16e20657a6b..83fa513fe62 100644 --- a/service/medicalimaging/deserializers.go +++ b/service/medicalimaging/deserializers.go @@ -2421,6 +2421,11 @@ func awsRestjson1_deserializeOpDocumentSearchImageSetsOutput(v **SearchImageSets sv.NextToken = ptr.String(jtv) } + case "sort": + if err := awsRestjson1_deserializeDocumentSort(&sv.Sort, value); err != nil { + return err + } + default: _, _ = key, value @@ -4237,6 +4242,46 @@ func awsRestjson1_deserializeDocumentDICOMTags(v **types.DICOMTags, value interf sv.DICOMPatientSex = ptr.String(jtv) } + case "DICOMSeriesBodyPart": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DICOMSeriesBodyPart to be of type string, got %T instead", value) + } + sv.DICOMSeriesBodyPart = ptr.String(jtv) + } + + case "DICOMSeriesInstanceUID": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DICOMSeriesInstanceUID to be of type string, got %T instead", value) + } + sv.DICOMSeriesInstanceUID = ptr.String(jtv) + } + + case "DICOMSeriesModality": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DICOMSeriesModality to be of type string, got %T instead", value) + } + sv.DICOMSeriesModality = ptr.String(jtv) + } + + case "DICOMSeriesNumber": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected DICOMSeriesNumber to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DICOMSeriesNumber = ptr.Int32(int32(i64)) + } + case "DICOMStudyDate": if value != nil { jtv, ok := value.(string) @@ -4693,6 +4738,55 @@ func awsRestjson1_deserializeDocumentServiceQuotaExceededException(v **types.Ser return nil } +func awsRestjson1_deserializeDocumentSort(v **types.Sort, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Sort + if *v == nil { + sv = &types.Sort{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "sortField": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SortField to be of type string, got %T instead", value) + } + sv.SortField = types.SortField(jtv) + } + + case "sortOrder": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SortOrder to be of type string, got %T instead", value) + } + sv.SortOrder = types.SortOrder(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentTagMap(v *map[string]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/medicalimaging/doc.go b/service/medicalimaging/doc.go index 0cab91bcbf3..4eb933f57bc 100644 --- a/service/medicalimaging/doc.go +++ b/service/medicalimaging/doc.go @@ -3,22 +3,19 @@ // Package medicalimaging provides the API client, operations, and parameter types // for AWS Health Imaging. // -// This is the AWS HealthImaging API Reference. AWS HealthImaging is a -// HIPAA-eligible service that helps health care providers and their medical -// imaging ISV partners store, transform, and apply machine learning to medical -// images. 
For an introduction to the service, see the AWS HealthImaging Developer -// Guide (https://docs.aws.amazon.com/healthimaging/latest/devguide/what-is.html) +// This is the AWS HealthImaging API Reference. AWS HealthImaging is a HIPAA +// eligible service that empowers healthcare providers, life science organizations, +// and their software partners to store, analyze, and share medical images in the +// cloud at petabyte scale. For an introduction to the service, see the AWS +// HealthImaging Developer Guide (https://docs.aws.amazon.com/healthimaging/latest/devguide/what-is.html) // . We recommend using one of the AWS Software Development Kits (SDKs) for your // programming language, as they take care of request authentication, // serialization, and connection management. For more information, see Tools to -// build on AWS (http://aws.amazon.com/developer/tools) . For information about -// using HealthImaging API actions in one of the language-specific AWS SDKs, refer -// to the See Also link at the end of each section that describes an API action or -// data type. The following sections list AWS HealthImaging API actions categorized -// according to functionality. Links are provided to actions within this Reference, -// along with links back to corresponding sections in the AWS HealthImaging -// Developer Guide where you can view console procedures and CLI/SDK code examples. -// Data store actions +// build on AWS (http://aws.amazon.com/developer/tools) . The following sections +// list AWS HealthImaging API actions categorized according to functionality. Links +// are provided to actions within this Reference, along with links back to +// corresponding sections in the AWS HealthImaging Developer Guide where you can +// view tested code examples. Data store actions // - CreateDatastore (https://docs.aws.amazon.com/healthimaging/latest/APIReference/API_CreateDatastore.html) // – See Creating a data store (https://docs.aws.amazon.com/healthimaging/latest/devguide/create-data-store.html) // . @@ -73,15 +70,12 @@ // // Tagging actions // - TagResource (https://docs.aws.amazon.com/healthimaging/latest/APIReference/API_TagResource.html) -// – See Tagging a data store (https://docs.aws.amazon.com/healthimaging/latest/devguide/tag-list-untag-data-store.html) -// and Tagging an image set (https://docs.aws.amazon.com/healthimaging/latest/devguide/tag-list-untag-image-set.html) +// – See Tagging a resource (https://docs.aws.amazon.com/healthimaging/latest/devguide/tag-resource.html) // . // - ListTagsForResource (https://docs.aws.amazon.com/healthimaging/latest/APIReference/API_ListTagsForResource.html) -// – See Tagging a data store (https://docs.aws.amazon.com/healthimaging/latest/devguide/tag-list-untag-data-store.html) -// and Tagging an image set (https://docs.aws.amazon.com/healthimaging/latest/devguide/tag-list-untag-image-set.html) +// – See Listing tags for a resource (https://docs.aws.amazon.com/healthimaging/latest/devguide/list-tag-resource.html) // . // - UntagResource (https://docs.aws.amazon.com/healthimaging/latest/APIReference/API_UntagResource.html) -// – See Tagging a data store (https://docs.aws.amazon.com/healthimaging/latest/devguide/tag-list-untag-data-store.html) -// and Tagging an image set (https://docs.aws.amazon.com/healthimaging/latest/devguide/tag-list-untag-image-set.html) +// – See Untagging a resource (https://docs.aws.amazon.com/healthimaging/latest/devguide/untag-resource.html) // . 
package medicalimaging diff --git a/service/medicalimaging/serializers.go b/service/medicalimaging/serializers.go index fe4fe8d6515..207ffadd84d 100644 --- a/service/medicalimaging/serializers.go +++ b/service/medicalimaging/serializers.go @@ -1620,6 +1620,10 @@ func awsRestjson1_serializeDocumentSearchByAttributeValue(v types.SearchByAttrib av := object.Key("DICOMPatientId") av.String(uv.Value) + case *types.SearchByAttributeValueMemberDICOMSeriesInstanceUID: + av := object.Key("DICOMSeriesInstanceUID") + av.String(uv.Value) + case *types.SearchByAttributeValueMemberDICOMStudyDateAndTime: av := object.Key("DICOMStudyDateAndTime") if err := awsRestjson1_serializeDocumentDICOMStudyDateAndTime(&uv.Value, av); err != nil { @@ -1634,6 +1638,10 @@ func awsRestjson1_serializeDocumentSearchByAttributeValue(v types.SearchByAttrib av := object.Key("DICOMStudyInstanceUID") av.String(uv.Value) + case *types.SearchByAttributeValueMemberUpdatedAt: + av := object.Key("updatedAt") + av.Double(smithytime.FormatEpochSeconds(uv.Value)) + default: return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) @@ -1668,6 +1676,13 @@ func awsRestjson1_serializeDocumentSearchCriteria(v *types.SearchCriteria, value } } + if v.Sort != nil { + ok := object.Key("sort") + if err := awsRestjson1_serializeDocumentSort(v.Sort, ok); err != nil { + return err + } + } + return nil } @@ -1703,6 +1718,23 @@ func awsRestjson1_serializeDocumentSearchFilters(v []types.SearchFilter, value s return nil } +func awsRestjson1_serializeDocumentSort(v *types.Sort, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.SortField) > 0 { + ok := object.Key("sortField") + ok.String(string(v.SortField)) + } + + if len(v.SortOrder) > 0 { + ok := object.Key("sortOrder") + ok.String(string(v.SortOrder)) + } + + return nil +} + func awsRestjson1_serializeDocumentTagMap(v map[string]string, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/medicalimaging/types/enums.go b/service/medicalimaging/types/enums.go index 28768754b56..a533a442d64 100644 --- a/service/medicalimaging/types/enums.go +++ b/service/medicalimaging/types/enums.go @@ -119,3 +119,41 @@ func (Operator) Values() []Operator { "BETWEEN", } } + +type SortField string + +// Enum values for SortField +const ( + SortFieldUpdatedAt SortField = "updatedAt" + SortFieldCreatedAt SortField = "createdAt" + SortFieldDICOMStudyDateAndTime SortField = "DICOMStudyDateAndTime" +) + +// Values returns all known values for SortField. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (SortField) Values() []SortField { + return []SortField{ + "updatedAt", + "createdAt", + "DICOMStudyDateAndTime", + } +} + +type SortOrder string + +// Enum values for SortOrder +const ( + SortOrderAsc SortOrder = "ASC" + SortOrderDesc SortOrder = "DESC" +) + +// Values returns all known values for SortOrder. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
+func (SortOrder) Values() []SortOrder { + return []SortOrder{ + "ASC", + "DESC", + } +} diff --git a/service/medicalimaging/types/types.go b/service/medicalimaging/types/types.go index 835a5f9f986..839ab93b38f 100644 --- a/service/medicalimaging/types/types.go +++ b/service/medicalimaging/types/types.go @@ -306,16 +306,28 @@ type DICOMTags struct { // The patient sex. DICOMPatientSex *string + // The DICOM provided identifier for the series Body Part Examined. + DICOMSeriesBodyPart *string + + // The DICOM provided identifier for the Series Instance UID. + DICOMSeriesInstanceUID *string + + // The DICOM provided identifier for the series Modality. + DICOMSeriesModality *string + + // The DICOM provided identifier for the Series Number. + DICOMSeriesNumber *int32 + // The study date. DICOMStudyDate *string - // The description of the study. + // The DICOM provided Study Description. DICOMStudyDescription *string - // The DICOM provided studyId. + // The DICOM provided identifier for the Study ID. DICOMStudyId *string - // The DICOM provided identifier for studyInstanceUid.> + // The DICOM provided identifier for the Study Instance UID. DICOMStudyInstanceUID *string // The study time. @@ -432,9 +444,11 @@ func (*MetadataUpdatesMemberDICOMUpdates) isMetadataUpdates() {} // SearchByAttributeValueMemberCreatedAt // SearchByAttributeValueMemberDICOMAccessionNumber // SearchByAttributeValueMemberDICOMPatientId +// SearchByAttributeValueMemberDICOMSeriesInstanceUID // SearchByAttributeValueMemberDICOMStudyDateAndTime // SearchByAttributeValueMemberDICOMStudyId // SearchByAttributeValueMemberDICOMStudyInstanceUID +// SearchByAttributeValueMemberUpdatedAt type SearchByAttributeValue interface { isSearchByAttributeValue() } @@ -466,6 +480,15 @@ type SearchByAttributeValueMemberDICOMPatientId struct { func (*SearchByAttributeValueMemberDICOMPatientId) isSearchByAttributeValue() {} +// The Series Instance UID input for search. +type SearchByAttributeValueMemberDICOMSeriesInstanceUID struct { + Value string + + noSmithyDocumentSerde +} + +func (*SearchByAttributeValueMemberDICOMSeriesInstanceUID) isSearchByAttributeValue() {} + // The aggregated structure containing DICOM study date and study time for search. type SearchByAttributeValueMemberDICOMStudyDateAndTime struct { Value DICOMStudyDateAndTime @@ -493,12 +516,24 @@ type SearchByAttributeValueMemberDICOMStudyInstanceUID struct { func (*SearchByAttributeValueMemberDICOMStudyInstanceUID) isSearchByAttributeValue() {} +// The timestamp input for search. +type SearchByAttributeValueMemberUpdatedAt struct { + Value time.Time + + noSmithyDocumentSerde +} + +func (*SearchByAttributeValueMemberUpdatedAt) isSearchByAttributeValue() {} + // The search criteria. type SearchCriteria struct { // The filters for the search criteria. Filters []SearchFilter + // The sort input for search criteria. + Sort *Sort + noSmithyDocumentSerde } @@ -518,6 +553,22 @@ type SearchFilter struct { noSmithyDocumentSerde } +// Sort search results. +type Sort struct { + + // The sort field for search criteria. + // + // This member is required. + SortField SortField + + // The sort order for search criteria. + // + // This member is required. 
+ SortOrder SortOrder + + noSmithyDocumentSerde +} + type noSmithyDocumentSerde = smithydocument.NoSerde // UnknownUnionMember is returned when a union member is returned over the wire, diff --git a/service/medicalimaging/types/types_exported_test.go b/service/medicalimaging/types/types_exported_test.go index 1b10702ef7c..89da847ec23 100644 --- a/service/medicalimaging/types/types_exported_test.go +++ b/service/medicalimaging/types/types_exported_test.go @@ -39,6 +39,9 @@ func ExampleSearchByAttributeValue_outputUsage() { case *types.SearchByAttributeValueMemberDICOMPatientId: _ = v.Value // Value is string + case *types.SearchByAttributeValueMemberDICOMSeriesInstanceUID: + _ = v.Value // Value is string + case *types.SearchByAttributeValueMemberDICOMStudyDateAndTime: _ = v.Value // Value is types.DICOMStudyDateAndTime @@ -48,6 +51,9 @@ func ExampleSearchByAttributeValue_outputUsage() { case *types.SearchByAttributeValueMemberDICOMStudyInstanceUID: _ = v.Value // Value is string + case *types.SearchByAttributeValueMemberUpdatedAt: + _ = v.Value // Value is time.Time + case *types.UnknownUnionMember: fmt.Println("unknown tag:", v.Tag) @@ -62,4 +68,5 @@ var _ *string var _ *string var _ *string var _ *string +var _ *string var _ *time.Time diff --git a/service/medicalimaging/validators.go b/service/medicalimaging/validators.go index cde83242da7..db23a1febc4 100644 --- a/service/medicalimaging/validators.go +++ b/service/medicalimaging/validators.go @@ -551,6 +551,11 @@ func validateSearchCriteria(v *types.SearchCriteria) error { invalidParams.AddNested("Filters", err.(smithy.InvalidParamsError)) } } + if v.Sort != nil { + if err := validateSort(v.Sort); err != nil { + invalidParams.AddNested("Sort", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -597,6 +602,24 @@ func validateSearchFilters(v []types.SearchFilter) error { } } +func validateSort(v *types.Sort) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Sort"} + if len(v.SortOrder) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SortOrder")) + } + if len(v.SortField) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SortField")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCopyImageSetInput(v *CopyImageSetInput) error { if v == nil { return nil diff --git a/service/transfer/api_op_CreateConnector.go b/service/transfer/api_op_CreateConnector.go index 9d751d973f4..c00777e73cd 100644 --- a/service/transfer/api_op_CreateConnector.go +++ b/service/transfer/api_op_CreateConnector.go @@ -73,6 +73,9 @@ type CreateConnectorInput struct { // set, you can view connector activity in your CloudWatch logs. LoggingRole *string + // Specifies the name of the security policy for the connector. + SecurityPolicyName *string + // A structure that contains the parameters for an SFTP connector object. SftpConfig *types.SftpConnectorConfig diff --git a/service/transfer/api_op_CreateServer.go b/service/transfer/api_op_CreateServer.go index 1c15ff29c4f..27bf493c6cf 100644 --- a/service/transfer/api_op_CreateServer.go +++ b/service/transfer/api_op_CreateServer.go @@ -188,7 +188,7 @@ type CreateServerInput struct { // have a file target. S3StorageOptions *types.S3StorageOptions - // Specifies the name of the security policy that is attached to the server. + // Specifies the name of the security policy for the server. 
SecurityPolicyName *string // Specifies the log groups to which your server logs are sent. To specify a log diff --git a/service/transfer/api_op_DescribeSecurityPolicy.go b/service/transfer/api_op_DescribeSecurityPolicy.go index 3a803b54e56..7462bc588f0 100644 --- a/service/transfer/api_op_DescribeSecurityPolicy.go +++ b/service/transfer/api_op_DescribeSecurityPolicy.go @@ -11,10 +11,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes the security policy that is attached to your file transfer -// protocol-enabled server. The response contains a description of the security -// policy's properties. For more information about security policies, see Working -// with security policies (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html) +// Describes the security policy that is attached to your server or SFTP +// connector. The response contains a description of the security policy's +// properties. For more information about security policies, see Working with +// security policies for servers (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html) +// or Working with security policies for SFTP connectors (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies-connectors.html) // . func (c *Client) DescribeSecurityPolicy(ctx context.Context, params *DescribeSecurityPolicyInput, optFns ...func(*Options)) (*DescribeSecurityPolicyOutput, error) { if params == nil { @@ -33,7 +34,7 @@ func (c *Client) DescribeSecurityPolicy(ctx context.Context, params *DescribeSec type DescribeSecurityPolicyInput struct { - // Specifies the name of the security policy that is attached to the server. + // Specify the text name of the security policy for which you want the details. // // This member is required. SecurityPolicyName *string diff --git a/service/transfer/api_op_ListSecurityPolicies.go b/service/transfer/api_op_ListSecurityPolicies.go index ee9ed1e71e3..42445741bd6 100644 --- a/service/transfer/api_op_ListSecurityPolicies.go +++ b/service/transfer/api_op_ListSecurityPolicies.go @@ -10,8 +10,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the security policies that are attached to your file transfer -// protocol-enabled servers. +// Lists the security policies that are attached to your servers and SFTP +// connectors. For more information about security policies, see Working with +// security policies for servers (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html) +// or Working with security policies for SFTP connectors (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies-connectors.html) +// . func (c *Client) ListSecurityPolicies(ctx context.Context, params *ListSecurityPoliciesInput, optFns ...func(*Options)) (*ListSecurityPoliciesOutput, error) { if params == nil { params = &ListSecurityPoliciesInput{} diff --git a/service/transfer/api_op_UpdateConnector.go b/service/transfer/api_op_UpdateConnector.go index 5d80c78d6b4..2bc729e0cd1 100644 --- a/service/transfer/api_op_UpdateConnector.go +++ b/service/transfer/api_op_UpdateConnector.go @@ -66,6 +66,9 @@ type UpdateConnectorInput struct { // set, you can view connector activity in your CloudWatch logs. LoggingRole *string + // Specifies the name of the security policy for the connector. + SecurityPolicyName *string + // A structure that contains the parameters for an SFTP connector object. 
SftpConfig *types.SftpConnectorConfig diff --git a/service/transfer/api_op_UpdateServer.go b/service/transfer/api_op_UpdateServer.go index dffffc24b1a..d74647ed9c8 100644 --- a/service/transfer/api_op_UpdateServer.go +++ b/service/transfer/api_op_UpdateServer.go @@ -170,7 +170,7 @@ type UpdateServerInput struct { // have a file target. S3StorageOptions *types.S3StorageOptions - // Specifies the name of the security policy that is attached to the server. + // Specifies the name of the security policy for the server. SecurityPolicyName *string // Specifies the log groups to which your server logs are sent. To specify a log diff --git a/service/transfer/deserializers.go b/service/transfer/deserializers.go index ec4dc90719a..29505395ba7 100644 --- a/service/transfer/deserializers.go +++ b/service/transfer/deserializers.go @@ -8014,6 +8014,15 @@ func awsAwsjson11_deserializeDocumentDescribedConnector(v **types.DescribedConne sv.LoggingRole = ptr.String(jtv) } + case "SecurityPolicyName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectorSecurityPolicyName to be of type string, got %T instead", value) + } + sv.SecurityPolicyName = ptr.String(jtv) + } + case "ServiceManagedEgressIpAddresses": if err := awsAwsjson11_deserializeDocumentServiceManagedEgressIpAddresses(&sv.ServiceManagedEgressIpAddresses, value); err != nil { return err @@ -8335,6 +8344,11 @@ func awsAwsjson11_deserializeDocumentDescribedSecurityPolicy(v **types.Described sv.Fips = ptr.Bool(jtv) } + case "Protocols": + if err := awsAwsjson11_deserializeDocumentSecurityPolicyProtocols(&sv.Protocols, value); err != nil { + return err + } + case "SecurityPolicyName": if value != nil { jtv, ok := value.(string) @@ -8349,6 +8363,11 @@ func awsAwsjson11_deserializeDocumentDescribedSecurityPolicy(v **types.Described return err } + case "SshHostKeyAlgorithms": + if err := awsAwsjson11_deserializeDocumentSecurityPolicyOptions(&sv.SshHostKeyAlgorithms, value); err != nil { + return err + } + case "SshKexs": if err := awsAwsjson11_deserializeDocumentSecurityPolicyOptions(&sv.SshKexs, value); err != nil { return err @@ -8364,6 +8383,15 @@ func awsAwsjson11_deserializeDocumentDescribedSecurityPolicy(v **types.Described return err } + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SecurityPolicyResourceType to be of type string, got %T instead", value) + } + sv.Type = types.SecurityPolicyResourceType(jtv) + } + default: _, _ = key, value @@ -11338,6 +11366,42 @@ func awsAwsjson11_deserializeDocumentSecurityPolicyOptions(v *[]string, value in return nil } +func awsAwsjson11_deserializeDocumentSecurityPolicyProtocols(v *[]types.SecurityPolicyProtocol, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SecurityPolicyProtocol + if *v == nil { + cv = []types.SecurityPolicyProtocol{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SecurityPolicyProtocol + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SecurityPolicyProtocol to be of type string, got %T instead", value) + } + col = types.SecurityPolicyProtocol(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentServiceManagedEgressIpAddresses(v *[]string, value interface{}) error 
{ if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/transfer/serializers.go b/service/transfer/serializers.go index 883862f135b..72cbc32dd8b 100644 --- a/service/transfer/serializers.go +++ b/service/transfer/serializers.go @@ -4113,6 +4113,11 @@ func awsAwsjson11_serializeOpDocumentCreateConnectorInput(v *CreateConnectorInpu ok.String(*v.LoggingRole) } + if v.SecurityPolicyName != nil { + ok := object.Key("SecurityPolicyName") + ok.String(*v.SecurityPolicyName) + } + if v.SftpConfig != nil { ok := object.Key("SftpConfig") if err := awsAwsjson11_serializeDocumentSftpConnectorConfig(v.SftpConfig, ok); err != nil { @@ -5336,6 +5341,11 @@ func awsAwsjson11_serializeOpDocumentUpdateConnectorInput(v *UpdateConnectorInpu ok.String(*v.LoggingRole) } + if v.SecurityPolicyName != nil { + ok := object.Key("SecurityPolicyName") + ok.String(*v.SecurityPolicyName) + } + if v.SftpConfig != nil { ok := object.Key("SftpConfig") if err := awsAwsjson11_serializeDocumentSftpConnectorConfig(v.SftpConfig, ok); err != nil { diff --git a/service/transfer/types/enums.go b/service/transfer/types/enums.go index 3120fdf7718..2c2dc9cba97 100644 --- a/service/transfer/types/enums.go +++ b/service/transfer/types/enums.go @@ -437,6 +437,42 @@ func (Protocol) Values() []Protocol { } } +type SecurityPolicyProtocol string + +// Enum values for SecurityPolicyProtocol +const ( + SecurityPolicyProtocolSftp SecurityPolicyProtocol = "SFTP" + SecurityPolicyProtocolFtps SecurityPolicyProtocol = "FTPS" +) + +// Values returns all known values for SecurityPolicyProtocol. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (SecurityPolicyProtocol) Values() []SecurityPolicyProtocol { + return []SecurityPolicyProtocol{ + "SFTP", + "FTPS", + } +} + +type SecurityPolicyResourceType string + +// Enum values for SecurityPolicyResourceType +const ( + SecurityPolicyResourceTypeServer SecurityPolicyResourceType = "SERVER" + SecurityPolicyResourceTypeConnector SecurityPolicyResourceType = "CONNECTOR" +) + +// Values returns all known values for SecurityPolicyResourceType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (SecurityPolicyResourceType) Values() []SecurityPolicyResourceType { + return []SecurityPolicyResourceType{ + "SERVER", + "CONNECTOR", + } +} + type SetStatOption string // Enum values for SetStatOption diff --git a/service/transfer/types/types.go b/service/transfer/types/types.go index a21209a940d..be8c50ed2f2 100644 --- a/service/transfer/types/types.go +++ b/service/transfer/types/types.go @@ -424,6 +424,9 @@ type DescribedConnector struct { // set, you can view connector activity in your CloudWatch logs. LoggingRole *string + // The text name of the security policy for the specified connector. + SecurityPolicyName *string + // The list of egress IP addresses of this connector. These IP addresses are // assigned automatically when you create the connector. ServiceManagedEgressIpAddresses []string @@ -548,36 +551,53 @@ type DescribedProfile struct { noSmithyDocumentSerde } -// Describes the properties of a security policy that was specified. 
For more -// information about security policies, see Working with security policies (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html) +// Describes the properties of a security policy that you specify. For more +// information about security policies, see Working with security policies for +// servers (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html) +// or Working with security policies for SFTP connectors (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies-connectors.html) // . type DescribedSecurityPolicy struct { - // Specifies the name of the security policy that is attached to the server. + // The text name of the specified security policy. // // This member is required. SecurityPolicyName *string // Specifies whether this policy enables Federal Information Processing Standards - // (FIPS). + // (FIPS). This parameter applies to both server and connector security policies. Fips *bool - // Specifies the enabled Secure Shell (SSH) cipher encryption algorithms in the - // security policy that is attached to the server. + // Lists the file transfer protocols that the security policy applies to. + Protocols []SecurityPolicyProtocol + + // Lists the enabled Secure Shell (SSH) cipher encryption algorithms in the + // security policy that is attached to the server or connector. This parameter + // applies to both server and connector security policies. SshCiphers []string - // Specifies the enabled SSH key exchange (KEX) encryption algorithms in the - // security policy that is attached to the server. + // Lists the host key algorithms for the security policy. This parameter only + // applies to security policies for connectors. + SshHostKeyAlgorithms []string + + // Lists the enabled SSH key exchange (KEX) encryption algorithms in the security + // policy that is attached to the server or connector. This parameter applies to + // both server and connector security policies. SshKexs []string - // Specifies the enabled SSH message authentication code (MAC) encryption - // algorithms in the security policy that is attached to the server. + // Lists the enabled SSH message authentication code (MAC) encryption algorithms + // in the security policy that is attached to the server or connector. This + // parameter applies to both server and connector security policies. SshMacs []string - // Specifies the enabled Transport Layer Security (TLS) cipher encryption - // algorithms in the security policy that is attached to the server. + // Lists the enabled Transport Layer Security (TLS) cipher encryption algorithms + // in the security policy that is attached to the server. This parameter only + // applies to security policies for servers. TlsCiphers []string + // The resource type to which the security policy applies, either server or + // connector. + Type SecurityPolicyResourceType + noSmithyDocumentSerde } @@ -708,7 +728,7 @@ type DescribedServer struct { // have a file target. S3StorageOptions *S3StorageOptions - // Specifies the name of the security policy that is attached to the server. + // Specifies the name of the security policy for the server. 
SecurityPolicyName *string // Specifies the unique system-assigned identifier for a server that you diff --git a/service/verifiedpermissions/internal/endpoints/endpoints.go b/service/verifiedpermissions/internal/endpoints/endpoints.go index 1050e3152b7..485703c60f3 100644 --- a/service/verifiedpermissions/internal/endpoints/endpoints.go +++ b/service/verifiedpermissions/internal/endpoints/endpoints.go @@ -175,6 +175,12 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, @@ -199,6 +205,51 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-west-3", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "fips-ca-central-1", + }: endpoints.Endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "me-central-1", }: endpoints.Endpoint{}, @@ -211,15 +262,39 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "us-east-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-east-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-west-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-west-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, }, }, {
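The FIPS endpoint variants registered above are opt-in rather than default. A minimal sketch of resolving them through the shared config loader; the region choice and reliance on the default credential chain are assumptions for illustration only.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/verifiedpermissions"
)

// newFIPSClient builds a Verified Permissions client that resolves the
// verifiedpermissions-fips.* hostnames added above for supported regions.
func newFIPSClient(ctx context.Context) (*verifiedpermissions.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion("us-east-1"), // any region with a FIPS variant listed above
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
	)
	if err != nil {
		return nil, err
	}
	return verifiedpermissions.NewFromConfig(cfg), nil
}
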