From efcd8f2dd8567141c9d45ffa45fb666ecc7d967a Mon Sep 17 00:00:00 2001 From: AWS SDK for Ruby Date: Wed, 13 Apr 2022 18:04:01 +0000 Subject: [PATCH] Updated API models and rebuilt service gems. --- apis/fsx/2018-03-01/api-2.json | 5 +- apis/fsx/2018-03-01/docs-2.json | 12 +- apis/fsx/2018-03-01/paginators-1.json | 6 +- apis/monitoring/2010-08-01/api-2.json | 41 +++- apis/monitoring/2010-08-01/docs-2.json | 59 +++++- gems/aws-sdk-cloudwatch/CHANGELOG.md | 5 + gems/aws-sdk-cloudwatch/VERSION | 2 +- .../lib/aws-sdk-cloudwatch.rb | 2 +- .../lib/aws-sdk-cloudwatch/alarm.rb | 13 +- .../lib/aws-sdk-cloudwatch/client.rb | 101 +++++++-- .../lib/aws-sdk-cloudwatch/client_api.rb | 22 ++ .../lib/aws-sdk-cloudwatch/types.rb | 200 ++++++++++++++++-- gems/aws-sdk-fsx/CHANGELOG.md | 5 + gems/aws-sdk-fsx/VERSION | 2 +- gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb | 2 +- gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb | 40 ++-- gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb | 63 ++++-- 17 files changed, 480 insertions(+), 100 deletions(-) diff --git a/apis/fsx/2018-03-01/api-2.json b/apis/fsx/2018-03-01/api-2.json index 25e87eaf1fc..0092933183d 100644 --- a/apis/fsx/2018-03-01/api-2.json +++ b/apis/fsx/2018-03-01/api-2.json @@ -2392,7 +2392,10 @@ }, "OntapDeploymentType":{ "type":"string", - "enum":["MULTI_AZ_1"] + "enum":[ + "MULTI_AZ_1", + "SINGLE_AZ_1" + ] }, "OntapEndpointIpAddresses":{ "type":"list", diff --git a/apis/fsx/2018-03-01/docs-2.json b/apis/fsx/2018-03-01/docs-2.json index b715b81c2c0..c5e16e46dd6 100644 --- a/apis/fsx/2018-03-01/docs-2.json +++ b/apis/fsx/2018-03-01/docs-2.json @@ -1342,8 +1342,8 @@ "IpAddressRange": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$EndpointIpAddressRange": "

Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range.

The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger.

", - "OntapFileSystemConfiguration$EndpointIpAddressRange": "

The IP address range in which the endpoints to access your file system are created.

The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger. If you do not specify this optional parameter, Amazon FSx will automatically select a CIDR block for you.

" + "CreateFileSystemOntapConfiguration$EndpointIpAddressRange": "

(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range.

The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger.

", + "OntapFileSystemConfiguration$EndpointIpAddressRange": "

(Multi-AZ only) The IP address range in which the endpoints to access your file system are created.

The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger. If you do not specify this optional parameter, Amazon FSx will automatically select a CIDR block for you.

" } }, "JunctionPath": { @@ -1537,8 +1537,8 @@ "OntapDeploymentType": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$DeploymentType": "

Specifies the FSx for ONTAP file system deployment type to use in creating the file system. MULTI_AZ_1 is the supported ONTAP deployment type.

", - "OntapFileSystemConfiguration$DeploymentType": "

The ONTAP file system deployment type.

" + "CreateFileSystemOntapConfiguration$DeploymentType": "

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

", + "OntapFileSystemConfiguration$DeploymentType": "

Specifies the FSx for ONTAP file system deployment type in use in the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

" } }, "OntapEndpointIpAddresses": { @@ -1819,8 +1819,8 @@ "RouteTableIds": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$RouteTableIds": "

Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

", - "OntapFileSystemConfiguration$RouteTableIds": "

The VPC route tables in which your file system's endpoints are created.

" + "CreateFileSystemOntapConfiguration$RouteTableIds": "

(Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

", + "OntapFileSystemConfiguration$RouteTableIds": "

(Multi-AZ only) The VPC route tables in which your file system's endpoints are created.

" } }, "S3DataRepositoryConfiguration": { diff --git a/apis/fsx/2018-03-01/paginators-1.json b/apis/fsx/2018-03-01/paginators-1.json index a1d7aab00e9..e2d6ecfb0bc 100644 --- a/apis/fsx/2018-03-01/paginators-1.json +++ b/apis/fsx/2018-03-01/paginators-1.json @@ -33,12 +33,14 @@ "DescribeStorageVirtualMachines": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "StorageVirtualMachines" }, "DescribeVolumes": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Volumes" }, "ListTagsForResource": { "input_token": "NextToken", diff --git a/apis/monitoring/2010-08-01/api-2.json b/apis/monitoring/2010-08-01/api-2.json index 2d58079b9d7..7da31990144 100644 --- a/apis/monitoring/2010-08-01/api-2.json +++ b/apis/monitoring/2010-08-01/api-2.json @@ -1289,7 +1289,8 @@ "State":{"shape":"MetricStreamState"}, "CreationDate":{"shape":"Timestamp"}, "LastUpdateDate":{"shape":"Timestamp"}, - "OutputFormat":{"shape":"MetricStreamOutputFormat"} + "OutputFormat":{"shape":"MetricStreamOutputFormat"}, + "StatisticsConfigurations":{"shape":"MetricStreamStatisticsConfigurations"} } }, "GetMetricWidgetImageInput":{ @@ -1843,6 +1844,41 @@ "min":1 }, "MetricStreamState":{"type":"string"}, + "MetricStreamStatistic":{"type":"string"}, + "MetricStreamStatisticsAdditionalStatistics":{ + "type":"list", + "member":{"shape":"MetricStreamStatistic"} + }, + "MetricStreamStatisticsConfiguration":{ + "type":"structure", + "required":[ + "IncludeMetrics", + "AdditionalStatistics" + ], + "members":{ + "IncludeMetrics":{"shape":"MetricStreamStatisticsIncludeMetrics"}, + "AdditionalStatistics":{"shape":"MetricStreamStatisticsAdditionalStatistics"} + } + }, + "MetricStreamStatisticsConfigurations":{ + "type":"list", + "member":{"shape":"MetricStreamStatisticsConfiguration"} + }, + "MetricStreamStatisticsIncludeMetrics":{ + "type":"list", + 
"member":{"shape":"MetricStreamStatisticsMetric"} + }, + "MetricStreamStatisticsMetric":{ + "type":"structure", + "required":[ + "Namespace", + "MetricName" + ], + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricName":{"shape":"MetricName"} + } + }, "MetricWidget":{"type":"string"}, "MetricWidgetImage":{"type":"blob"}, "Metrics":{ @@ -2026,7 +2062,8 @@ "FirehoseArn":{"shape":"AmazonResourceName"}, "RoleArn":{"shape":"AmazonResourceName"}, "OutputFormat":{"shape":"MetricStreamOutputFormat"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "StatisticsConfigurations":{"shape":"MetricStreamStatisticsConfigurations"} } }, "PutMetricStreamOutput":{ diff --git a/apis/monitoring/2010-08-01/docs-2.json b/apis/monitoring/2010-08-01/docs-2.json index bf23a10cfec..fcb7c61ff09 100644 --- a/apis/monitoring/2010-08-01/docs-2.json +++ b/apis/monitoring/2010-08-01/docs-2.json @@ -18,7 +18,7 @@ "EnableInsightRules": "

Enables the specified Contributor Insights rules. When rules are enabled, they immediately begin analyzing log data.

", "GetDashboard": "

Displays the details of the dashboard that you specify.

To copy an existing dashboard, use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard to create the copy.

", "GetInsightRuleReport": "

This operation returns the time series data collected by a Contributor Insights rule. The data includes the identity and number of contributors to the log group.

You can also optionally return one or more statistics about each data point in the time series. These statistics can include the following:

", - "GetMetricData": "

You can use the GetMetricData API to retrieve as many as 500 different metrics in a single request, with a total of as many as 100,800 data points. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

If you omit Unit in your request, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

", + "GetMetricData": "

You can use the GetMetricData API to retrieve CloudWatch metric values. The operation can also include a CloudWatch Metrics Insights query, and one or more metric math functions.

A GetMetricData operation that does not include a query can retrieve as many as 500 different metrics in a single request, with a total of as many as 100,800 data points. You can also optionally perform metric math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

If you include a Metrics Insights query, each GetMetricData operation can include only one query. But the same GetMetricData operation can also retrieve other metrics. Metrics Insights queries can query only the most recent three hours of metric data. For more information about Metrics Insights, see Query your metrics with CloudWatch Metrics Insights.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

If you omit Unit in your request, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

Using Metrics Insights queries with metric math

You can't mix a Metrics Insights query and metric math syntax in the same expression, but you can reference results from a Metrics Insights query within other metric math expressions. A Metrics Insights query without a GROUP BY clause returns a single time-series (TS), and can be used as input for a metric math expression that expects a single time series. A Metrics Insights query with a GROUP BY clause returns an array of time-series (TS[]), and can be used as input for a metric math expression that expects an array of time series.

", "GetMetricStatistics": "

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

Percentile statistics are not available for metrics when any of the metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by Amazon Web Services services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

", "GetMetricStream": "

Returns information about the metric stream that you specify.

", "GetMetricWidgetImage": "

You can use the GetMetricWidgetImage API to retrieve a snapshot graph of one or more Amazon CloudWatch metrics as a bitmap image. You can then embed this image into your services and products, such as wiki pages, reports, and documents. You could also retrieve images regularly, such as every minute, and create your own custom live dashboard.

The graph you retrieve can include all CloudWatch metric graph features, including metric math and horizontal and vertical annotations.

There is a limit of 20 transactions per second for this API. Each GetMetricWidgetImage action has the following limits:

", @@ -27,12 +27,12 @@ "ListMetrics": "

List the specified metrics. You can use the returned metrics with GetMetricData or GetMetricStatistics to obtain statistical data.

Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.

After you create a metric, allow up to 15 minutes before the metric appears. You can see statistics about the metric sooner by using GetMetricData or GetMetricStatistics.

ListMetrics doesn't return information about metrics if those metrics haven't reported data in the past two weeks. To retrieve those metrics, use GetMetricData or GetMetricStatistics.

", "ListTagsForResource": "

Displays the tags associated with a CloudWatch resource. Currently, alarms and Contributor Insights rules support tagging.

", "PutAnomalyDetector": "

Creates an anomaly detection model for a CloudWatch metric. You can use the model to display a band of expected normal values when the metric is graphed.

For more information, see CloudWatch Anomaly Detection.

", - "PutCompositeAlarm": "

Creates or updates a composite alarm. When you create a composite alarm, you specify a rule expression for the alarm that takes into account the alarm states of other alarms that you have created. The composite alarm goes into ALARM state only if all conditions of the rule are met.

The alarms specified in a composite alarm's rule expression can include metric alarms and other composite alarms.

Using composite alarms can reduce alarm noise. You can create multiple metric alarms, and also create a composite alarm and set up alerts only for the composite alarm. For example, you could create a composite alarm that goes into ALARM state only when more than one of the underlying metric alarms are in ALARM state.

Currently, the only alarm actions that can be taken by composite alarms are notifying SNS topics.

It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.

To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule of one of the alarms to False.

Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed. For a composite alarm, this initial time after creation is the only time that the alarm can be in INSUFFICIENT_DATA state.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

To use this operation, you must be signed on with the cloudwatch:PutCompositeAlarm permission that is scoped to *. You can't create a composite alarms if your cloudwatch:PutCompositeAlarm permission has a narrower scope.

If you are an IAM user, you must have iam:CreateServiceLinkedRole to create a composite alarm that has Systems Manager OpsItem actions.

", + "PutCompositeAlarm": "

Creates or updates a composite alarm. When you create a composite alarm, you specify a rule expression for the alarm that takes into account the alarm states of other alarms that you have created. The composite alarm goes into ALARM state only if all conditions of the rule are met.

The alarms specified in a composite alarm's rule expression can include metric alarms and other composite alarms. The rule expression of a composite alarm can include as many as 100 underlying alarms. Any single alarm can be included in the rule expressions of as many as 150 composite alarms.

Using composite alarms can reduce alarm noise. You can create multiple metric alarms, and also create a composite alarm and set up alerts only for the composite alarm. For example, you could create a composite alarm that goes into ALARM state only when more than one of the underlying metric alarms are in ALARM state.

Currently, the only alarm actions that can be taken by composite alarms are notifying SNS topics.

It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.

To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule of one of the alarms to False.

Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed. For a composite alarm, this initial time after creation is the only time that the alarm can be in INSUFFICIENT_DATA state.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

To use this operation, you must be signed on with the cloudwatch:PutCompositeAlarm permission that is scoped to *. You can't create composite alarms if your cloudwatch:PutCompositeAlarm permission has a narrower scope.

If you are an IAM user, you must have iam:CreateServiceLinkedRole to create a composite alarm that has Systems Manager OpsItem actions.

", "PutDashboard": "

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

", "PutInsightRule": "

Creates a Contributor Insights rule. Rules evaluate log events in a CloudWatch Logs log group, enabling you to find contributor data for the log events in that log group. For more information, see Using Contributor Insights to Analyze High-Cardinality Data.

If you create a rule, delete it, and then re-create it with the same name, historical data from the first time the rule was created might not be available.

", "PutMetricAlarm": "

Creates or updates an alarm and associates it with the specified metric, metric math expression, or anomaly detection model.

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

The first time you create an alarm in the Amazon Web Services Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked roles are called AWSServiceRoleForCloudWatchEvents and AWSServiceRoleForCloudWatchAlarms_ActionSSM. For more information, see Amazon Web Services service-linked role.

Cross-account alarms

You can set an alarm on metrics in the current account, or in another account. To create a cross-account alarm that watches a metric in a different account, you must have completed the following pre-requisites:

", "PutMetricData": "

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for GetMetricData or GetMetricStatistics.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

", - "PutMetricStream": "

Creates or updates a metric stream. Metric streams can automatically stream CloudWatch metrics to Amazon Web Services destinations including Amazon S3 and to many third-party solutions.

For more information, see Using Metric Streams.

To create a metric stream, you must be logged on to an account that has the iam:PassRole permission and either the CloudWatchFullAccess policy or the cloudwatch:PutMetricStream permission.

When you create or update a metric stream, you choose one of the following:

When you use PutMetricStream to create a new metric stream, the stream is created in the running state. If you use it to update an existing stream, the state of the stream is not changed.

", + "PutMetricStream": "

Creates or updates a metric stream. Metric streams can automatically stream CloudWatch metrics to Amazon Web Services destinations including Amazon S3 and to many third-party solutions.

For more information, see Using Metric Streams.

To create a metric stream, you must be logged on to an account that has the iam:PassRole permission and either the CloudWatchFullAccess policy or the cloudwatch:PutMetricStream permission.

When you create or update a metric stream, you choose one of the following:

By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT statistics for each metric that is streamed. You can use the StatisticsConfigurations parameter to have the metric stream also send extended statistics in the stream. Streaming extended statistics incurs additional costs. For more information, see Amazon CloudWatch Pricing.

When you use PutMetricStream to create a new metric stream, the stream is created in the running state. If you use it to update an existing stream, the state of the stream is not changed.

", "SetAlarmState": "

Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to ALARM sends an SNS message.

Metric alarms returns to their actual state quickly, often within seconds. Because the metric alarm state change happens quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory.

If you use SetAlarmState on a composite alarm, the composite alarm is not guaranteed to return to its actual state. It returns to its actual state only once any of its children alarms change state. It is also reevaluated if you update its configuration.

If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling policies, you must include information in the StateReasonData parameter to enable the policy to take the correct action.

", "StartMetricStreams": "

Starts the streaming of metrics for one or more of your metric streams.

", "StopMetricStreams": "

Stops the streaming of metrics for one or more of your metric streams.

", @@ -490,7 +490,7 @@ } }, "Dimension": { - "base": "

A dimension is a name/value pair that is part of the identity of a metric. You can assign up to 10 dimensions to a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric.

", + "base": "

A dimension is a name/value pair that is part of the identity of a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric. For example, many Amazon EC2 metrics publish InstanceId as a dimension name, and the actual instance ID as the value for that dimension.

You can assign up to 10 dimensions to a metric.

", "refs": { "Dimensions$member": null } @@ -510,7 +510,7 @@ "DimensionName": { "base": null, "refs": { - "Dimension$Name": "

The name of the dimension. Dimension names must contain only ASCII characters and must include at least one non-whitespace character.

", + "Dimension$Name": "

The name of the dimension. Dimension names must contain only ASCII characters, must include at least one non-whitespace character, and cannot start with a colon (:).

", "DimensionFilter$Name": "

The dimension name to be matched.

" } }, @@ -1052,14 +1052,14 @@ "MetricDataQueries": { "base": null, "refs": { - "GetMetricDataInput$MetricDataQueries": "

The metric queries to be returned. A single GetMetricData call can include as many as 500 MetricDataQuery structures. Each of these structures can specify either a metric to retrieve, or a math expression to perform on retrieved data.

", + "GetMetricDataInput$MetricDataQueries": "

The metric queries to be returned. A single GetMetricData call can include as many as 500 MetricDataQuery structures. Each of these structures can specify either a metric to retrieve, a Metrics Insights query, or a math expression to perform on retrieved data.

", "MetricAlarm$Metrics": "

An array of MetricDataQuery structures, used in an alarm based on a metric math expression. Each structure either retrieves a metric or performs a math expression. One item in the Metrics array is the math expression that the alarm watches. This expression is designated by having ReturnData set to true.

", "MetricMathAnomalyDetector$MetricDataQueries": "

An array of metric data query structures that enables you to create an anomaly detector based on the result of a metric math expression. Each item in MetricDataQueries gets a metric or performs a math expression. One item in MetricDataQueries is the expression that provides the time series that the anomaly detector uses as input. Designate the expression by setting ReturnData to True for this object in the array. For all other expressions and metrics, set ReturnData to False. The designated expression must return a single time series.

", "PutMetricAlarmInput$Metrics": "

An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array.

Each item in the Metrics array either retrieves a metric or performs a math expression.

One item in the Metrics array is the expression that the alarm watches. You designate this expression by setting ReturnData to true for this object in the array. For more information, see MetricDataQuery.

If you use the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.

" } }, "MetricDataQuery": { - "base": "

This structure is used in both GetMetricData and PutMetricAlarm. The supported use of this structure is different for those two operations.

When used in GetMetricData, it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 500 MetricDataQuery structures.

When used in PutMetricAlarm, it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Of those Expression structures, one must have True as the value for ReturnData. The result of this expression is the value the alarm watches.

Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.

", + "base": "

This structure is used in both GetMetricData and PutMetricAlarm. The supported use of this structure is different for those two operations.

When used in GetMetricData, it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a Metrics Insights query or a math expression. A single GetMetricData call can include up to 500 MetricDataQuery structures.

When used in PutMetricAlarm, it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Of those Expression structures, one must have True as the value for ReturnData. The result of this expression is the value the alarm watches.

Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.

", "refs": { "MetricDataQueries$member": null } @@ -1092,7 +1092,7 @@ "MetricExpression": { "base": null, "refs": { - "MetricDataQuery$Expression": "

The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.

" + "MetricDataQuery$Expression": "

This field can contain either a Metrics Insights query, or a metric math expression to be performed on the returned data. For more information about Metrics Insights queries, see Metrics Insights query components and syntax in the Amazon CloudWatch User Guide.

A math expression can use the Id of the other metrics or queries to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.

" } }, "MetricId": { @@ -1132,6 +1132,7 @@ "Metric$MetricName": "

The name of the metric. This is a required field.

", "MetricAlarm$MetricName": "

The name of the metric associated with the alarm, if this is an alarm based on a single metric.

", "MetricDatum$MetricName": "

The name of the metric.

", + "MetricStreamStatisticsMetric$MetricName": "

The name of the metric.

", "PutAnomalyDetectorInput$MetricName": "

The name of the metric to create the anomaly detection model for.

", "PutMetricAlarmInput$MetricName": "

The name for the metric associated with the alarm. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array.

If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of the Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters. Instead, you specify all this information in the Metrics array.

", "SingleMetricAnomalyDetector$MetricName": "

The name of the metric to create the anomaly detection model for.

" @@ -1191,7 +1192,7 @@ "MetricStreamOutputFormat": { "base": null, "refs": { - "GetMetricStreamOutput$OutputFormat": "

", + "GetMetricStreamOutput$OutputFormat": "

The output format for the stream. Valid values are json and opentelemetry0.7. For more information about metric stream output formats, see Metric streams output formats.

", "MetricStreamEntry$OutputFormat": "

The output format of this metric stream. Valid values are json and opentelemetry0.7.

", "PutMetricStreamInput$OutputFormat": "

The output format for the stream. Valid values are json and opentelemetry0.7. For more information about metric stream output formats, see Metric streams output formats.

" } @@ -1203,6 +1204,43 @@ "MetricStreamEntry$State": "

The current state of this stream. Valid values are running and stopped.

" } }, + "MetricStreamStatistic": { + "base": null, + "refs": { + "MetricStreamStatisticsAdditionalStatistics$member": null + } + }, + "MetricStreamStatisticsAdditionalStatistics": { + "base": null, + "refs": { + "MetricStreamStatisticsConfiguration$AdditionalStatistics": "

The list of extended statistics that are to be streamed for the metrics listed in the IncludeMetrics array in this structure. This list can include as many as 20 statistics.

If the OutputFormat for the stream is opentelemetry0.7, the only valid values are p?? percentile statistics such as p90, p99 and so on.

If the OutputFormat for the stream is json, the valid values include the abbreviations for all of the extended statistics listed in CloudWatch statistics definitions. For example, this includes tm98, wm90, PR(:300), and so on.

" + } + }, + "MetricStreamStatisticsConfiguration": { + "base": "

By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT statistics for each metric that is streamed. This structure contains information for one metric that includes extended statistics in the stream. For more information about extended statistics, see CloudWatch statistics definitions.

", + "refs": { + "MetricStreamStatisticsConfigurations$member": null + } + }, + "MetricStreamStatisticsConfigurations": { + "base": null, + "refs": { + "GetMetricStreamOutput$StatisticsConfigurations": "

Each entry in this array displays information about one or more metrics that include extended statistics in the metric stream. For more information about extended statistics, see CloudWatch statistics definitions.

", + "PutMetricStreamInput$StatisticsConfigurations": "

By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT statistics for each metric that is streamed. You can use this parameter to have the metric stream also send extended statistics in the stream. This array can have up to 100 members.

For each entry in this array, you specify one or more metrics and the list of extended statistics to stream for those metrics. The extended statistics that you can stream depend on the stream's OutputFormat. If the OutputFormat is json, you can stream any extended statistic that is supported by CloudWatch, listed in CloudWatch statistics definitions. If the OutputFormat is opentelemetry0.7, you can stream percentile statistics (p??).

" + } + }, + "MetricStreamStatisticsIncludeMetrics": { + "base": null, + "refs": { + "MetricStreamStatisticsConfiguration$IncludeMetrics": "

An array of metric name and namespace pairs that stream the extended statistics listed in the value of the AdditionalStatistics parameter. There can be as many as 100 pairs in the array.

All metrics that match the combination of metric name and namespace will be streamed with the extended statistics, no matter their dimensions.

" + } + }, + "MetricStreamStatisticsMetric": { + "base": "

This object contains the information for one metric that is to be streamed with extended statistics.

", + "refs": { + "MetricStreamStatisticsIncludeMetrics$member": null + } + }, "MetricWidget": { "base": null, "refs": { @@ -1238,6 +1276,7 @@ "Metric$Namespace": "

The namespace of the metric.

", "MetricAlarm$Namespace": "

The namespace of the metric associated with the alarm.

", "MetricStreamFilter$Namespace": "

The name of the metric namespace in the filter.

", + "MetricStreamStatisticsMetric$Namespace": "

The metric namespace for the metric.

", "PutAnomalyDetectorInput$Namespace": "

The namespace of the metric to create the anomaly detection model for.

", "PutMetricAlarmInput$Namespace": "

The namespace for the metric associated specified in MetricName.

", "PutMetricDataInput$Namespace": "

The namespace for the metric data.

To avoid conflicts with Amazon Web Services service namespaces, you should not specify a namespace that begins with AWS/

", @@ -1620,7 +1659,7 @@ "TreatMissingData": { "base": null, "refs": { - "MetricAlarm$TreatMissingData": "

Sets how this alarm is to handle missing data points. If this parameter is omitted, the default behavior of missing is used.

", + "MetricAlarm$TreatMissingData": "

Sets how this alarm is to handle missing data points. The valid values are breaching, notBreaching, ignore, and missing. For more information, see Configuring how CloudWatch alarms treat missing data.

If this parameter is omitted, the default behavior of missing is used.

", "PutMetricAlarmInput$TreatMissingData": "

Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treats Missing Data.

Valid Values: breaching | notBreaching | ignore | missing

" } }, diff --git a/gems/aws-sdk-cloudwatch/CHANGELOG.md b/gems/aws-sdk-cloudwatch/CHANGELOG.md index e94f77bb2a7..f5ba896a215 100644 --- a/gems/aws-sdk-cloudwatch/CHANGELOG.md +++ b/gems/aws-sdk-cloudwatch/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.63.0 (2022-04-13) +------------------ + +* Feature - Adds support for additional statistics in CloudWatch Metric Streams. + 1.62.0 (2022-02-24) ------------------ diff --git a/gems/aws-sdk-cloudwatch/VERSION b/gems/aws-sdk-cloudwatch/VERSION index 76d05362056..af92bdd9f58 100644 --- a/gems/aws-sdk-cloudwatch/VERSION +++ b/gems/aws-sdk-cloudwatch/VERSION @@ -1 +1 @@ -1.62.0 +1.63.0 diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb index 17929d906e5..685051ecb3f 100644 --- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb +++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch.rb @@ -52,6 +52,6 @@ # @!group service module Aws::CloudWatch - GEM_VERSION = '1.62.0' + GEM_VERSION = '1.63.0' end diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/alarm.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/alarm.rb index 82819b33ca2..bd5bb7f8850 100644 --- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/alarm.rb +++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/alarm.rb @@ -180,8 +180,17 @@ def comparison_operator data[:comparison_operator] end - # Sets how this alarm is to handle missing data points. If this - # parameter is omitted, the default behavior of `missing` is used. + # Sets how this alarm is to handle missing data points. The valid values + # are `breaching`, `notBreaching`, `ignore`, and `missing`. For more + # information, see [Configuring how CloudWatch alarms treat missing + # data][1]. + # + # If this parameter is omitted, the default behavior of `missing` is + # used. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data # @return [String] def treat_missing_data data[:treat_missing_data] diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb index c8d70ddc5cf..96eda1570c4 100644 --- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb +++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client.rb @@ -1444,19 +1444,30 @@ def get_insight_rule_report(params = {}, options = {}) req.send_request(options) end - # You can use the `GetMetricData` API to retrieve as many as 500 - # different metrics in a single request, with a total of as many as - # 100,800 data points. You can also optionally perform math expressions - # on the values of the returned statistics, to create new time series - # that represent new insights into your data. For example, using Lambda - # metrics, you could divide the Errors metric by the Invocations metric - # to get an error rate time series. For more information about metric - # math expressions, see [Metric Math Syntax and Functions][1] in the - # *Amazon CloudWatch User Guide*. + # You can use the `GetMetricData` API to retrieve CloudWatch metric + # values. The operation can also include a CloudWatch Metrics Insights + # query, and one or more metric math functions. + # + # A `GetMetricData` operation that does not include a query can retrieve + # as many as 500 different metrics in a single request, with a total of + # as many as 100,800 data points. You can also optionally perform metric + # math expressions on the values of the returned statistics, to create + # new time series that represent new insights into your data. For + # example, using Lambda metrics, you could divide the Errors metric by + # the Invocations metric to get an error rate time series. 
For more + # information about metric math expressions, see [Metric Math Syntax and + # Functions][1] in the *Amazon CloudWatch User Guide*. + # + # If you include a Metrics Insights query, each `GetMetricData` + # operation can include only one query. But the same `GetMetricData` + # operation can also retrieve other metrics. Metrics Insights queries + # can query only the most recent three hours of metric data. For more + # information about Metrics Insights, see [Query your metrics with + # CloudWatch Metrics Insights][2]. # # Calls to the `GetMetricData` API have a different pricing structure # than calls to `GetMetricStatistics`. For more information about - # pricing, see [Amazon CloudWatch Pricing][2]. + # pricing, see [Amazon CloudWatch Pricing][3]. # # Amazon CloudWatch retains metric data as follows: # @@ -1490,16 +1501,29 @@ def get_insight_rule_report(params = {}, options = {}) # collected, the results of the operation are null. CloudWatch does not # perform unit conversions. # + # **Using Metrics Insights queries with metric math** + # + # You can't mix a Metric Insights query and metric math syntax in the + # same expression, but you can reference results from a Metrics Insights + # query within other Metric math expressions. A Metrics Insights query + # without a **GROUP BY** clause returns a single time-series (TS), and + # can be used as input for a metric math expression that expects a + # single time series. A Metrics Insights query with a **GROUP BY** + # clause returns an array of time-series (TS\[\]), and can be used as + # input for a metric math expression that expects an array of time + # series. 
+ # # # # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax - # [2]: https://aws.amazon.com/cloudwatch/pricing/ + # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/query_with_cloudwatch-metrics-insights.html + # [3]: https://aws.amazon.com/cloudwatch/pricing/ # # @option params [required, Array] :metric_data_queries # The metric queries to be returned. A single `GetMetricData` call can # include as many as 500 `MetricDataQuery` structures. Each of these - # structures can specify either a metric to retrieve, or a math - # expression to perform on retrieved data. + # structures can specify either a metric to retrieve, a Metrics Insights + # query, or a math expression to perform on retrieved data. # # @option params [required, Time,DateTime,Date,Integer,String] :start_time # The time stamp indicating the earliest data to be returned. @@ -1872,6 +1896,7 @@ def get_metric_statistics(params = {}, options = {}) # * {Types::GetMetricStreamOutput#creation_date #creation_date} => Time # * {Types::GetMetricStreamOutput#last_update_date #last_update_date} => Time # * {Types::GetMetricStreamOutput#output_format #output_format} => String + # * {Types::GetMetricStreamOutput#statistics_configurations #statistics_configurations} => Array<Types::MetricStreamStatisticsConfiguration> # # @example Request syntax with placeholder values # @@ -1893,6 +1918,12 @@ def get_metric_statistics(params = {}, options = {}) # resp.creation_date #=> Time # resp.last_update_date #=> Time # resp.output_format #=> String, one of "json", "opentelemetry0.7" + # resp.statistics_configurations #=> Array + # resp.statistics_configurations[0].include_metrics #=> Array + # resp.statistics_configurations[0].include_metrics[0].namespace #=> String + # resp.statistics_configurations[0].include_metrics[0].metric_name #=> String + # resp.statistics_configurations[0].additional_statistics #=> Array + # 
resp.statistics_configurations[0].additional_statistics[0] #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricStream AWS API Documentation # @@ -2370,7 +2401,10 @@ def put_anomaly_detector(params = {}, options = {}) # rule are met. # # The alarms specified in a composite alarm's rule expression can - # include metric alarms and other composite alarms. + # include metric alarms and other composite alarms. The rule expression + # of a composite alarm can include as many as 100 underlying alarms. Any + # single alarm can be included in the rule expressions of as many as 150 + # composite alarms. # # Using composite alarms can reduce alarm noise. You can create multiple # metric alarms, and also create a composite alarm and set up alerts @@ -3207,6 +3241,13 @@ def put_metric_data(params = {}, options = {}) # * Stream metrics from only the metric namespaces that you list in # `IncludeFilters`. # + # By default, a metric stream always sends the `MAX`, `MIN`, `SUM`, and + # `SAMPLECOUNT` statistics for each metric that is streamed. You can use + # the `StatisticsConfigurations` parameter to have the metric stream + # also send extended statistics in the stream. Streaming extended + # statistics incurs additional costs. For more information, see [Amazon + # CloudWatch Pricing][2]. + # # When you use `PutMetricStream` to create a new metric stream, the # stream is created in the `running` state. If you use it to update an # existing stream, the state of the stream is not changed. 
@@ -3214,6 +3255,7 @@ def put_metric_data(params = {}, options = {}) # # # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html + # [2]: https://aws.amazon.com/cloudwatch/pricing/ # # @option params [required, String] :name # If you are creating a new metric stream, this is the name for the new @@ -3282,6 +3324,24 @@ def put_metric_data(params = {}, options = {}) # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html # + # @option params [Array] :statistics_configurations + # By default, a metric stream always sends the `MAX`, `MIN`, `SUM`, and + # `SAMPLECOUNT` statistics for each metric that is streamed. You can use + # this parameter to have the metric stream also send extended statistics + # in the stream. This array can have up to 100 members. + # + # For each entry in this array, you specify one or more metrics and the + # list of extended statistics to stream for those metrics. The extended + # statistics that you can stream depend on the stream's `OutputFormat`. + # If the `OutputFormat` is `json`, you can stream any extended statistic + # that is supported by CloudWatch, listed in [ CloudWatch statistics + # definitions][1]. If the `OutputFormat` is `opentelemetry0.7`, you can + # stream percentile statistics (p*??*). 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html + # # @return [Types::PutMetricStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutMetricStreamOutput#arn #arn} => String @@ -3309,6 +3369,17 @@ def put_metric_data(params = {}, options = {}) # value: "TagValue", # required # }, # ], + # statistics_configurations: [ + # { + # include_metrics: [ # required + # { + # namespace: "Namespace", # required + # metric_name: "MetricName", # required + # }, + # ], + # additional_statistics: ["MetricStreamStatistic"], # required + # }, + # ], # }) # # @example Response structure @@ -3554,7 +3625,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-cloudwatch' - context[:gem_version] = '1.62.0' + context[:gem_version] = '1.63.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb index 8a934f6d15b..addbde7ad53 100644 --- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb +++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/client_api.rb @@ -193,6 +193,12 @@ module ClientApi MetricStreamNames = Shapes::ListShape.new(name: 'MetricStreamNames') MetricStreamOutputFormat = Shapes::StringShape.new(name: 'MetricStreamOutputFormat') MetricStreamState = Shapes::StringShape.new(name: 'MetricStreamState') + MetricStreamStatistic = Shapes::StringShape.new(name: 'MetricStreamStatistic') + MetricStreamStatisticsAdditionalStatistics = Shapes::ListShape.new(name: 'MetricStreamStatisticsAdditionalStatistics') + MetricStreamStatisticsConfiguration = Shapes::StructureShape.new(name: 'MetricStreamStatisticsConfiguration') + MetricStreamStatisticsConfigurations = Shapes::ListShape.new(name: 'MetricStreamStatisticsConfigurations') + 
MetricStreamStatisticsIncludeMetrics = Shapes::ListShape.new(name: 'MetricStreamStatisticsIncludeMetrics') + MetricStreamStatisticsMetric = Shapes::StructureShape.new(name: 'MetricStreamStatisticsMetric') MetricWidget = Shapes::StringShape.new(name: 'MetricWidget') MetricWidgetImage = Shapes::BlobShape.new(name: 'MetricWidgetImage') Metrics = Shapes::ListShape.new(name: 'Metrics') @@ -542,6 +548,7 @@ module ClientApi GetMetricStreamOutput.add_member(:creation_date, Shapes::ShapeRef.new(shape: Timestamp, location_name: "CreationDate")) GetMetricStreamOutput.add_member(:last_update_date, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastUpdateDate")) GetMetricStreamOutput.add_member(:output_format, Shapes::ShapeRef.new(shape: MetricStreamOutputFormat, location_name: "OutputFormat")) + GetMetricStreamOutput.add_member(:statistics_configurations, Shapes::ShapeRef.new(shape: MetricStreamStatisticsConfigurations, location_name: "StatisticsConfigurations")) GetMetricStreamOutput.struct_class = Types::GetMetricStreamOutput GetMetricWidgetImageInput.add_member(:metric_widget, Shapes::ShapeRef.new(shape: MetricWidget, required: true, location_name: "MetricWidget")) @@ -751,6 +758,20 @@ module ClientApi MetricStreamNames.member = Shapes::ShapeRef.new(shape: MetricStreamName) + MetricStreamStatisticsAdditionalStatistics.member = Shapes::ShapeRef.new(shape: MetricStreamStatistic) + + MetricStreamStatisticsConfiguration.add_member(:include_metrics, Shapes::ShapeRef.new(shape: MetricStreamStatisticsIncludeMetrics, required: true, location_name: "IncludeMetrics")) + MetricStreamStatisticsConfiguration.add_member(:additional_statistics, Shapes::ShapeRef.new(shape: MetricStreamStatisticsAdditionalStatistics, required: true, location_name: "AdditionalStatistics")) + MetricStreamStatisticsConfiguration.struct_class = Types::MetricStreamStatisticsConfiguration + + MetricStreamStatisticsConfigurations.member = Shapes::ShapeRef.new(shape: MetricStreamStatisticsConfiguration) + + 
MetricStreamStatisticsIncludeMetrics.member = Shapes::ShapeRef.new(shape: MetricStreamStatisticsMetric) + + MetricStreamStatisticsMetric.add_member(:namespace, Shapes::ShapeRef.new(shape: Namespace, required: true, location_name: "Namespace")) + MetricStreamStatisticsMetric.add_member(:metric_name, Shapes::ShapeRef.new(shape: MetricName, required: true, location_name: "MetricName")) + MetricStreamStatisticsMetric.struct_class = Types::MetricStreamStatisticsMetric + Metrics.member = Shapes::ShapeRef.new(shape: Metric) MissingRequiredParameterException.add_member(:message, Shapes::ShapeRef.new(shape: AwsQueryErrorMessage, location_name: "message")) @@ -833,6 +854,7 @@ module ClientApi PutMetricStreamInput.add_member(:role_arn, Shapes::ShapeRef.new(shape: AmazonResourceName, required: true, location_name: "RoleArn")) PutMetricStreamInput.add_member(:output_format, Shapes::ShapeRef.new(shape: MetricStreamOutputFormat, required: true, location_name: "OutputFormat")) PutMetricStreamInput.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "Tags")) + PutMetricStreamInput.add_member(:statistics_configurations, Shapes::ShapeRef.new(shape: MetricStreamStatisticsConfigurations, location_name: "StatisticsConfigurations")) PutMetricStreamInput.struct_class = Types::PutMetricStreamInput PutMetricStreamOutput.add_member(:arn, Shapes::ShapeRef.new(shape: AmazonResourceName, location_name: "Arn")) diff --git a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb index f37044f9bc6..15dbe8c2d3f 100644 --- a/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb +++ b/gems/aws-sdk-cloudwatch/lib/aws-sdk-cloudwatch/types.rb @@ -1031,10 +1031,13 @@ class DescribeInsightRulesOutput < Struct.new( end # A dimension is a name/value pair that is part of the identity of a - # metric. You can assign up to 10 dimensions to a metric. 
Because - # dimensions are part of the unique identifier for a metric, whenever - # you add a unique name/value pair to one of your metrics, you are - # creating a new variation of that metric. + # metric. Because dimensions are part of the unique identifier for a + # metric, whenever you add a unique name/value pair to one of your + # metrics, you are creating a new variation of that metric. For example, + # many Amazon EC2 metrics publish `InstanceId` as a dimension name, and + # the actual instance ID as the value for that dimension. + # + # You can assign up to 10 dimensions to a metric. # # @note When making an API call, you may pass Dimension # data as a hash: @@ -1046,7 +1049,8 @@ class DescribeInsightRulesOutput < Struct.new( # # @!attribute [rw] name # The name of the dimension. Dimension names must contain only ASCII - # characters and must include at least one non-whitespace character. + # characters, must include at least one non-whitespace character, and + # cannot start with a colon (`:`). # @return [String] # # @!attribute [rw] value @@ -1429,8 +1433,8 @@ class GetInsightRuleReportOutput < Struct.new( # @!attribute [rw] metric_data_queries # The metric queries to be returned. A single `GetMetricData` call can # include as many as 500 `MetricDataQuery` structures. Each of these - # structures can specify either a metric to retrieve, or a math - # expression to perform on retrieved data. + # structures can specify either a metric to retrieve, a Metrics + # Insights query, or a math expression to perform on retrieved data. # @return [Array] # # @!attribute [rw] start_time @@ -1784,8 +1788,26 @@ class GetMetricStreamInput < Struct.new( # @return [Time] # # @!attribute [rw] output_format + # The output format for the stream. Valid values are `json` and + # `opentelemetry0.7`. For more information about metric stream output + # formats, see [ Metric streams output formats][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html # @return [String] # + # @!attribute [rw] statistics_configurations + # Each entry in this array displays information about one or more + # metrics that include extended statistics in the metric stream. For + # more information about extended statistics, see [ CloudWatch + # statistics definitions][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html + # @return [Array] + # # @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricStreamOutput AWS API Documentation # class GetMetricStreamOutput < Struct.new( @@ -1798,7 +1820,8 @@ class GetMetricStreamOutput < Struct.new( :state, :creation_date, :last_update_date, - :output_format) + :output_format, + :statistics_configurations) SENSITIVE = [] include Aws::Structure end @@ -2592,8 +2615,17 @@ class Metric < Struct.new( # @return [String] # # @!attribute [rw] treat_missing_data - # Sets how this alarm is to handle missing data points. If this - # parameter is omitted, the default behavior of `missing` is used. + # Sets how this alarm is to handle missing data points. The valid + # values are `breaching`, `notBreaching`, `ignore`, and `missing`. For + # more information, see [Configuring how CloudWatch alarms treat + # missing data][1]. + # + # If this parameter is omitted, the default behavior of `missing` is + # used. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data # @return [String] # # @!attribute [rw] evaluate_low_sample_count_percentile @@ -2658,9 +2690,9 @@ class MetricAlarm < Struct.new( # # When used in `GetMetricData`, it indicates the metric data to return, # and whether this call is just retrieving a batch set of data for one - # metric, or is performing a math expression on metric data. 
A single - # `GetMetricData` call can include up to 500 `MetricDataQuery` - # structures. + # metric, or is performing a Metrics Insights query or a math + # expression. A single `GetMetricData` call can include up to 500 + # `MetricDataQuery` structures. # # When used in `PutMetricAlarm`, it enables you to create an alarm based # on a metric math expression. Each `MetricDataQuery` in the array @@ -2733,20 +2765,25 @@ class MetricAlarm < Struct.new( # @return [Types::MetricStat] # # @!attribute [rw] expression - # The math expression to be performed on the returned data, if this - # object is performing a math expression. This expression can use the - # `Id` of the other metrics to refer to those metrics, and can also - # use the `Id` of other expressions to use the result of those - # expressions. For more information about metric math expressions, see - # [Metric Math Syntax and Functions][1] in the *Amazon CloudWatch User + # This field can contain either a Metrics Insights query, or a metric + # math expression to be performed on the returned data. For more + # information about Metrics Insights queries, see [Metrics Insights + # query components and syntax][1] in the *Amazon CloudWatch User # Guide*. # + # A math expression can use the `Id` of the other metrics or queries + # to refer to those metrics, and can also use the `Id` of other + # expressions to use the result of those expressions. For more + # information about metric math expressions, see [Metric Math Syntax + # and Functions][2] in the *Amazon CloudWatch User Guide*. + # # Within each MetricDataQuery object, you must specify either # `Expression` or `MetricStat` but not both. 
# # # - # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage + # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax # @return [String] # # @!attribute [rw] label @@ -3188,6 +3225,96 @@ class MetricStreamFilter < Struct.new( include Aws::Structure end + # By default, a metric stream always sends the `MAX`, `MIN`, `SUM`, and + # `SAMPLECOUNT` statistics for each metric that is streamed. This + # structure contains information for one metric that includes extended + # statistics in the stream. For more information about extended + # statistics, see CloudWatch, listed in [ CloudWatch statistics + # definitions][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html + # + # @note When making an API call, you may pass MetricStreamStatisticsConfiguration + # data as a hash: + # + # { + # include_metrics: [ # required + # { + # namespace: "Namespace", # required + # metric_name: "MetricName", # required + # }, + # ], + # additional_statistics: ["MetricStreamStatistic"], # required + # } + # + # @!attribute [rw] include_metrics + # An array of metric name and namespace pairs that stream the extended + # statistics listed in the value of the `AdditionalStatistics` + # parameter. There can be as many as 100 pairs in the array. + # + # All metrics that match the combination of metric name and namespace + # will be streamed with the extended statistics, no matter their + # dimensions. + # @return [Array] + # + # @!attribute [rw] additional_statistics + # The list of extended statistics that are to be streamed for the + # metrics listed in the `IncludeMetrics` array in this structure. This + # list can include as many as 20 statistics. 
+ # + # If the `OutputFormat` for the stream is `opentelemetry0.7`, the only + # valid values are `p?? ` percentile statistics such as `p90`, `p99` + # and so on. + # + # If the `OutputFormat` for the stream is `json`, the valid values are + # include the abbreviations for all of the extended statistics listed + # in [ CloudWatch statistics definitions][1]. For example, this + # includes `tm98, ` `wm90`, `PR(:300)`, and so on. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/MetricStreamStatisticsConfiguration AWS API Documentation + # + class MetricStreamStatisticsConfiguration < Struct.new( + :include_metrics, + :additional_statistics) + SENSITIVE = [] + include Aws::Structure + end + + # This object contains the information for one metric that is to + # streamed with extended statistics. + # + # @note When making an API call, you may pass MetricStreamStatisticsMetric + # data as a hash: + # + # { + # namespace: "Namespace", # required + # metric_name: "MetricName", # required + # } + # + # @!attribute [rw] namespace + # The metric namespace for the metric. + # @return [String] + # + # @!attribute [rw] metric_name + # The name of the metric. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/MetricStreamStatisticsMetric AWS API Documentation + # + class MetricStreamStatisticsMetric < Struct.new( + :namespace, + :metric_name) + SENSITIVE = [] + include Aws::Structure + end + # An input parameter that is required is missing. 
# # @!attribute [rw] message @@ -4109,6 +4236,17 @@ class PutMetricDataInput < Struct.new( # value: "TagValue", # required # }, # ], + # statistics_configurations: [ + # { + # include_metrics: [ # required + # { + # namespace: "Namespace", # required + # metric_name: "MetricName", # required + # }, + # ], + # additional_statistics: ["MetricStreamStatistic"], # required + # }, + # ], # } # # @!attribute [rw] name @@ -4187,6 +4325,25 @@ class PutMetricDataInput < Struct.new( # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html # @return [Array] # + # @!attribute [rw] statistics_configurations + # By default, a metric stream always sends the `MAX`, `MIN`, `SUM`, + # and `SAMPLECOUNT` statistics for each metric that is streamed. You + # can use this parameter to have the metric stream also send extended + # statistics in the stream. This array can have up to 100 members. + # + # For each entry in this array, you specify one or more metrics and + # the list of extended statistics to stream for those metrics. The + # extended statistics that you can stream depend on the stream's + # `OutputFormat`. If the `OutputFormat` is `json`, you can stream any + # extended statistic that is supported by CloudWatch, listed in [ + # CloudWatch statistics definitions][1]. If the `OutputFormat` is + # `opentelemetry0.7`, you can stream percentile statistics (p*??*). 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html + # @return [Array] + # # @see http://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutMetricStreamInput AWS API Documentation # class PutMetricStreamInput < Struct.new( @@ -4196,7 +4353,8 @@ class PutMetricStreamInput < Struct.new( :firehose_arn, :role_arn, :output_format, - :tags) + :tags, + :statistics_configurations) SENSITIVE = [] include Aws::Structure end diff --git a/gems/aws-sdk-fsx/CHANGELOG.md b/gems/aws-sdk-fsx/CHANGELOG.md index cfd3022f4b6..345095d4754 100644 --- a/gems/aws-sdk-fsx/CHANGELOG.md +++ b/gems/aws-sdk-fsx/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.55.0 (2022-04-13) +------------------ + +* Feature - This release adds support for deploying FSx for ONTAP file systems in a single Availability Zone. + 1.54.0 (2022-04-05) ------------------ diff --git a/gems/aws-sdk-fsx/VERSION b/gems/aws-sdk-fsx/VERSION index b7921ae87bc..094d6ad00ce 100644 --- a/gems/aws-sdk-fsx/VERSION +++ b/gems/aws-sdk-fsx/VERSION @@ -1 +1 @@ -1.54.0 +1.55.0 diff --git a/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb b/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb index 22887838d9d..bee11701b01 100644 --- a/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb +++ b/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb @@ -48,6 +48,6 @@ # @!group service module Aws::FSx - GEM_VERSION = '1.54.0' + GEM_VERSION = '1.55.0' end diff --git a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb index 28d147c4bb7..a35e3a2a3c0 100644 --- a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb +++ b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb @@ -773,7 +773,7 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.file_system.administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # resp.backup.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # 
resp.backup.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.backup.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -896,7 +896,7 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -1243,7 +1243,7 @@ def copy_backup(params = {}, options = {}) # resp.backup.file_system.administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # 
resp.backup.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backup.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.backup.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -1366,7 +1366,7 @@ def copy_backup(params = {}, options = {}) # resp.backup.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -2057,7 +2057,7 @@ def create_data_repository_task(params = {}, options = {}) # ontap_configuration: { # automatic_backup_retention_days: 1, # 
daily_automatic_backup_start_time: "DailyTime", - # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1 + # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1, SINGLE_AZ_1 # endpoint_ip_address_range: "IpAddressRange", # fsx_admin_password: "AdminPassword", # disk_iops_configuration: { @@ -2232,7 +2232,7 @@ def create_data_repository_task(params = {}, options = {}) # resp.file_system.administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -2706,7 +2706,7 @@ def create_file_system(params = {}, options = {}) # resp.file_system.administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -2890,7 +2890,7 @@ def create_file_system_from_backup(params = {}, options = {}) 
# resp.snapshot.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -3270,7 +3270,7 @@ def create_storage_virtual_machine(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # 
resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -3484,7 +3484,7 @@ def create_volume(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -4204,7 +4204,7 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].file_system.administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # resp.backups[0].file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backups[0].file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backups[0].file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.backups[0].file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # 
resp.backups[0].file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.backups[0].file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backups[0].file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -4327,7 +4327,7 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -4871,7 +4871,7 @@ def describe_file_system_aliases(params = {}, options = {}) # resp.file_systems[0].administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # resp.file_systems[0].ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_systems[0].ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_systems[0].ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # 
resp.file_systems[0].ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.file_systems[0].ontap_configuration.endpoint_ip_address_range #=> String # resp.file_systems[0].ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_systems[0].ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -5056,7 +5056,7 @@ def describe_file_systems(params = {}, options = {}) # resp.snapshots[0].administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -5357,7 +5357,7 @@ def describe_storage_virtual_machines(params = {}, options = {}) # resp.volumes[0].administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # 
resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -5723,7 +5723,7 @@ def list_tags_for_resource(params = {}, options = {}) # resp.file_system.administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -6362,7 +6362,7 @@ def update_data_repository_association(params = {}, options = {}) # resp.file_system.administrative_actions[0].target_snapshot_values.administrative_actions #=> Types::AdministrativeActions # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> 
String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -6508,7 +6508,7 @@ def update_file_system(params = {}, options = {}) # resp.snapshot.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -6839,7 +6839,7 @@ def update_storage_virtual_machine(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # 
resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1" + # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1" # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -6920,7 +6920,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-fsx' - context[:gem_version] = '1.54.0' + context[:gem_version] = '1.55.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb index 9ae288fdf79..653809053ac 100644 --- a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb +++ b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb @@ -1722,7 +1722,7 @@ class CreateFileSystemLustreConfiguration < Struct.new( # { # automatic_backup_retention_days: 1, # daily_automatic_backup_start_time: "DailyTime", - # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1 + # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1, SINGLE_AZ_1 # endpoint_ip_address_range: "IpAddressRange", # fsx_admin_password: "AdminPassword", # disk_iops_configuration: { @@ -1749,14 +1749,28 @@ class CreateFileSystemLustreConfiguration < Struct.new( # # @!attribute [rw] deployment_type # Specifies the FSx for ONTAP file system deployment type to use in - # creating the file system. `MULTI_AZ_1` is the supported ONTAP - # deployment type. 
+ # creating the file system. + # + # * `MULTI_AZ_1` - (Default) A high availability file system + # configured for Multi-AZ redundancy to tolerate temporary + # Availability Zone (AZ) unavailability. + # + # * `SINGLE_AZ_1` - A file system configured for Single-AZ redundancy. + # + # For information about the use cases for Multi-AZ and Single-AZ + # deployments, refer to [Choosing Multi-AZ or Single-AZ file system + # deployment][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-multiAZ.html # @return [String] # # @!attribute [rw] endpoint_ip_address_range - # Specifies the IP address range in which the endpoints to access your - # file system will be created. By default, Amazon FSx selects an - # unused IP address range for you from the 198.19.* range. + # (Multi-AZ only) Specifies the IP address range in which the + # endpoints to access your file system will be created. By default, + # Amazon FSx selects an unused IP address range for you from the + # 198.19.* range. # # The Endpoint IP address range you select for your file system must # exist outside the VPC's CIDR range and must be at least /30 or @@ -1780,11 +1794,11 @@ class CreateFileSystemLustreConfiguration < Struct.new( # @return [String] # # @!attribute [rw] route_table_ids - # Specifies the virtual private cloud (VPC) route tables in which your - # file system's endpoints will be created. You should specify all VPC - # route tables associated with the subnets in which your clients are - # located. By default, Amazon FSx selects your VPC's default route - # table. + # (Multi-AZ only) Specifies the virtual private cloud (VPC) route + # tables in which your file system's endpoints will be created. You + # should specify all VPC route tables associated with the subnets in + # which your clients are located. By default, Amazon FSx selects your + # VPC's default route table. 
# @return [Array] # # @!attribute [rw] throughput_capacity @@ -2027,7 +2041,7 @@ class CreateFileSystemOpenZFSConfiguration < Struct.new( # ontap_configuration: { # automatic_backup_retention_days: 1, # daily_automatic_backup_start_time: "DailyTime", - # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1 + # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1, SINGLE_AZ_1 # endpoint_ip_address_range: "IpAddressRange", # fsx_admin_password: "AdminPassword", # disk_iops_configuration: { @@ -5936,12 +5950,27 @@ class NotServiceResourceError < Struct.new( # @return [String] # # @!attribute [rw] deployment_type - # The ONTAP file system deployment type. + # Specifies the FSx for ONTAP file system deployment type in use in + # the file system. + # + # * `MULTI_AZ_1` - (Default) A high availability file system + # configured for Multi-AZ redundancy to tolerate temporary + # Availability Zone (AZ) unavailability. + # + # * `SINGLE_AZ_1` - A file system configured for Single-AZ redundancy. + # + # For information about the use cases for Multi-AZ and Single-AZ + # deployments, refer to [Choosing Multi-AZ or Single-AZ file system + # deployment][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-multiAZ.html # @return [String] # # @!attribute [rw] endpoint_ip_address_range - # The IP address range in which the endpoints to access your file - # system are created. + # (Multi-AZ only) The IP address range in which the endpoints to + # access your file system are created. # # The Endpoint IP address range you select for your file system must # exist outside the VPC's CIDR range and must be at least /30 or @@ -5971,8 +6000,8 @@ class NotServiceResourceError < Struct.new( # @return [String] # # @!attribute [rw] route_table_ids - # The VPC route tables in which your file system's endpoints are - # created. + # (Multi-AZ only) The VPC route tables in which your file system's + # endpoints are created. 
# @return [Array] # # @!attribute [rw] throughput_capacity