diff --git a/.chloggen/awsemfexporter_retein_intitial_delta_value.yaml b/.chloggen/awsemfexporter_retein_intitial_delta_value.yaml new file mode 100644 index 000000000000..62468b40cd45 --- /dev/null +++ b/.chloggen/awsemfexporter_retein_intitial_delta_value.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: awsemfexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: The AWS EMF exporter now supports the additional configuration flag `retain_initial_value_of_delta_metric`. With this flag active the first value of a metric is not discarded but instead sent to AWS. + +# One or more tracking issues related to the change +issues: [16218] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/exporter/awsemfexporter/README.md b/exporter/awsemfexporter/README.md index a2ab37af643d..02e40ef928f9 100644 --- a/exporter/awsemfexporter/README.md +++ b/exporter/awsemfexporter/README.md @@ -18,24 +18,25 @@ Convert OpenTelemetry ```Int64DataPoints```, ```DoubleDataPoints```, ```SummaryD The following exporter configuration parameters are supported. 
-| Name | Description | Default | -|:---------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ------- | -| `log_group_name` | Customized log group name which supports `{ClusterName}` and `{TaskId}` placeholders. One valid example is `/aws/metrics/{ClusterName}`. It will search for `ClusterName` (or `aws.ecs.cluster.name`) resource attribute in the metrics data and replace with the actual cluster name. If none of them are found in the resource attribute map, `{ClusterName}` will be replaced by `undefined`. Similar way, for the `{TaskId}`, it searches for `TaskId` (or `aws.ecs.task.id`) key in the resource attribute map. For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`) |"/metrics/default"| -| `log_stream_name` | Customized log stream name which supports `{TaskId}`, `{ClusterName}`, `{NodeName}`, `{ContainerInstanceId}`, and `{TaskDefinitionFamily}` placeholders. One valid example is `{TaskId}`. It will search for `TaskId` (or `aws.ecs.task.id`) resource attribute in the metrics data and replace with the actual task id. 
If none of them are found in the resource attribute map, `{TaskId}` will be replaced by `undefined`. Similarly, for the `{TaskDefinitionFamily}`, it searches for `TaskDefinitionFamily` (or `aws.ecs.task.family`). For the `{ClusterName}`, it searches for `ClusterName` (or `aws.ecs.cluster.name`). For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`). For `{ContainerInstanceId}`, it searches for `ContainerInstanceId` (or `aws.ecs.container.instance.id`). (Note: ContainerInstanceId (or `aws.ecs.container.instance.id`) only works for AWS ECS EC2 launch type. |"otel-stream"| -| `log_retention` | LogRetention is the option to set the log retention policy for only newly created CloudWatch Log Groups. Defaults to Never Expire if not specified or set to 0. Possible values for retention in days are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653. |"Never Expire"| -| `namespace` | Customized CloudWatch metrics namespace | "default" | -| `endpoint` | Optionally override the default CloudWatch service endpoint. | | -| `no_verify_ssl` | Enable or disable TLS certificate verification. | false | -| `proxy_address` | Upload Structured Logs to AWS CloudWatch through a proxy. | | -| `region` | Send Structured Logs to AWS CloudWatch in a specific region. If this field is not present in config, environment variable "AWS_REGION" can then be used to set region. | determined by metadata | -| `role_arn` | IAM role to upload segments to a different account. | | -| `max_retries` | Maximum number of retries before abandoning an attempt to post data. | 1 | -| `dimension_rollup_option` | DimensionRollupOption is the option for metrics dimension rollup. 
Three options are available: `NoDimensionRollup`, `SingleDimensionRollupOnly` and `ZeroAndSingleDimensionRollup` |"ZeroAndSingleDimensionRollup" (Enable both zero dimension rollup and single dimension rollup)| -| `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config onption- `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` | -| `output_destination` | "output_destination" is an option to specify the EMFExporter output. Currently, two options are available. "cloudwatch" or "stdout" | `cloudwatch` | -| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] | -| [`metric_declarations`](#metric_declaration) | List of rules for filtering exported metrics and their dimensions. | [ ] | -| [`metric_descriptors`](#metric_descriptor) | List of rules for inserting or updating metric descriptors. 
| [ ]| +| Name | Description | Default | +|:---------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------| +| `log_group_name` | Customized log group name which supports `{ClusterName}` and `{TaskId}` placeholders. One valid example is `/aws/metrics/{ClusterName}`. It will search for `ClusterName` (or `aws.ecs.cluster.name`) resource attribute in the metrics data and replace with the actual cluster name. If none of them are found in the resource attribute map, `{ClusterName}` will be replaced by `undefined`. Similar way, for the `{TaskId}`, it searches for `TaskId` (or `aws.ecs.task.id`) key in the resource attribute map. For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`) | "/metrics/default" | +| `log_stream_name` | Customized log stream name which supports `{TaskId}`, `{ClusterName}`, `{NodeName}`, `{ContainerInstanceId}`, and `{TaskDefinitionFamily}` placeholders. One valid example is `{TaskId}`. 
It will search for `TaskId` (or `aws.ecs.task.id`) resource attribute in the metrics data and replace with the actual task id. If none of them are found in the resource attribute map, `{TaskId}` will be replaced by `undefined`. Similarly, for the `{TaskDefinitionFamily}`, it searches for `TaskDefinitionFamily` (or `aws.ecs.task.family`). For the `{ClusterName}`, it searches for `ClusterName` (or `aws.ecs.cluster.name`). For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`). For `{ContainerInstanceId}`, it searches for `ContainerInstanceId` (or `aws.ecs.container.instance.id`). (Note: ContainerInstanceId (or `aws.ecs.container.instance.id`) only works for AWS ECS EC2 launch type. | "otel-stream" | +| `log_retention` | LogRetention is the option to set the log retention policy for only newly created CloudWatch Log Groups. Defaults to Never Expire if not specified or set to 0. Possible values for retention in days are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653. | "Never Expire" | +| `namespace` | Customized CloudWatch metrics namespace | "default" | +| `endpoint` | Optionally override the default CloudWatch service endpoint. | | +| `no_verify_ssl` | Enable or disable TLS certificate verification. | false | +| `proxy_address` | Upload Structured Logs to AWS CloudWatch through a proxy. | | +| `region` | Send Structured Logs to AWS CloudWatch in a specific region. If this field is not present in config, environment variable "AWS_REGION" can then be used to set region. | determined by metadata | +| `role_arn` | IAM role to upload segments to a different account. | | +| `max_retries` | Maximum number of retries before abandoning an attempt to post data. | 1 | +| `dimension_rollup_option` | DimensionRollupOption is the option for metrics dimension rollup. 
Three options are available: `NoDimensionRollup`, `SingleDimensionRollupOnly` and `ZeroAndSingleDimensionRollup` | "ZeroAndSingleDimensionRollup" (Enable both zero dimension rollup and single dimension rollup) | +| `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config onption- `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` | +| `output_destination` | "output_destination" is an option to specify the EMFExporter output. Currently, two options are available. "cloudwatch" or "stdout" | `cloudwatch` | +| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] | +| [`metric_declarations`](#metric_declaration) | List of rules for filtering exported metrics and their dimensions. | [ ] | +| [`metric_descriptors`](#metric_descriptor) | List of rules for inserting or updating metric descriptors. | [ ] | +| `retain_initial_value_of_delta_metric` | This option specifies how the first value of a metric is handled. AWS EMF expects metric values to only contain deltas to the previous value. In the default case the first received value is therefor not sent to AWS but only used as a baseline for follow up changes to this metric. This is fine for high throughput metrics with stable labels (e.g. `requests{code=200}`). In this case it does not matter if the first value of this metric is discarded. However when your metric describes infrequent events or events with high label cardinality, then the exporter in default configuration would still drop the first occurrence of this metric. 
With this configuration value set to `true` the first value of all metrics will instead be sent to AWS. | false | ### metric_declaration A metric_declaration section characterizes a rule to be used to set dimensions for exported metrics, filtered by the incoming metrics' labels and metric names. diff --git a/exporter/awsemfexporter/config.go b/exporter/awsemfexporter/config.go index a6a2b2b19191..0860605e7b02 100644 --- a/exporter/awsemfexporter/config.go +++ b/exporter/awsemfexporter/config.go @@ -43,6 +43,11 @@ type Config struct { // Namespace is a container for CloudWatch metrics. // Metrics in different namespaces are isolated from each other. Namespace string `mapstructure:"namespace"` + // RetainInitialValueOfDeltaMetric is the flag to signal that the initial value of a metric is a valid datapoint. + // The default behavior is that the first value occurrence of a metric is set as the baseline for the calculation of + // the delta to the next occurrence. With this flag set to true the exporter will instead use this first value as the + // initial delta value. This is especially useful when handling low frequency metrics. + RetainInitialValueOfDeltaMetric bool `mapstructure:"retain_initial_value_of_delta_metric"` // DimensionRollupOption is the option for metrics dimension rollup. Three options are available, default option is "ZeroAndSingleDimensionRollup". 
// "ZeroAndSingleDimensionRollup" - Enable both zero dimension rollup and single dimension rollup // "SingleDimensionRollupOnly" - Enable single dimension rollup diff --git a/exporter/awsemfexporter/datapoint.go b/exporter/awsemfexporter/datapoint.go index 9703e65671f2..cc33ae5c22e7 100644 --- a/exporter/awsemfexporter/datapoint.go +++ b/exporter/awsemfexporter/datapoint.go @@ -63,11 +63,12 @@ type dataPoints interface { // deltaMetricMetadata contains the metadata required to perform rate/delta calculation type deltaMetricMetadata struct { - adjustToDelta bool - metricName string - namespace string - logGroup string - logStream string + adjustToDelta bool + retainInitialValueForDelta bool + metricName string + namespace string + logGroup string + logStream string } // numberDataPointSlice is a wrapper for pmetric.NumberDataPointSlice @@ -114,6 +115,13 @@ func (dps numberDataPointSlice) At(i int) (dataPoint, bool) { var deltaVal interface{} mKey := aws.NewKey(dps.deltaMetricMetadata, labels) deltaVal, retained = deltaMetricCalculator.Calculate(mKey, metricVal, metric.Timestamp().AsTime()) + + // If a delta to the previous data point could not be computed use the current metric value instead + if !retained && dps.retainInitialValueForDelta { + retained = true + deltaVal = metricVal + } + if !retained { return dataPoint{}, retained } @@ -163,6 +171,13 @@ func (dps summaryDataPointSlice) At(i int) (dataPoint, bool) { mKey := aws.NewKey(dps.deltaMetricMetadata, labels) delta, retained = summaryMetricCalculator.Calculate(mKey, summaryMetricEntry{sum, count}, metric.Timestamp().AsTime()) + + // If a delta to the previous data point could not be computed use the current metric value instead + if !retained && dps.retainInitialValueForDelta { + retained = true + delta = summaryMetricEntry{sum, count} + } + if !retained { return dataPoint{}, retained } @@ -208,6 +223,7 @@ func createLabels(attributes pcommon.Map, instrLibName string) map[string]string func getDataPoints(pmd 
pmetric.Metric, metadata cWMetricMetadata, logger *zap.Logger) (dps dataPoints) { adjusterMetadata := deltaMetricMetadata{ false, + metadata.retainInitialValueForDelta, pmd.Name(), metadata.namespace, metadata.logGroup, diff --git a/exporter/awsemfexporter/datapoint_test.go b/exporter/awsemfexporter/datapoint_test.go index 1b69eb95f95d..f0d94dd0671a 100644 --- a/exporter/awsemfexporter/datapoint_test.go +++ b/exporter/awsemfexporter/datapoint_test.go @@ -15,6 +15,7 @@ package awsemfexporter import ( + "fmt" "reflect" "testing" "time" @@ -275,131 +276,145 @@ func setupDataPointCache() { } func TestIntDataPointSliceAt(t *testing.T) { - setupDataPointCache() - - instrLibName := "cloudwatch-otel" - - testDeltaCases := []struct { - testName string - adjustToDelta bool - value interface{} - calculatedValue interface{} - }{ - { - "w/ 1st delta calculation", - true, - int64(-17), - float64(0), - }, - { - "w/ 2nd delta calculation", - true, - int64(1), - float64(18), - }, - { - "w/o delta calculation", - false, - int64(10), - float64(10), - }, - } - - for i, tc := range testDeltaCases { - t.Run(tc.testName, func(t *testing.T) { - testDPS := pmetric.NewNumberDataPointSlice() - testDP := testDPS.AppendEmpty() - testDP.SetIntValue(tc.value.(int64)) - testDP.Attributes().PutStr("label", "value") + for _, retainInitialValueOfDeltaMetric := range []bool{true, false} { + setupDataPointCache() + + instrLibName := "cloudwatch-otel" + + testDeltaCases := []struct { + testName string + adjustToDelta bool + value interface{} + calculatedValue interface{} + expectedRetained bool + }{ + { + fmt.Sprintf("w/ 1st delta calculation retainInitialValueOfDeltaMetric=%t", retainInitialValueOfDeltaMetric), + true, + int64(-17), + float64(-17), + retainInitialValueOfDeltaMetric, + }, + { + "w/ 2nd delta calculation", + true, + int64(1), + float64(18), + true, + }, + { + "w/o delta calculation", + false, + int64(10), + float64(10), + true, + }, + } - dps := numberDataPointSlice{ - instrLibName, - 
deltaMetricMetadata{ - tc.adjustToDelta, - "foo", - "namespace", - "log-group", - "log-stream", - }, - testDPS, - } + for _, tc := range testDeltaCases { + t.Run(tc.testName, func(t *testing.T) { + testDPS := pmetric.NewNumberDataPointSlice() + testDP := testDPS.AppendEmpty() + testDP.SetIntValue(tc.value.(int64)) + testDP.Attributes().PutStr("label", "value") + + dps := numberDataPointSlice{ + instrLibName, + deltaMetricMetadata{ + tc.adjustToDelta, + retainInitialValueOfDeltaMetric, + "foo", + "namespace", + "log-group", + "log-stream", + }, + testDPS, + } - expectedDP := dataPoint{ - value: tc.calculatedValue, - labels: map[string]string{ - oTellibDimensionKey: instrLibName, - "label": "value", - }, - } + expectedDP := dataPoint{ + value: tc.calculatedValue, + labels: map[string]string{ + oTellibDimensionKey: instrLibName, + "label": "value", + }, + } - assert.Equal(t, 1, dps.Len()) - dp, retained := dps.At(0) - assert.Equal(t, i > 0, retained) - if retained { - assert.Equal(t, expectedDP.labels, dp.labels) - assert.InDelta(t, expectedDP.value.(float64), dp.value.(float64), 0.02) - } - }) + assert.Equal(t, 1, dps.Len()) + dp, retained := dps.At(0) + assert.Equal(t, tc.expectedRetained, retained) + if retained { + assert.Equal(t, expectedDP.labels, dp.labels) + assert.InDelta(t, expectedDP.value.(float64), dp.value.(float64), 0.02) + } + }) + } } } func TestDoubleDataPointSliceAt(t *testing.T) { - setupDataPointCache() - - instrLibName := "cloudwatch-otel" - - testDeltaCases := []struct { - testName string - adjustToDelta bool - value interface{} - calculatedValue interface{} - }{ - { - "w/ 1st delta calculation", - true, - 0.4, - 0.4, - }, - { - "w/ 2nd delta calculation", - true, - 0.8, - 0.4, - }, - { - "w/o delta calculation", - false, - 0.5, - 0.5, - }, - } - - for i, tc := range testDeltaCases { - t.Run(tc.testName, func(t *testing.T) { - testDPS := pmetric.NewNumberDataPointSlice() - testDP := testDPS.AppendEmpty() - 
testDP.SetDoubleValue(tc.value.(float64)) - testDP.Attributes().PutStr("label1", "value1") + for _, retainInitialValueOfDeltaMetric := range []bool{true, false} { + setupDataPointCache() + + instrLibName := "cloudwatch-otel" + + testDeltaCases := []struct { + testName string + adjustToDelta bool + value interface{} + calculatedValue interface{} + expectedRetained bool + }{ + { + fmt.Sprintf("w/ 1st delta calculation retainInitialValueOfDeltaMetric=%t", retainInitialValueOfDeltaMetric), + true, + 0.4, + 0.4, + retainInitialValueOfDeltaMetric, + }, + { + "w/ 2nd delta calculation", + true, + 0.8, + 0.4, + true, + }, + { + "w/o delta calculation", + false, + 0.5, + 0.5, + true, + }, + } - dps := numberDataPointSlice{ - instrLibName, - deltaMetricMetadata{ - tc.adjustToDelta, - "foo", - "namespace", - "log-group", - "log-stream", - }, - testDPS, - } + for _, tc := range testDeltaCases { + t.Run(tc.testName, func(t *testing.T) { + testDPS := pmetric.NewNumberDataPointSlice() + testDP := testDPS.AppendEmpty() + testDP.SetDoubleValue(tc.value.(float64)) + testDP.Attributes().PutStr("label1", "value1") + + dps := numberDataPointSlice{ + instrLibName, + deltaMetricMetadata{ + tc.adjustToDelta, + retainInitialValueOfDeltaMetric, + "foo", + "namespace", + "log-group", + "log-stream", + }, + testDPS, + } - assert.Equal(t, 1, dps.Len()) - dp, retained := dps.At(0) - assert.Equal(t, i > 0, retained) - if retained { - assert.InDelta(t, tc.calculatedValue.(float64), dp.value.(float64), 0.002) - } - }) + assert.Equal(t, 1, dps.Len()) + dp, retained := dps.At(0) + assert.Equal(t, tc.expectedRetained, retained) + if retained { + assert.InDelta(t, tc.calculatedValue.(float64), dp.value.(float64), 0.002) + } + }) + } } } @@ -502,86 +517,93 @@ func TestHistogramDataPointSliceAtWithoutMinMax(t *testing.T) { } func TestSummaryDataPointSliceAt(t *testing.T) { - setupDataPointCache() + for _, retainInitialValueOfDeltaMetric := range []bool{true, false} { + setupDataPointCache() - 
instrLibName := "cloudwatch-otel" + instrLibName := "cloudwatch-otel" - testCases := []struct { - testName string - inputSumCount []interface{} - calculatedSumCount []interface{} - }{ - { - "1st summary count calculation", - []interface{}{17.3, uint64(17)}, - []interface{}{float64(0), uint64(0)}, - }, - { - "2nd summary count calculation", - []interface{}{float64(100), uint64(25)}, - []interface{}{82.7, uint64(8)}, - }, - { - "3rd summary count calculation", - []interface{}{float64(120), uint64(26)}, - []interface{}{float64(20), uint64(1)}, - }, - } + testCases := []struct { + testName string + inputSumCount []interface{} + calculatedSumCount []interface{} + expectedRetained bool + }{ + { + fmt.Sprintf("1st summary count calculation retainInitialValueOfDeltaMetric=%t", retainInitialValueOfDeltaMetric), + []interface{}{17.3, uint64(17)}, + []interface{}{17.3, uint64(17)}, + retainInitialValueOfDeltaMetric, + }, + { + "2nd summary count calculation", + []interface{}{float64(100), uint64(25)}, + []interface{}{82.7, uint64(8)}, + true, + }, + { + "3rd summary count calculation", + []interface{}{float64(120), uint64(26)}, + []interface{}{float64(20), uint64(1)}, + true, + }, + } - for i, tt := range testCases { - t.Run(tt.testName, func(t *testing.T) { - testDPS := pmetric.NewSummaryDataPointSlice() - testDP := testDPS.AppendEmpty() - testDP.SetSum(tt.inputSumCount[0].(float64)) - testDP.SetCount(tt.inputSumCount[1].(uint64)) - - testDP.QuantileValues().EnsureCapacity(2) - testQuantileValue := testDP.QuantileValues().AppendEmpty() - testQuantileValue.SetQuantile(0) - testQuantileValue.SetValue(float64(1)) - testQuantileValue = testDP.QuantileValues().AppendEmpty() - testQuantileValue.SetQuantile(100) - testQuantileValue.SetValue(float64(5)) - testDP.Attributes().PutStr("label1", "value1") - - dps := summaryDataPointSlice{ - instrLibName, - deltaMetricMetadata{ - true, - "foo", - "namespace", - "log-group", - "log-stream", - }, - testDPS, - } + for _, tt := range 
testCases { + t.Run(tt.testName, func(t *testing.T) { + testDPS := pmetric.NewSummaryDataPointSlice() + testDP := testDPS.AppendEmpty() + testDP.SetSum(tt.inputSumCount[0].(float64)) + testDP.SetCount(tt.inputSumCount[1].(uint64)) + + testDP.QuantileValues().EnsureCapacity(2) + testQuantileValue := testDP.QuantileValues().AppendEmpty() + testQuantileValue.SetQuantile(0) + testQuantileValue.SetValue(float64(1)) + testQuantileValue = testDP.QuantileValues().AppendEmpty() + testQuantileValue.SetQuantile(100) + testQuantileValue.SetValue(float64(5)) + testDP.Attributes().PutStr("label1", "value1") + + dps := summaryDataPointSlice{ + instrLibName, + deltaMetricMetadata{ + true, + retainInitialValueOfDeltaMetric, + "foo", + "namespace", + "log-group", + "log-stream", + }, + testDPS, + } - expectedDP := dataPoint{ - value: &cWMetricStats{ - Max: 5, - Min: 1, - Sum: tt.calculatedSumCount[0].(float64), - Count: tt.calculatedSumCount[1].(uint64), - }, - labels: map[string]string{ - oTellibDimensionKey: instrLibName, - "label1": "value1", - }, - } + expectedDP := dataPoint{ + value: &cWMetricStats{ + Max: 5, + Min: 1, + Sum: tt.calculatedSumCount[0].(float64), + Count: tt.calculatedSumCount[1].(uint64), + }, + labels: map[string]string{ + oTellibDimensionKey: instrLibName, + "label1": "value1", + }, + } - assert.Equal(t, 1, dps.Len()) - dp, retained := dps.At(0) - assert.Equal(t, i > 0, retained) - if retained { - expectedMetricStats := expectedDP.value.(*cWMetricStats) - actualMetricsStats := dp.value.(*cWMetricStats) - assert.Equal(t, expectedDP.labels, dp.labels) - assert.Equal(t, expectedMetricStats.Max, actualMetricsStats.Max) - assert.Equal(t, expectedMetricStats.Min, actualMetricsStats.Min) - assert.InDelta(t, expectedMetricStats.Count, actualMetricsStats.Count, 0.1) - assert.InDelta(t, expectedMetricStats.Sum, actualMetricsStats.Sum, 0.02) - } - }) + assert.Equal(t, 1, dps.Len()) + dp, retained := dps.At(0) + assert.Equal(t, tt.expectedRetained, retained) + if 
retained { + expectedMetricStats := expectedDP.value.(*cWMetricStats) + actualMetricsStats := dp.value.(*cWMetricStats) + assert.Equal(t, expectedDP.labels, dp.labels) + assert.Equal(t, expectedMetricStats.Max, actualMetricsStats.Max) + assert.Equal(t, expectedMetricStats.Min, actualMetricsStats.Min) + assert.InDelta(t, expectedMetricStats.Count, actualMetricsStats.Count, 0.1) + assert.InDelta(t, expectedMetricStats.Sum, actualMetricsStats.Sum, 0.02) + } + }) + } } } @@ -619,6 +641,7 @@ func TestGetDataPoints(t *testing.T) { } dmm := deltaMetricMetadata{ + false, false, "foo", "namespace", @@ -627,6 +650,7 @@ func TestGetDataPoints(t *testing.T) { } cumulativeDmm := deltaMetricMetadata{ true, + false, "foo", "namespace", "log-group", diff --git a/exporter/awsemfexporter/factory.go b/exporter/awsemfexporter/factory.go index 552f6e707d2a..e73eab57e592 100644 --- a/exporter/awsemfexporter/factory.go +++ b/exporter/awsemfexporter/factory.go @@ -42,13 +42,14 @@ func NewFactory() exporter.Factory { // CreateDefaultConfig creates the default configuration for exporter. 
func createDefaultConfig() component.Config { return &Config{ - AWSSessionSettings: awsutil.CreateDefaultSessionConfig(), - LogGroupName: "", - LogStreamName: "", - Namespace: "", - DimensionRollupOption: "ZeroAndSingleDimensionRollup", - OutputDestination: "cloudwatch", - logger: zap.NewNop(), + AWSSessionSettings: awsutil.CreateDefaultSessionConfig(), + LogGroupName: "", + LogStreamName: "", + Namespace: "", + DimensionRollupOption: "ZeroAndSingleDimensionRollup", + RetainInitialValueOfDeltaMetric: false, + OutputDestination: "cloudwatch", + logger: zap.NewNop(), } } diff --git a/exporter/awsemfexporter/metric_translator.go b/exporter/awsemfexporter/metric_translator.go index 13fd1f43b6ea..ed698ff6efac 100644 --- a/exporter/awsemfexporter/metric_translator.go +++ b/exporter/awsemfexporter/metric_translator.go @@ -69,11 +69,12 @@ type cWMetricStats struct { } type groupedMetricMetadata struct { - namespace string - timestampMs int64 - logGroup string - logStream string - metricDataType pmetric.MetricType + namespace string + timestampMs int64 + logGroup string + logStream string + metricDataType pmetric.MetricType + retainInitialValueForDelta bool } // cWMetricMetadata represents the metadata associated with a given CloudWatch metric