From 9b7332f0924cc64e65a5d839e0b9f14427d58066 Mon Sep 17 00:00:00 2001
From: jcountsNR <94138069+jcountsNR@users.noreply.github.com>
Date: Thu, 13 Jul 2023 09:19:16 -0700
Subject: [PATCH 01/36] feat: update kafka connect for NR

---
 .chloggen/expand-kafka-broker-metrics.yaml | 16 +
 .../kafkametricsreceiver/broker_scraper.go | 82 ++-
 .../kafkametricsreceiver/documentation.md | 153 +++-
 .../internal/metadata/generated_config.go | 62 +-
 .../metadata/generated_config_test.go | 64 +-
 .../internal/metadata/generated_metrics.go | 690 +++++++++++++++++-
 .../metadata/generated_metrics_test.go | 221 ++++++
 .../internal/metadata/testdata/config.yaml | 40 +
 receiver/kafkametricsreceiver/metadata.yaml | 95 ++-
 9 files changed, 1349 insertions(+), 74 deletions(-)
 create mode 100644 .chloggen/expand-kafka-broker-metrics.yaml

diff --git a/.chloggen/expand-kafka-broker-metrics.yaml b/.chloggen/expand-kafka-broker-metrics.yaml
new file mode 100644
index 000000000000..5a2d4bc0ab02
--- /dev/null
+++ b/.chloggen/expand-kafka-broker-metrics.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: kafkametricsreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Expand the broker metrics that are scraped."
+
+# One or more tracking issues related to the change
+issues: [14166]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
\ No newline at end of file
diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go
index b1fb72e72d4b..581728569c8d 100644
--- a/receiver/kafkametricsreceiver/broker_scraper.go
+++ b/receiver/kafkametricsreceiver/broker_scraper.go
@@ -17,6 +17,8 @@ import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver/internal/metadata"
 )
 
+type saramaMetrics map[string]map[string]interface{} // saramaMetrics maps a metric name to its recorded values (e.g. "mean.rate", "count")
+
 type brokerScraper struct {
 	client       sarama.Client
@@ -26,6 +28,18 @@ type brokerScraper struct {
 	mb           *metadata.MetricsBuilder
 }
 
+var nrMetricsPrefix = [...]string{
+	"consumer-fetch-rate-for-broker-",
+	"incoming-byte-rate-for-broker-",
+	"outgoing-byte-rate-for-broker-",
+	"request-rate-for-broker-",
+	"response-rate-for-broker-",
+	"response-size-for-broker-",
+	"request-size-for-broker-",
+	"requests-in-flight-for-broker-",
+	"request-latency-in-ms-for-broker-",
+}
+
 func (s *brokerScraper) Name() string {
 	return brokersScraperName
 }
@@ -42,6 +56,53 @@ func (s *brokerScraper) shutdown(context.Context) error {
 	return nil
 }
 
+func (s *brokerScraper) scrapeMetric(now pcommon.Timestamp, allMetrics saramaMetrics, brokerID int64, prefix string) {
+	key := fmt.Sprint(prefix, brokerID)
+
+	if metric, ok := allMetrics[key]; ok {
+		switch prefix {
+		case "consumer-fetch-rate-for-broker-":
+			if v, ok := metric["mean.rate"].(float64); ok {
+				s.mb.RecordKafkaBrokersConsumerFetchRateDataPoint(now, v, brokerID)
+			}
+		case "incoming-byte-rate-for-broker-":
+			if v, ok := metric["mean.rate"].(float64); ok {
+				s.mb.RecordKafkaBrokersIncomingByteRateDataPoint(now, v, brokerID)
+			}
+		case "outgoing-byte-rate-for-broker-":
+			if v, ok := 
metric["mean.rate"].(float64); ok { + s.mb.RecordKafkaBrokersOutgoingByteRateDataPoint(now, v, brokerID) + } + case "request-rate-for-broker-": + if v, ok := metric["mean.rate"].(float64); ok { + s.mb.RecordKafkaBrokersRequestRateDataPoint(now, v, brokerID) + } + case "response-rate-for-broker-": + if v, ok := metric["mean.rate"].(float64); ok { + s.mb.RecordKafkaBrokersResponseRateDataPoint(now, v, brokerID) + } + case "response-size-for-broker-": + if v, ok := metric["mean"].(float64); ok { + s.mb.RecordKafkaBrokersResponseSizeDataPoint(now, v, brokerID) + } + case "request-size-for-broker-": + if v, ok := metric["mean"].(float64); ok { + s.mb.RecordKafkaBrokersRequestSizeDataPoint(now, v, brokerID) + } + case "requests-in-flight-for-broker-": + if v, ok := metric["count"].(int64); ok { + s.mb.RecordKafkaBrokersRequestsInFlightDataPoint(now, v, brokerID) + } + case "request-latency-in-ms-for-broker-": + if v, ok := metric["mean"].(float64); ok { + s.mb.RecordKafkaBrokersRequestLatencyDataPoint(now, v, brokerID) + } + default: + fmt.Printf("undefined for prefix %s\n", prefix) + } + } +} + func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { if s.client == nil { client, err := newSaramaClient(s.config.Brokers, s.saramaConfig) @@ -53,7 +114,26 @@ func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { brokers := s.client.Brokers() - s.mb.RecordKafkaBrokersDataPoint(pcommon.NewTimestampFromTime(time.Now()), int64(len(brokers))) + allMetrics := make(map[string]map[string]interface{}) + + if s.saramaConfig != nil { + allMetrics = s.saramaConfig.MetricRegistry.GetAll() + } + + now := pcommon.NewTimestampFromTime(time.Now()) + for _, broker := range brokers { + brokerID := int64(broker.ID()) + for _, prefix := range nrMetricsPrefix { + s.scrapeMetric(now, allMetrics, brokerID, prefix) + } + } + + brokerCount := int64(len(brokers)) + // kafka.brokers is deprecated. This should be removed in a future release. + s.mb.RecordKafkaBrokersDataPoint(now, brokerCount) + + // kafka.brokers.count should replace kafka.brokers. + s.mb.RecordKafkaBrokersCountDataPoint(now, brokerCount) return s.mb.Emit(), nil } diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 691195089183..7fae06054bdb 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -14,11 +14,11 @@ metrics: ### kafka.brokers -Number of brokers in the cluster. +[DEPRECATED] Number of brokers in the cluster. -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {brokers} | Sum | Int | Cumulative | false | +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {brokers} | Gauge | Int | ### kafka.consumer_group.lag @@ -169,3 +169,148 @@ Number of partitions in topic. | Name | Description | Values | | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | + + +## Optional Metrics + +The following metrics are not emitted by default. 
Each of them can be enabled by applying the following configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: true
+```
+
+### kafka.brokers.consumer_fetch_rate
+
+Average consumer fetch Rate
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {fetches}/s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.count
+
+Number of brokers in the cluster.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {brokers} | Sum | Int | Cumulative | false |
+
+### kafka.brokers.incoming_byte_rate
+
+Average incoming Byte Rate in bytes/second
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.outgoing_byte_rate
+
+Average outgoing Byte Rate in bytes/second.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.request_latency
+
+Average request latency in ms
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| ms | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.request_rate
+
+Average request rate per second.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {requests}/s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.request_size
+
+Average request size in bytes
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.requests_in_flight
+
+Requests in flight
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {requests} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.response_rate
+
+Average response rate per second
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {response}/s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
+
+### kafka.brokers.response_size
+
+Average response size in bytes
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| broker | The ID (integer) of a broker | Any Int |
\ No newline at end of file
diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go
index 1775e4c01445..5abf60966aa0 100644
--- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go
+++ 
b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -25,17 +25,27 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for kafkametrics metrics. type MetricsConfig struct { - KafkaBrokers MetricConfig `mapstructure:"kafka.brokers"` - KafkaConsumerGroupLag MetricConfig `mapstructure:"kafka.consumer_group.lag"` - KafkaConsumerGroupLagSum MetricConfig `mapstructure:"kafka.consumer_group.lag_sum"` - KafkaConsumerGroupMembers MetricConfig `mapstructure:"kafka.consumer_group.members"` - KafkaConsumerGroupOffset MetricConfig `mapstructure:"kafka.consumer_group.offset"` - KafkaConsumerGroupOffsetSum MetricConfig `mapstructure:"kafka.consumer_group.offset_sum"` - KafkaPartitionCurrentOffset MetricConfig `mapstructure:"kafka.partition.current_offset"` - KafkaPartitionOldestOffset MetricConfig `mapstructure:"kafka.partition.oldest_offset"` - KafkaPartitionReplicas MetricConfig `mapstructure:"kafka.partition.replicas"` - KafkaPartitionReplicasInSync MetricConfig `mapstructure:"kafka.partition.replicas_in_sync"` - KafkaTopicPartitions MetricConfig `mapstructure:"kafka.topic.partitions"` + KafkaBrokers MetricConfig `mapstructure:"kafka.brokers"` + KafkaBrokersConsumerFetchRate MetricConfig `mapstructure:"kafka.brokers.consumer_fetch_rate"` + KafkaBrokersCount MetricConfig `mapstructure:"kafka.brokers.count"` + KafkaBrokersIncomingByteRate MetricConfig `mapstructure:"kafka.brokers.incoming_byte_rate"` + KafkaBrokersOutgoingByteRate MetricConfig `mapstructure:"kafka.brokers.outgoing_byte_rate"` + KafkaBrokersRequestLatency MetricConfig `mapstructure:"kafka.brokers.request_latency"` + KafkaBrokersRequestRate MetricConfig `mapstructure:"kafka.brokers.request_rate"` + KafkaBrokersRequestSize MetricConfig `mapstructure:"kafka.brokers.request_size"` + KafkaBrokersRequestsInFlight MetricConfig `mapstructure:"kafka.brokers.requests_in_flight"` + KafkaBrokersResponseRate MetricConfig `mapstructure:"kafka.brokers.response_rate"` + KafkaBrokersResponseSize MetricConfig `mapstructure:"kafka.brokers.response_size"` + KafkaConsumerGroupLag MetricConfig `mapstructure:"kafka.consumer_group.lag"` + KafkaConsumerGroupLagSum MetricConfig `mapstructure:"kafka.consumer_group.lag_sum"` + KafkaConsumerGroupMembers MetricConfig `mapstructure:"kafka.consumer_group.members"` + KafkaConsumerGroupOffset MetricConfig `mapstructure:"kafka.consumer_group.offset"` + KafkaConsumerGroupOffsetSum MetricConfig `mapstructure:"kafka.consumer_group.offset_sum"` + KafkaPartitionCurrentOffset MetricConfig `mapstructure:"kafka.partition.current_offset"` + KafkaPartitionOldestOffset MetricConfig `mapstructure:"kafka.partition.oldest_offset"` + KafkaPartitionReplicas MetricConfig `mapstructure:"kafka.partition.replicas"` + KafkaPartitionReplicasInSync MetricConfig `mapstructure:"kafka.partition.replicas_in_sync"` + KafkaTopicPartitions MetricConfig `mapstructure:"kafka.topic.partitions"` } func DefaultMetricsConfig() MetricsConfig { @@ -43,6 +53,36 @@ func DefaultMetricsConfig() MetricsConfig { KafkaBrokers: MetricConfig{ Enabled: true, }, + KafkaBrokersConsumerFetchRate: MetricConfig{ + Enabled: false, + }, + KafkaBrokersCount: MetricConfig{ + Enabled: false, + }, + KafkaBrokersIncomingByteRate: MetricConfig{ + Enabled: false, + }, + KafkaBrokersOutgoingByteRate: MetricConfig{ + Enabled: false, + }, + KafkaBrokersRequestLatency: MetricConfig{ + Enabled: false, + }, + KafkaBrokersRequestRate: MetricConfig{ + Enabled: false, + }, + KafkaBrokersRequestSize: MetricConfig{ + Enabled: 
false, + }, + KafkaBrokersRequestsInFlight: MetricConfig{ + Enabled: false, + }, + KafkaBrokersResponseRate: MetricConfig{ + Enabled: false, + }, + KafkaBrokersResponseSize: MetricConfig{ + Enabled: false, + }, KafkaConsumerGroupLag: MetricConfig{ Enabled: true, }, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go index 312133d990b3..957b4c475293 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go @@ -26,17 +26,27 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - KafkaBrokers: MetricConfig{Enabled: true}, - KafkaConsumerGroupLag: MetricConfig{Enabled: true}, - KafkaConsumerGroupLagSum: MetricConfig{Enabled: true}, - KafkaConsumerGroupMembers: MetricConfig{Enabled: true}, - KafkaConsumerGroupOffset: MetricConfig{Enabled: true}, - KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: true}, - KafkaPartitionCurrentOffset: MetricConfig{Enabled: true}, - KafkaPartitionOldestOffset: MetricConfig{Enabled: true}, - KafkaPartitionReplicas: MetricConfig{Enabled: true}, - KafkaPartitionReplicasInSync: MetricConfig{Enabled: true}, - KafkaTopicPartitions: MetricConfig{Enabled: true}, + KafkaBrokers: MetricConfig{Enabled: true}, + KafkaBrokersConsumerFetchRate: MetricConfig{Enabled: true}, + KafkaBrokersCount: MetricConfig{Enabled: true}, + KafkaBrokersIncomingByteRate: MetricConfig{Enabled: true}, + KafkaBrokersOutgoingByteRate: MetricConfig{Enabled: true}, + KafkaBrokersRequestLatency: MetricConfig{Enabled: true}, + KafkaBrokersRequestRate: MetricConfig{Enabled: true}, + KafkaBrokersRequestSize: MetricConfig{Enabled: true}, + KafkaBrokersRequestsInFlight: MetricConfig{Enabled: true}, + KafkaBrokersResponseRate: MetricConfig{Enabled: true}, + KafkaBrokersResponseSize: MetricConfig{Enabled: true}, + KafkaConsumerGroupLag: MetricConfig{Enabled: true}, + KafkaConsumerGroupLagSum: MetricConfig{Enabled: true}, + KafkaConsumerGroupMembers: MetricConfig{Enabled: true}, + KafkaConsumerGroupOffset: MetricConfig{Enabled: true}, + KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: true}, + KafkaPartitionCurrentOffset: MetricConfig{Enabled: true}, + KafkaPartitionOldestOffset: MetricConfig{Enabled: true}, + KafkaPartitionReplicas: MetricConfig{Enabled: true}, + KafkaPartitionReplicasInSync: MetricConfig{Enabled: true}, + KafkaTopicPartitions: MetricConfig{Enabled: true}, }, }, }, @@ -44,17 +54,27 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - KafkaBrokers: MetricConfig{Enabled: false}, - KafkaConsumerGroupLag: MetricConfig{Enabled: false}, - KafkaConsumerGroupLagSum: MetricConfig{Enabled: false}, - KafkaConsumerGroupMembers: MetricConfig{Enabled: false}, - KafkaConsumerGroupOffset: MetricConfig{Enabled: false}, - KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: false}, - KafkaPartitionCurrentOffset: MetricConfig{Enabled: false}, - KafkaPartitionOldestOffset: MetricConfig{Enabled: false}, - KafkaPartitionReplicas: MetricConfig{Enabled: false}, - KafkaPartitionReplicasInSync: MetricConfig{Enabled: false}, - KafkaTopicPartitions: MetricConfig{Enabled: false}, + KafkaBrokers: MetricConfig{Enabled: false}, + KafkaBrokersConsumerFetchRate: MetricConfig{Enabled: false}, + KafkaBrokersCount: MetricConfig{Enabled: false}, + KafkaBrokersIncomingByteRate: 
MetricConfig{Enabled: false}, + KafkaBrokersOutgoingByteRate: MetricConfig{Enabled: false}, + KafkaBrokersRequestLatency: MetricConfig{Enabled: false}, + KafkaBrokersRequestRate: MetricConfig{Enabled: false}, + KafkaBrokersRequestSize: MetricConfig{Enabled: false}, + KafkaBrokersRequestsInFlight: MetricConfig{Enabled: false}, + KafkaBrokersResponseRate: MetricConfig{Enabled: false}, + KafkaBrokersResponseSize: MetricConfig{Enabled: false}, + KafkaConsumerGroupLag: MetricConfig{Enabled: false}, + KafkaConsumerGroupLagSum: MetricConfig{Enabled: false}, + KafkaConsumerGroupMembers: MetricConfig{Enabled: false}, + KafkaConsumerGroupOffset: MetricConfig{Enabled: false}, + KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: false}, + KafkaPartitionCurrentOffset: MetricConfig{Enabled: false}, + KafkaPartitionOldestOffset: MetricConfig{Enabled: false}, + KafkaPartitionReplicas: MetricConfig{Enabled: false}, + KafkaPartitionReplicasInSync: MetricConfig{Enabled: false}, + KafkaTopicPartitions: MetricConfig{Enabled: false}, }, }, }, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index 6f1e45c52efd..4e2791bfea13 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -20,6 +20,106 @@ type metricKafkaBrokers struct { // init fills kafka.brokers metric with initial data. func (m *metricKafkaBrokers) init() { m.data.SetName("kafka.brokers") + m.data.SetDescription("[DEPRECATED] Number of brokers in the cluster.") + m.data.SetUnit("{brokers}") + m.data.SetEmptyGauge() +} + +func (m *metricKafkaBrokers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokers) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokers) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokers(cfg MetricConfig) metricKafkaBrokers { + m := metricKafkaBrokers{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersConsumerFetchRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.consumer_fetch_rate metric with initial data. 
+func (m *metricKafkaBrokersConsumerFetchRate) init() { + m.data.SetName("kafka.brokers.consumer_fetch_rate") + m.data.SetDescription("Average consumer fetch Rate") + m.data.SetUnit("{fetches}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersConsumerFetchRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersConsumerFetchRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokersConsumerFetchRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersConsumerFetchRate(cfg MetricConfig) metricKafkaBrokersConsumerFetchRate { + m := metricKafkaBrokersConsumerFetchRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.count metric with initial data. +func (m *metricKafkaBrokersCount) init() { + m.data.SetName("kafka.brokers.count") m.data.SetDescription("Number of brokers in the cluster.") m.data.SetUnit("{brokers}") m.data.SetEmptySum() @@ -27,7 +127,7 @@ func (m *metricKafkaBrokers) init() { m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } -func (m *metricKafkaBrokers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { +func (m *metricKafkaBrokersCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } @@ -38,14 +138,14 @@ func (m *metricKafkaBrokers) recordDataPoint(start pcommon.Timestamp, ts pcommon } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokers) updateCapacity() { +func (m *metricKafkaBrokersCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokers) emit(metrics pmetric.MetricSlice) {
+func (m *metricKafkaBrokersCount) emit(metrics pmetric.MetricSlice) {
 	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
 		m.updateCapacity()
 		m.data.MoveTo(metrics.AppendEmpty())
@@ -53,8 +153,416 @@ func (m *metricKafkaBrokers) emit(metrics pmetric.MetricSlice) {
 	}
 }
 
-func newMetricKafkaBrokers(cfg MetricConfig) metricKafkaBrokers {
-	m := metricKafkaBrokers{config: cfg}
+func newMetricKafkaBrokersCount(cfg MetricConfig) metricKafkaBrokersCount {
+	m := metricKafkaBrokersCount{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricKafkaBrokersIncomingByteRate struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills kafka.brokers.incoming_byte_rate metric with initial data.
+func (m *metricKafkaBrokersIncomingByteRate) init() {
+	m.data.SetName("kafka.brokers.incoming_byte_rate")
+	m.data.SetDescription("Average incoming Byte Rate in bytes/second")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricKafkaBrokersIncomingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+	dp.Attributes().PutInt("broker", brokerAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricKafkaBrokersIncomingByteRate) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricKafkaBrokersIncomingByteRate) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricKafkaBrokersIncomingByteRate(cfg MetricConfig) metricKafkaBrokersIncomingByteRate {
+	m := metricKafkaBrokersIncomingByteRate{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricKafkaBrokersOutgoingByteRate struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills kafka.brokers.outgoing_byte_rate metric with initial data.
+func (m *metricKafkaBrokersOutgoingByteRate) init() { + m.data.SetName("kafka.brokers.outgoing_byte_rate") + m.data.SetDescription("Average outgoing Byte Rate in bytes/second.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersOutgoingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersOutgoingByteRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokersOutgoingByteRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersOutgoingByteRate(cfg MetricConfig) metricKafkaBrokersOutgoingByteRate { + m := metricKafkaBrokersOutgoingByteRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersRequestLatency struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.request_latency metric with initial data. +func (m *metricKafkaBrokersRequestLatency) init() { + m.data.SetName("kafka.brokers.request_latency") + m.data.SetDescription("Average request latency in ms") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersRequestLatency) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersRequestLatency) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokersRequestLatency) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersRequestLatency(cfg MetricConfig) metricKafkaBrokersRequestLatency { + m := metricKafkaBrokersRequestLatency{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersRequestRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.request_rate metric with initial data. +func (m *metricKafkaBrokersRequestRate) init() { + m.data.SetName("kafka.brokers.request_rate") + m.data.SetDescription("Average request rate per second.") + m.data.SetUnit("{requests}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersRequestRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersRequestRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokersRequestRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersRequestRate(cfg MetricConfig) metricKafkaBrokersRequestRate { + m := metricKafkaBrokersRequestRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersRequestSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.request_size metric with initial data. +func (m *metricKafkaBrokersRequestSize) init() { + m.data.SetName("kafka.brokers.request_size") + m.data.SetDescription("Average request size in bytes") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersRequestSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersRequestSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokersRequestSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersRequestSize(cfg MetricConfig) metricKafkaBrokersRequestSize { + m := metricKafkaBrokersRequestSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersRequestsInFlight struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.requests_in_flight metric with initial data. +func (m *metricKafkaBrokersRequestsInFlight) init() { + m.data.SetName("kafka.brokers.requests_in_flight") + m.data.SetDescription("Requests in flight") + m.data.SetUnit("{requests}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersRequestsInFlight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersRequestsInFlight) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokersRequestsInFlight) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersRequestsInFlight(cfg MetricConfig) metricKafkaBrokersRequestsInFlight { + m := metricKafkaBrokersRequestsInFlight{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersResponseRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.response_rate metric with initial data. +func (m *metricKafkaBrokersResponseRate) init() { + m.data.SetName("kafka.brokers.response_rate") + m.data.SetDescription("Average response rate per second") + m.data.SetUnit("{response}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersResponseRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersResponseRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricKafkaBrokersResponseRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersResponseRate(cfg MetricConfig) metricKafkaBrokersResponseRate { + m := metricKafkaBrokersResponseRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricKafkaBrokersResponseSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills kafka.brokers.response_size metric with initial data. +func (m *metricKafkaBrokersResponseSize) init() { + m.data.SetName("kafka.brokers.response_size") + m.data.SetDescription("Average response size in bytes") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricKafkaBrokersResponseSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricKafkaBrokersResponseSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricKafkaBrokersResponseSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricKafkaBrokersResponseSize(cfg MetricConfig) metricKafkaBrokersResponseSize { + m := metricKafkaBrokersResponseSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -593,22 +1101,32 @@ func newMetricKafkaTopicPartitions(cfg MetricConfig) metricKafkaTopicPartitions // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
- buildInfo component.BuildInfo // contains version information - metricKafkaBrokers metricKafkaBrokers - metricKafkaConsumerGroupLag metricKafkaConsumerGroupLag - metricKafkaConsumerGroupLagSum metricKafkaConsumerGroupLagSum - metricKafkaConsumerGroupMembers metricKafkaConsumerGroupMembers - metricKafkaConsumerGroupOffset metricKafkaConsumerGroupOffset - metricKafkaConsumerGroupOffsetSum metricKafkaConsumerGroupOffsetSum - metricKafkaPartitionCurrentOffset metricKafkaPartitionCurrentOffset - metricKafkaPartitionOldestOffset metricKafkaPartitionOldestOffset - metricKafkaPartitionReplicas metricKafkaPartitionReplicas - metricKafkaPartitionReplicasInSync metricKafkaPartitionReplicasInSync - metricKafkaTopicPartitions metricKafkaTopicPartitions + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information + metricKafkaBrokers metricKafkaBrokers + metricKafkaBrokersConsumerFetchRate metricKafkaBrokersConsumerFetchRate + metricKafkaBrokersCount metricKafkaBrokersCount + metricKafkaBrokersIncomingByteRate metricKafkaBrokersIncomingByteRate + metricKafkaBrokersOutgoingByteRate metricKafkaBrokersOutgoingByteRate + metricKafkaBrokersRequestLatency metricKafkaBrokersRequestLatency + metricKafkaBrokersRequestRate metricKafkaBrokersRequestRate + metricKafkaBrokersRequestSize metricKafkaBrokersRequestSize + metricKafkaBrokersRequestsInFlight metricKafkaBrokersRequestsInFlight + metricKafkaBrokersResponseRate metricKafkaBrokersResponseRate + metricKafkaBrokersResponseSize metricKafkaBrokersResponseSize + metricKafkaConsumerGroupLag metricKafkaConsumerGroupLag + metricKafkaConsumerGroupLagSum metricKafkaConsumerGroupLagSum + metricKafkaConsumerGroupMembers metricKafkaConsumerGroupMembers + metricKafkaConsumerGroupOffset metricKafkaConsumerGroupOffset + metricKafkaConsumerGroupOffsetSum metricKafkaConsumerGroupOffsetSum + metricKafkaPartitionCurrentOffset metricKafkaPartitionCurrentOffset + metricKafkaPartitionOldestOffset metricKafkaPartitionOldestOffset + metricKafkaPartitionReplicas metricKafkaPartitionReplicas + metricKafkaPartitionReplicasInSync metricKafkaPartitionReplicasInSync + metricKafkaTopicPartitions metricKafkaTopicPartitions } // metricBuilderOption applies changes to default metrics builder. @@ -622,21 +1140,64 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { } func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { + if mbc.Metrics.KafkaBrokers.Enabled { + settings.Logger.Warn("[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `kafka.brokers.count`") + } + if !mbc.Metrics.KafkaBrokersConsumerFetchRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersCount.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.count`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersIncomingByteRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersOutgoingByteRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersRequestLatency.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersRequestRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersRequestSize.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_size`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersRequestsInFlight.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersResponseRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.") + } + if !mbc.Metrics.KafkaBrokersResponseSize.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_size`: This metric will be enabled by default in the next versions.") + } mb := &MetricsBuilder{ - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricKafkaBrokers: newMetricKafkaBrokers(mbc.Metrics.KafkaBrokers), - metricKafkaConsumerGroupLag: newMetricKafkaConsumerGroupLag(mbc.Metrics.KafkaConsumerGroupLag), - metricKafkaConsumerGroupLagSum: newMetricKafkaConsumerGroupLagSum(mbc.Metrics.KafkaConsumerGroupLagSum), - metricKafkaConsumerGroupMembers: newMetricKafkaConsumerGroupMembers(mbc.Metrics.KafkaConsumerGroupMembers), - metricKafkaConsumerGroupOffset: newMetricKafkaConsumerGroupOffset(mbc.Metrics.KafkaConsumerGroupOffset), - metricKafkaConsumerGroupOffsetSum: newMetricKafkaConsumerGroupOffsetSum(mbc.Metrics.KafkaConsumerGroupOffsetSum), - metricKafkaPartitionCurrentOffset: newMetricKafkaPartitionCurrentOffset(mbc.Metrics.KafkaPartitionCurrentOffset), - metricKafkaPartitionOldestOffset: newMetricKafkaPartitionOldestOffset(mbc.Metrics.KafkaPartitionOldestOffset), - metricKafkaPartitionReplicas: newMetricKafkaPartitionReplicas(mbc.Metrics.KafkaPartitionReplicas), - metricKafkaPartitionReplicasInSync: 
newMetricKafkaPartitionReplicasInSync(mbc.Metrics.KafkaPartitionReplicasInSync), - metricKafkaTopicPartitions: newMetricKafkaTopicPartitions(mbc.Metrics.KafkaTopicPartitions), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricKafkaBrokers: newMetricKafkaBrokers(mbc.Metrics.KafkaBrokers), + metricKafkaBrokersConsumerFetchRate: newMetricKafkaBrokersConsumerFetchRate(mbc.Metrics.KafkaBrokersConsumerFetchRate), + metricKafkaBrokersCount: newMetricKafkaBrokersCount(mbc.Metrics.KafkaBrokersCount), + metricKafkaBrokersIncomingByteRate: newMetricKafkaBrokersIncomingByteRate(mbc.Metrics.KafkaBrokersIncomingByteRate), + metricKafkaBrokersOutgoingByteRate: newMetricKafkaBrokersOutgoingByteRate(mbc.Metrics.KafkaBrokersOutgoingByteRate), + metricKafkaBrokersRequestLatency: newMetricKafkaBrokersRequestLatency(mbc.Metrics.KafkaBrokersRequestLatency), + metricKafkaBrokersRequestRate: newMetricKafkaBrokersRequestRate(mbc.Metrics.KafkaBrokersRequestRate), + metricKafkaBrokersRequestSize: newMetricKafkaBrokersRequestSize(mbc.Metrics.KafkaBrokersRequestSize), + metricKafkaBrokersRequestsInFlight: newMetricKafkaBrokersRequestsInFlight(mbc.Metrics.KafkaBrokersRequestsInFlight), + metricKafkaBrokersResponseRate: newMetricKafkaBrokersResponseRate(mbc.Metrics.KafkaBrokersResponseRate), + metricKafkaBrokersResponseSize: newMetricKafkaBrokersResponseSize(mbc.Metrics.KafkaBrokersResponseSize), + metricKafkaConsumerGroupLag: newMetricKafkaConsumerGroupLag(mbc.Metrics.KafkaConsumerGroupLag), + metricKafkaConsumerGroupLagSum: newMetricKafkaConsumerGroupLagSum(mbc.Metrics.KafkaConsumerGroupLagSum), + metricKafkaConsumerGroupMembers: newMetricKafkaConsumerGroupMembers(mbc.Metrics.KafkaConsumerGroupMembers), + metricKafkaConsumerGroupOffset: newMetricKafkaConsumerGroupOffset(mbc.Metrics.KafkaConsumerGroupOffset), + metricKafkaConsumerGroupOffsetSum: newMetricKafkaConsumerGroupOffsetSum(mbc.Metrics.KafkaConsumerGroupOffsetSum), + metricKafkaPartitionCurrentOffset: newMetricKafkaPartitionCurrentOffset(mbc.Metrics.KafkaPartitionCurrentOffset), + metricKafkaPartitionOldestOffset: newMetricKafkaPartitionOldestOffset(mbc.Metrics.KafkaPartitionOldestOffset), + metricKafkaPartitionReplicas: newMetricKafkaPartitionReplicas(mbc.Metrics.KafkaPartitionReplicas), + metricKafkaPartitionReplicasInSync: newMetricKafkaPartitionReplicasInSync(mbc.Metrics.KafkaPartitionReplicasInSync), + metricKafkaTopicPartitions: newMetricKafkaTopicPartitions(mbc.Metrics.KafkaTopicPartitions), } for _, op := range options { op(mb) @@ -690,6 +1251,16 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricKafkaBrokers.emit(ils.Metrics()) + mb.metricKafkaBrokersConsumerFetchRate.emit(ils.Metrics()) + mb.metricKafkaBrokersCount.emit(ils.Metrics()) + mb.metricKafkaBrokersIncomingByteRate.emit(ils.Metrics()) + mb.metricKafkaBrokersOutgoingByteRate.emit(ils.Metrics()) + mb.metricKafkaBrokersRequestLatency.emit(ils.Metrics()) + mb.metricKafkaBrokersRequestRate.emit(ils.Metrics()) + mb.metricKafkaBrokersRequestSize.emit(ils.Metrics()) + mb.metricKafkaBrokersRequestsInFlight.emit(ils.Metrics()) + mb.metricKafkaBrokersResponseRate.emit(ils.Metrics()) + mb.metricKafkaBrokersResponseSize.emit(ils.Metrics()) mb.metricKafkaConsumerGroupLag.emit(ils.Metrics()) mb.metricKafkaConsumerGroupLagSum.emit(ils.Metrics()) 
mb.metricKafkaConsumerGroupMembers.emit(ils.Metrics()) @@ -774,6 +1345,55 @@ func (mb *MetricsBuilder) RecordKafkaPartitionReplicasInSyncDataPoint(ts pcommon func (mb *MetricsBuilder) RecordKafkaTopicPartitionsDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string) { mb.metricKafkaTopicPartitions.recordDataPoint(mb.startTime, ts, val, topicAttributeValue) } +// RecordKafkaBrokersConsumerFetchRateDataPoint adds a data point to kafka.brokers.consumer_fetch_rate metric. +func (mb *MetricsBuilder) RecordKafkaBrokersConsumerFetchRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersConsumerFetchRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersCountDataPoint adds a data point to kafka.brokers.count metric. +func (mb *MetricsBuilder) RecordKafkaBrokersCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricKafkaBrokersCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordKafkaBrokersIncomingByteRateDataPoint adds a data point to kafka.brokers.incoming_byte_rate metric. +func (mb *MetricsBuilder) RecordKafkaBrokersIncomingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersIncomingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersOutgoingByteRateDataPoint adds a data point to kafka.brokers.outgoing_byte_rate metric. +func (mb *MetricsBuilder) RecordKafkaBrokersOutgoingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersOutgoingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersRequestLatencyDataPoint adds a data point to kafka.brokers.request_latency metric. +func (mb *MetricsBuilder) RecordKafkaBrokersRequestLatencyDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersRequestLatency.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersRequestRateDataPoint adds a data point to kafka.brokers.request_rate metric. +func (mb *MetricsBuilder) RecordKafkaBrokersRequestRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersRequestRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersRequestSizeDataPoint adds a data point to kafka.brokers.request_size metric. +func (mb *MetricsBuilder) RecordKafkaBrokersRequestSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersRequestSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersRequestsInFlightDataPoint adds a data point to kafka.brokers.requests_in_flight metric. +func (mb *MetricsBuilder) RecordKafkaBrokersRequestsInFlightDataPoint(ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { + mb.metricKafkaBrokersRequestsInFlight.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersResponseRateDataPoint adds a data point to kafka.brokers.response_rate metric. +func (mb *MetricsBuilder) RecordKafkaBrokersResponseRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersResponseRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordKafkaBrokersResponseSizeDataPoint adds a data point to kafka.brokers.response_size metric. 
+func (mb *MetricsBuilder) RecordKafkaBrokersResponseSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricKafkaBrokersResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 874fa212e032..542360efde8c 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -49,6 +49,50 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 + if test.configSet == testSetDefault || test.configSet == testSetAll { + assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. Use `kafka.brokers.count`", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, 
"[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } assert.Equal(t, expectedWarnings, observedLogs.Len()) defaultMetricsCount := 0 @@ -58,6 +102,36 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordKafkaBrokersDataPoint(ts, 1) + allMetricsCount++ + mb.RecordKafkaBrokersConsumerFetchRateDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersCountDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersIncomingByteRateDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersOutgoingByteRateDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersRequestLatencyDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersRequestRateDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersRequestSizeDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersRequestsInFlightDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersResponseRateDataPoint(ts, 1, 1) + + allMetricsCount++ + mb.RecordKafkaBrokersResponseSizeDataPoint(ts, 1, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordKafkaConsumerGroupLagDataPoint(ts, 1, "attr-val", "attr-val", 1) @@ -126,6 +200,33 @@ func TestMetricsBuilder(t *testing.T) { case "kafka.brokers": assert.False(t, validatedMetrics["kafka.brokers"], "Found a duplicate in the metrics slice: kafka.brokers") validatedMetrics["kafka.brokers"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "[DEPRECATED] Number of brokers in the cluster.", ms.At(i).Description()) + assert.Equal(t, "{brokers}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "kafka.brokers.consumer_fetch_rate": + assert.False(t, validatedMetrics["kafka.brokers.consumer_fetch_rate"], "Found a duplicate in the metrics slice: kafka.brokers.consumer_fetch_rate") + validatedMetrics["kafka.brokers.consumer_fetch_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average consumer fetch Rate", ms.At(i).Description()) + assert.Equal(t, "{fetches}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.count": + assert.False(t, validatedMetrics["kafka.brokers.count"], "Found a duplicate in the metrics slice: kafka.brokers.count") + validatedMetrics["kafka.brokers.count"] = true assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of brokers in the cluster.", 
ms.At(i).Description()) @@ -137,6 +238,126 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "kafka.brokers.incoming_byte_rate": + assert.False(t, validatedMetrics["kafka.brokers.incoming_byte_rate"], "Found a duplicate in the metrics slice: kafka.brokers.incoming_byte_rate") + validatedMetrics["kafka.brokers.incoming_byte_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average tncoming Byte Rate in bytes/second", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.outgoing_byte_rate": + assert.False(t, validatedMetrics["kafka.brokers.outgoing_byte_rate"], "Found a duplicate in the metrics slice: kafka.brokers.outgoing_byte_rate") + validatedMetrics["kafka.brokers.outgoing_byte_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average outgoing Byte Rate in bytes/second.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.request_latency": + assert.False(t, validatedMetrics["kafka.brokers.request_latency"], "Found a duplicate in the metrics slice: kafka.brokers.request_latency") + validatedMetrics["kafka.brokers.request_latency"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average request latency in ms", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.request_rate": + assert.False(t, validatedMetrics["kafka.brokers.request_rate"], "Found a duplicate in the metrics slice: kafka.brokers.request_rate") + validatedMetrics["kafka.brokers.request_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average request rate per second.", ms.At(i).Description()) + assert.Equal(t, "{requests}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := 
dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.request_size": + assert.False(t, validatedMetrics["kafka.brokers.request_size"], "Found a duplicate in the metrics slice: kafka.brokers.request_size") + validatedMetrics["kafka.brokers.request_size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average request size in bytes", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.requests_in_flight": + assert.False(t, validatedMetrics["kafka.brokers.requests_in_flight"], "Found a duplicate in the metrics slice: kafka.brokers.requests_in_flight") + validatedMetrics["kafka.brokers.requests_in_flight"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Requests in flight", ms.At(i).Description()) + assert.Equal(t, "{requests}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.response_rate": + assert.False(t, validatedMetrics["kafka.brokers.response_rate"], "Found a duplicate in the metrics slice: kafka.brokers.response_rate") + validatedMetrics["kafka.brokers.response_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average response rate per second", ms.At(i).Description()) + assert.Equal(t, "{response}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) + case "kafka.brokers.response_size": + assert.False(t, validatedMetrics["kafka.brokers.response_size"], "Found a duplicate in the metrics slice: kafka.brokers.response_size") + validatedMetrics["kafka.brokers.response_size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average response size in bytes", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 1, attrVal.Int()) case "kafka.consumer_group.lag": assert.False(t, validatedMetrics["kafka.consumer_group.lag"], "Found a duplicate in the metrics 
slice: kafka.consumer_group.lag") validatedMetrics["kafka.consumer_group.lag"] = true diff --git a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml index 6d0255145cbc..675b183ca2f0 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml @@ -3,6 +3,26 @@ all_set: metrics: kafka.brokers: enabled: true + kafka.brokers.consumer_fetch_rate: + enabled: true + kafka.brokers.count: + enabled: true + kafka.brokers.incoming_byte_rate: + enabled: true + kafka.brokers.outgoing_byte_rate: + enabled: true + kafka.brokers.request_latency: + enabled: true + kafka.brokers.request_rate: + enabled: true + kafka.brokers.request_size: + enabled: true + kafka.brokers.requests_in_flight: + enabled: true + kafka.brokers.response_rate: + enabled: true + kafka.brokers.response_size: + enabled: true kafka.consumer_group.lag: enabled: true kafka.consumer_group.lag_sum: @@ -27,6 +47,26 @@ none_set: metrics: kafka.brokers: enabled: false + kafka.brokers.consumer_fetch_rate: + enabled: false + kafka.brokers.count: + enabled: false + kafka.brokers.incoming_byte_rate: + enabled: false + kafka.brokers.outgoing_byte_rate: + enabled: false + kafka.brokers.request_latency: + enabled: false + kafka.brokers.request_rate: + enabled: false + kafka.brokers.request_size: + enabled: false + kafka.brokers.requests_in_flight: + enabled: false + kafka.brokers.response_rate: + enabled: false + kafka.brokers.response_size: + enabled: false kafka.consumer_group.lag: enabled: false kafka.consumer_group.lag_sum: diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 06c79be6741b..e3a0a640ce72 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -16,18 +16,111 @@ attributes: group: description: The ID (string) of a consumer group type: string + broker: + description: The ID (integer) of a broker + type: int metrics: # brokers scraper kafka.brokers: enabled: true + description: "[DEPRECATED] Number of brokers in the cluster." + unit: "{brokers}" + gauge: + value_type: int + warnings: + if_enabled: The metric is deprecated and will be removed. Use `kafka.brokers.count` + kafka.brokers.count: + enabled: false description: Number of brokers in the cluster. unit: "{brokers}" sum: monotonic: false value_type: int aggregation: cumulative - # topics scraper + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.consumer_fetch_rate: + enabled: false + description: Average consumer fetch Rate + unit: "{fetches}/s" + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.incoming_byte_rate: + enabled: false + description: Average tncoming Byte Rate in bytes/second + unit: 1 + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.outgoing_byte_rate: + enabled: false + description: Average outgoing Byte Rate in bytes/second. + unit: 1 + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. 
+ kafka.brokers.request_latency: + enabled: false + description: Average request latency in ms + unit: "ms" + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.response_rate: + enabled: false + description: Average response rate per second + unit: "{response}/s" + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.response_size: + enabled: false + description: Average response size in bytes + unit: "By" + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.request_rate: + enabled: false + description: Average request rate per second. + unit: "{requests}/s" + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.request_size: + enabled: false + description: Average request size in bytes + unit: "By" + gauge: + value_type: double + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. + kafka.brokers.requests_in_flight: + enabled: false + description: Requests in flight + unit: "{requests}" + gauge: + value_type: int + attributes: [broker] + warnings: + if_enabled_not_set: This metric will be enabled by default in the next versions. kafka.topic.partitions: enabled: true description: Number of partitions in topic. From 277a38dd31bbb75ee3560648db8e421bde9b7958 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Wed, 26 Jul 2023 10:59:50 -0700 Subject: [PATCH 02/36] chore: attempt to fix sorting order 1 --- receiver/kafkametricsreceiver/broker_scraper.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index 581728569c8d..f21d6339386b 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -17,8 +17,6 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver/internal/metadata" ) -type saramaMetrics map[string]map[string]interface{} // saramaMetrics is a map of metric name to tags - type brokerScraper struct { client sarama.Client @@ -28,6 +26,8 @@ type brokerScraper struct { mb *metadata.MetricsBuilder } +type saramaMetrics map[string]map[string]interface{} // saramaMetrics is a map of metric name to tags + var nrMetricsPrefix = [...]string{ "consumer-fetch-rate-for-broker-", "incoming-byte-rate-for-broker-", From 863cfd6fd100519ae5897e0027a8d7bd8795e6d0 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Wed, 26 Jul 2023 14:04:43 -0700 Subject: [PATCH 03/36] chore: enable brokers.count and disable brokers --- receiver/kafkametricsreceiver/metadata.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 773dd8328557..aae7085397d4 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -25,7 +25,7 @@ attributes: metrics: # brokers scraper kafka.brokers: - enabled: true + enabled: false description: "[DEPRECATED] Number of brokers in the 
cluster." unit: "{brokers}" gauge: @@ -33,7 +33,7 @@ metrics: warnings: if_enabled: The metric is deprecated and will be removed. Use `kafka.brokers.count` kafka.brokers.count: - enabled: false + enabled: true description: Number of brokers in the cluster. unit: "{brokers}" sum: From 12b6be52dad1349c2bce7cd39982a370ba4f6b74 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 27 Jul 2023 07:42:17 -0700 Subject: [PATCH 04/36] chore: fix unit test issues --- .../broker_scraper_test.go | 23 ++++++------------- receiver/kafkametricsreceiver/metadata.yaml | 4 ++-- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index 37079cb16c97..65935cd9de6d 100644 --- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -10,11 +10,7 @@ import ( "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/receiver/receivertest" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver/internal/metadata" ) func TestBrokerShutdown(t *testing.T) { @@ -94,20 +90,15 @@ func TestBrokerScraper_shutdown_handles_nil_client(t *testing.T) { } func TestBrokerScraper_scrape(t *testing.T) { - client := newMockClient() - client.Mock.On("Brokers").Return(testBrokers) - bs := brokerScraper{ - client: client, - settings: receivertest.NewNopCreateSettings(), - config: Config{MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig()}, + newSaramaClient = func(addrs []string, conf *sarama.Config) (sarama.Client, error) { + return nil, fmt.Errorf("new client failed") } - require.NoError(t, bs.start(context.Background(), componenttest.NewNopHost())) - md, err := bs.scrape(context.Background()) + sc := sarama.NewConfig() + bs, err := createBrokerScraper(context.Background(), Config{}, sc, receivertest.NewNopCreateSettings()) + assert.NoError(t, err) + assert.NotNil(t, bs) + err = bs.Shutdown(context.Background()) assert.NoError(t, err) - expectedDp := int64(len(testBrokers)) - receivedMetrics := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) - receivedDp := receivedMetrics.Sum().DataPoints().At(0).IntValue() - assert.Equal(t, expectedDp, receivedDp) } func TestBrokersScraper_createBrokerScraper(t *testing.T) { diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index aae7085397d4..773dd8328557 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -25,7 +25,7 @@ attributes: metrics: # brokers scraper kafka.brokers: - enabled: false + enabled: true description: "[DEPRECATED] Number of brokers in the cluster." unit: "{brokers}" gauge: @@ -33,7 +33,7 @@ metrics: warnings: if_enabled: The metric is deprecated and will be removed. Use `kafka.brokers.count` kafka.brokers.count: - enabled: true + enabled: false description: Number of brokers in the cluster. 
unit: "{brokers}" sum: From 44a7d7ddce4bc7836929dde56815dc2ad30cda5f Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 27 Jul 2023 09:03:21 -0700 Subject: [PATCH 05/36] fix --- .../broker_scraper_test.go | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index 65935cd9de6d..fd5f0fb6b868 100644 --- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -9,7 +9,10 @@ import ( "testing" "github.com/Shopify/sarama" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver/internal/metadata" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/receiver/receivertest" ) @@ -90,15 +93,20 @@ func TestBrokerScraper_shutdown_handles_nil_client(t *testing.T) { } func TestBrokerScraper_scrape(t *testing.T) { - newSaramaClient = func(addrs []string, conf *sarama.Config) (sarama.Client, error) { - return nil, fmt.Errorf("new client failed") + client := newMockClient() + client.Mock.On("Brokers").Return(testBrokers) + bs := brokerScraper{ + client: client, + settings: receivertest.NewNopCreateSettings(), + config: Config{MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig()}, } - sc := sarama.NewConfig() - bs, err := createBrokerScraper(context.Background(), Config{}, sc, receivertest.NewNopCreateSettings()) - assert.NoError(t, err) - assert.NotNil(t, bs) - err = bs.Shutdown(context.Background()) + require.NoError(t, bs.start(context.Background(), componenttest.NewNopHost())) + md, err := bs.scrape(context.Background()) assert.NoError(t, err) + expectedDp := int64(len(testBrokers)) + receivedMetrics := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + receivedDp := receivedMetrics.Sum().DataPoints().At(0).IntValue() + assert.Equal(t, expectedDp, receivedDp) } func TestBrokersScraper_createBrokerScraper(t *testing.T) { From bac452903a8e7bfc2f3d83f4cd0eb2ad8fd50fb7 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 27 Jul 2023 09:49:15 -0700 Subject: [PATCH 06/36] fix test --- receiver/kafkametricsreceiver/broker_scraper_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index fd5f0fb6b868..f4617b5c6e66 100644 --- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -105,7 +105,7 @@ func TestBrokerScraper_scrape(t *testing.T) { assert.NoError(t, err) expectedDp := int64(len(testBrokers)) receivedMetrics := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) - receivedDp := receivedMetrics.Sum().DataPoints().At(0).IntValue() + receivedDp := receivedMetrics.Gauge().DataPoints().At(0).IntValue() assert.Equal(t, expectedDp, receivedDp) } From 9b5ce27ef8b9b863293d4ad977e282493337fbf3 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Mon, 31 Jul 2023 11:38:27 -0700 Subject: [PATCH 07/36] chore: generate run --- .../kafkametricsreceiver/documentation.md | 3 +- .../internal/metadata/generated_metrics.go | 101 +++++++++--------- .../metadata/generated_metrics_test.go | 36 +++---- 3 files changed, 69 
insertions(+), 71 deletions(-) diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 7fae06054bdb..77ab774de826 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -170,7 +170,6 @@ Number of partitions in topic. | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | - ## Optional Metrics The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: @@ -313,4 +312,4 @@ Average response size in bytes | Name | Description | Values | | ---- | ----------- | ------ | -| broker | The ID (integer) of a broker | Any Int | \ No newline at end of file +| broker | The ID (integer) of a broker | Any Int | diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index c45aa709fbbf..54a2d28640e8 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -1103,7 +1103,6 @@ func newMetricKafkaTopicPartitions(cfg MetricConfig) metricKafkaTopicPartitions type MetricsBuilder struct { startTime pcommon.Timestamp // start time that will be applied to all recorded data points. metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. buildInfo component.BuildInfo // contains version information metricKafkaBrokers metricKafkaBrokers @@ -1127,7 +1126,6 @@ type MetricsBuilder struct { metricKafkaPartitionReplicas metricKafkaPartitionReplicas metricKafkaPartitionReplicasInSync metricKafkaPartitionReplicasInSync metricKafkaTopicPartitions metricKafkaTopicPartitions - } // metricBuilderOption applies changes to default metrics builder. @@ -1301,55 +1299,6 @@ func (mb *MetricsBuilder) RecordKafkaBrokersDataPoint(ts pcommon.Timestamp, val mb.metricKafkaBrokers.recordDataPoint(mb.startTime, ts, val) } -// RecordKafkaConsumerGroupLagDataPoint adds a data point to kafka.consumer_group.lag metric. -func (mb *MetricsBuilder) RecordKafkaConsumerGroupLagDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { - mb.metricKafkaConsumerGroupLag.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue, partitionAttributeValue) -} - -// RecordKafkaConsumerGroupLagSumDataPoint adds a data point to kafka.consumer_group.lag_sum metric. -func (mb *MetricsBuilder) RecordKafkaConsumerGroupLagSumDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { - mb.metricKafkaConsumerGroupLagSum.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue) -} - -// RecordKafkaConsumerGroupMembersDataPoint adds a data point to kafka.consumer_group.members metric. -func (mb *MetricsBuilder) RecordKafkaConsumerGroupMembersDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string) { - mb.metricKafkaConsumerGroupMembers.recordDataPoint(mb.startTime, ts, val, groupAttributeValue) -} - -// RecordKafkaConsumerGroupOffsetDataPoint adds a data point to kafka.consumer_group.offset metric. 
-func (mb *MetricsBuilder) RecordKafkaConsumerGroupOffsetDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { - mb.metricKafkaConsumerGroupOffset.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue, partitionAttributeValue) -} - -// RecordKafkaConsumerGroupOffsetSumDataPoint adds a data point to kafka.consumer_group.offset_sum metric. -func (mb *MetricsBuilder) RecordKafkaConsumerGroupOffsetSumDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { - mb.metricKafkaConsumerGroupOffsetSum.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue) -} - -// RecordKafkaPartitionCurrentOffsetDataPoint adds a data point to kafka.partition.current_offset metric. -func (mb *MetricsBuilder) RecordKafkaPartitionCurrentOffsetDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { - mb.metricKafkaPartitionCurrentOffset.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) -} - -// RecordKafkaPartitionOldestOffsetDataPoint adds a data point to kafka.partition.oldest_offset metric. -func (mb *MetricsBuilder) RecordKafkaPartitionOldestOffsetDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { - mb.metricKafkaPartitionOldestOffset.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) -} - -// RecordKafkaPartitionReplicasDataPoint adds a data point to kafka.partition.replicas metric. -func (mb *MetricsBuilder) RecordKafkaPartitionReplicasDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { - mb.metricKafkaPartitionReplicas.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) -} - -// RecordKafkaPartitionReplicasInSyncDataPoint adds a data point to kafka.partition.replicas_in_sync metric. -func (mb *MetricsBuilder) RecordKafkaPartitionReplicasInSyncDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { - mb.metricKafkaPartitionReplicasInSync.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) -} - -// RecordKafkaTopicPartitionsDataPoint adds a data point to kafka.topic.partitions metric. -func (mb *MetricsBuilder) RecordKafkaTopicPartitionsDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string) { - mb.metricKafkaTopicPartitions.recordDataPoint(mb.startTime, ts, val, topicAttributeValue) -} // RecordKafkaBrokersConsumerFetchRateDataPoint adds a data point to kafka.brokers.consumer_fetch_rate metric. func (mb *MetricsBuilder) RecordKafkaBrokersConsumerFetchRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersConsumerFetchRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) @@ -1400,6 +1349,56 @@ func (mb *MetricsBuilder) RecordKafkaBrokersResponseSizeDataPoint(ts pcommon.Tim mb.metricKafkaBrokersResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } +// RecordKafkaConsumerGroupLagDataPoint adds a data point to kafka.consumer_group.lag metric. 
+func (mb *MetricsBuilder) RecordKafkaConsumerGroupLagDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { + mb.metricKafkaConsumerGroupLag.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue, partitionAttributeValue) +} + +// RecordKafkaConsumerGroupLagSumDataPoint adds a data point to kafka.consumer_group.lag_sum metric. +func (mb *MetricsBuilder) RecordKafkaConsumerGroupLagSumDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { + mb.metricKafkaConsumerGroupLagSum.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue) +} + +// RecordKafkaConsumerGroupMembersDataPoint adds a data point to kafka.consumer_group.members metric. +func (mb *MetricsBuilder) RecordKafkaConsumerGroupMembersDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string) { + mb.metricKafkaConsumerGroupMembers.recordDataPoint(mb.startTime, ts, val, groupAttributeValue) +} + +// RecordKafkaConsumerGroupOffsetDataPoint adds a data point to kafka.consumer_group.offset metric. +func (mb *MetricsBuilder) RecordKafkaConsumerGroupOffsetDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { + mb.metricKafkaConsumerGroupOffset.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue, partitionAttributeValue) +} + +// RecordKafkaConsumerGroupOffsetSumDataPoint adds a data point to kafka.consumer_group.offset_sum metric. +func (mb *MetricsBuilder) RecordKafkaConsumerGroupOffsetSumDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { + mb.metricKafkaConsumerGroupOffsetSum.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue) +} + +// RecordKafkaPartitionCurrentOffsetDataPoint adds a data point to kafka.partition.current_offset metric. +func (mb *MetricsBuilder) RecordKafkaPartitionCurrentOffsetDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { + mb.metricKafkaPartitionCurrentOffset.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) +} + +// RecordKafkaPartitionOldestOffsetDataPoint adds a data point to kafka.partition.oldest_offset metric. +func (mb *MetricsBuilder) RecordKafkaPartitionOldestOffsetDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { + mb.metricKafkaPartitionOldestOffset.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) +} + +// RecordKafkaPartitionReplicasDataPoint adds a data point to kafka.partition.replicas metric. +func (mb *MetricsBuilder) RecordKafkaPartitionReplicasDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { + mb.metricKafkaPartitionReplicas.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) +} + +// RecordKafkaPartitionReplicasInSyncDataPoint adds a data point to kafka.partition.replicas_in_sync metric. +func (mb *MetricsBuilder) RecordKafkaPartitionReplicasInSyncDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { + mb.metricKafkaPartitionReplicasInSync.recordDataPoint(mb.startTime, ts, val, topicAttributeValue, partitionAttributeValue) +} + +// RecordKafkaTopicPartitionsDataPoint adds a data point to kafka.topic.partitions metric. 
+func (mb *MetricsBuilder) RecordKafkaTopicPartitionsDataPoint(ts pcommon.Timestamp, val int64, topicAttributeValue string) { + mb.metricKafkaTopicPartitions.recordDataPoint(mb.startTime, ts, val, topicAttributeValue) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 05e4877288b9..f935170c15f4 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -103,34 +103,34 @@ func TestMetricsBuilder(t *testing.T) { mb.RecordKafkaBrokersDataPoint(ts, 1) allMetricsCount++ - mb.RecordKafkaBrokersConsumerFetchRateDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersConsumerFetchRateDataPoint(ts, 1, 6) allMetricsCount++ mb.RecordKafkaBrokersCountDataPoint(ts, 1) allMetricsCount++ - mb.RecordKafkaBrokersIncomingByteRateDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersIncomingByteRateDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordKafkaBrokersOutgoingByteRateDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersOutgoingByteRateDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordKafkaBrokersRequestLatencyDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersRequestLatencyDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordKafkaBrokersRequestRateDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersRequestRateDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordKafkaBrokersRequestSizeDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersRequestSizeDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordKafkaBrokersRequestsInFlightDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersRequestsInFlightDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordKafkaBrokersResponseRateDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersResponseRateDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordKafkaBrokersResponseSizeDataPoint(ts, 1, 1) + mb.RecordKafkaBrokersResponseSizeDataPoint(ts, 1, 6) defaultMetricsCount++ allMetricsCount++ @@ -221,7 +221,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.count": assert.False(t, validatedMetrics["kafka.brokers.count"], "Found a duplicate in the metrics slice: kafka.brokers.count") validatedMetrics["kafka.brokers.count"] = true @@ -250,7 +250,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.outgoing_byte_rate": assert.False(t, validatedMetrics["kafka.brokers.outgoing_byte_rate"], "Found a duplicate in the metrics slice: kafka.brokers.outgoing_byte_rate") validatedMetrics["kafka.brokers.outgoing_byte_rate"] = true @@ -265,7 +265,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.request_latency": assert.False(t, 
validatedMetrics["kafka.brokers.request_latency"], "Found a duplicate in the metrics slice: kafka.brokers.request_latency") validatedMetrics["kafka.brokers.request_latency"] = true @@ -280,7 +280,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.request_rate": assert.False(t, validatedMetrics["kafka.brokers.request_rate"], "Found a duplicate in the metrics slice: kafka.brokers.request_rate") validatedMetrics["kafka.brokers.request_rate"] = true @@ -295,7 +295,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.request_size": assert.False(t, validatedMetrics["kafka.brokers.request_size"], "Found a duplicate in the metrics slice: kafka.brokers.request_size") validatedMetrics["kafka.brokers.request_size"] = true @@ -310,7 +310,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.requests_in_flight": assert.False(t, validatedMetrics["kafka.brokers.requests_in_flight"], "Found a duplicate in the metrics slice: kafka.brokers.requests_in_flight") validatedMetrics["kafka.brokers.requests_in_flight"] = true @@ -325,7 +325,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, int64(1), dp.IntValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.response_rate": assert.False(t, validatedMetrics["kafka.brokers.response_rate"], "Found a duplicate in the metrics slice: kafka.brokers.response_rate") validatedMetrics["kafka.brokers.response_rate"] = true @@ -340,7 +340,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.brokers.response_size": assert.False(t, validatedMetrics["kafka.brokers.response_size"], "Found a duplicate in the metrics slice: kafka.brokers.response_size") validatedMetrics["kafka.brokers.response_size"] = true @@ -355,7 +355,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) - assert.EqualValues(t, 1, attrVal.Int()) + assert.EqualValues(t, 6, attrVal.Int()) case "kafka.consumer_group.lag": assert.False(t, validatedMetrics["kafka.consumer_group.lag"], "Found a duplicate in the metrics slice: kafka.consumer_group.lag") validatedMetrics["kafka.consumer_group.lag"] = true From a85f2c3252c937248a81a3acc0a7ede0a75edd21 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Mon, 31 Jul 2023 12:28:26 -0700 Subject: [PATCH 08/36] fix: fix linting issue --- receiver/kafkametricsreceiver/broker_scraper_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index f4617b5c6e66..5be948ad1340 100644 
--- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -9,11 +9,12 @@ import ( "testing" "github.com/Shopify/sarama" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver/internal/metadata" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/receiver/receivertest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver/internal/metadata" ) func TestBrokerShutdown(t *testing.T) { From 3d2404459ea1e403b42733603c9497077f5bc30f Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Mon, 31 Jul 2023 13:15:32 -0700 Subject: [PATCH 09/36] chore: updated to messaging.kafka --- .../kafkametricsreceiver/broker_scraper.go | 2 +- .../kafkametricsreceiver/documentation.md | 20 ++--- .../internal/metadata/generated_config.go | 20 ++--- .../internal/metadata/generated_metrics.go | 82 +++++++++---------- .../metadata/generated_metrics_test.go | 82 +++++++++---------- .../internal/metadata/testdata/config.yaml | 40 ++++----- receiver/kafkametricsreceiver/metadata.yaml | 22 ++--- .../testdata/integration/expected.yaml | 2 +- 8 files changed, 135 insertions(+), 135 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index f21d6339386b..c8c370288435 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -132,7 +132,7 @@ func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { // kafka.brokers is deprecated. This should be removed in a future release. s.mb.RecordKafkaBrokersDataPoint(now, brokerCount) - // kafka.brokers.count should replace kafka.brokers. + // messaging.kafka.brokers.count should replace kafka.brokers. s.mb.RecordKafkaBrokersCountDataPoint(now, brokerCount) return s.mb.Emit(), nil diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 77ab774de826..bc3607c21ee1 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -180,7 +180,7 @@ metrics: enabled: true ``` -### kafka.brokers.consumer_fetch_rate +### messaging.kafka.brokers.consumer_fetch_rate Average consumer fetch Rate @@ -194,7 +194,7 @@ Average consumer fetch Rate | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.count +### messaging.kafka.brokers.count Number of brokers in the cluster. @@ -202,7 +202,7 @@ Number of brokers in the cluster. | ---- | ----------- | ---------- | ----------------------- | --------- | | {brokers} | Sum | Int | Cumulative | false | -### kafka.brokers.incoming_byte_rate +### messaging.kafka.brokers.incoming_byte_rate Average tncoming Byte Rate in bytes/second @@ -216,7 +216,7 @@ Average tncoming Byte Rate in bytes/second | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.outgoing_byte_rate +### messaging.kafka.brokers.outgoing_byte_rate Average outgoing Byte Rate in bytes/second. @@ -230,7 +230,7 @@ Average outgoing Byte Rate in bytes/second. 
| ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.request_latency +### messaging.kafka.brokers.request_latency Average request latency in ms @@ -244,7 +244,7 @@ Average request latency in ms | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.request_rate +### messaging.kafka.brokers.request_rate Average request rate per second. @@ -258,7 +258,7 @@ Average request rate per second. | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.request_size +### messaging.kafka.brokers.request_size Average request size in bytes @@ -272,7 +272,7 @@ Average request size in bytes | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.requests_in_flight +### messaging.kafka.brokers.requests_in_flight Requests in flight @@ -286,7 +286,7 @@ Requests in flight | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.response_rate +### messaging.kafka.brokers.response_rate Average response rate per second @@ -300,7 +300,7 @@ Average response rate per second | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### kafka.brokers.response_size +### messaging.kafka.brokers.response_size Average response size in bytes diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index 5abf60966aa0..c8350898aba6 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -26,16 +26,16 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for kafkametrics metrics. 
type MetricsConfig struct { KafkaBrokers MetricConfig `mapstructure:"kafka.brokers"` - KafkaBrokersConsumerFetchRate MetricConfig `mapstructure:"kafka.brokers.consumer_fetch_rate"` - KafkaBrokersCount MetricConfig `mapstructure:"kafka.brokers.count"` - KafkaBrokersIncomingByteRate MetricConfig `mapstructure:"kafka.brokers.incoming_byte_rate"` - KafkaBrokersOutgoingByteRate MetricConfig `mapstructure:"kafka.brokers.outgoing_byte_rate"` - KafkaBrokersRequestLatency MetricConfig `mapstructure:"kafka.brokers.request_latency"` - KafkaBrokersRequestRate MetricConfig `mapstructure:"kafka.brokers.request_rate"` - KafkaBrokersRequestSize MetricConfig `mapstructure:"kafka.brokers.request_size"` - KafkaBrokersRequestsInFlight MetricConfig `mapstructure:"kafka.brokers.requests_in_flight"` - KafkaBrokersResponseRate MetricConfig `mapstructure:"kafka.brokers.response_rate"` - KafkaBrokersResponseSize MetricConfig `mapstructure:"kafka.brokers.response_size"` + KafkaBrokersConsumerFetchRate MetricConfig `mapstructure:"messaging.kafka.brokers.consumer_fetch_rate"` + KafkaBrokersCount MetricConfig `mapstructure:"messaging.kafka.brokers.count"` + KafkaBrokersIncomingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.incoming_byte_rate"` + KafkaBrokersOutgoingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.outgoing_byte_rate"` + KafkaBrokersRequestLatency MetricConfig `mapstructure:"messaging.kafka.brokers.request_latency"` + KafkaBrokersRequestRate MetricConfig `mapstructure:"messaging.kafka.brokers.request_rate"` + KafkaBrokersRequestSize MetricConfig `mapstructure:"messaging.kafka.brokers.request_size"` + KafkaBrokersRequestsInFlight MetricConfig `mapstructure:"messaging.kafka.brokers.requests_in_flight"` + KafkaBrokersResponseRate MetricConfig `mapstructure:"messaging.kafka.brokers.response_rate"` + KafkaBrokersResponseSize MetricConfig `mapstructure:"messaging.kafka.brokers.response_size"` KafkaConsumerGroupLag MetricConfig `mapstructure:"kafka.consumer_group.lag"` KafkaConsumerGroupLagSum MetricConfig `mapstructure:"kafka.consumer_group.lag_sum"` KafkaConsumerGroupMembers MetricConfig `mapstructure:"kafka.consumer_group.members"` diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index 54a2d28640e8..0f0cc9f1142b 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -66,9 +66,9 @@ type metricKafkaBrokersConsumerFetchRate struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.consumer_fetch_rate metric with initial data. +// init fills messaging.kafka.brokers.consumer_fetch_rate metric with initial data. func (m *metricKafkaBrokersConsumerFetchRate) init() { - m.data.SetName("kafka.brokers.consumer_fetch_rate") + m.data.SetName("messaging.kafka.brokers.consumer_fetch_rate") m.data.SetDescription("Average consumer fetch Rate") m.data.SetUnit("{fetches}/s") m.data.SetEmptyGauge() @@ -117,9 +117,9 @@ type metricKafkaBrokersCount struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.count metric with initial data. +// init fills messaging.kafka.brokers.count metric with initial data. 
func (m *metricKafkaBrokersCount) init() { - m.data.SetName("kafka.brokers.count") + m.data.SetName("messaging.kafka.brokers.count") m.data.SetDescription("Number of brokers in the cluster.") m.data.SetUnit("{brokers}") m.data.SetEmptySum() @@ -168,9 +168,9 @@ type metricKafkaBrokersIncomingByteRate struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.incoming_byte_rate metric with initial data. +// init fills messaging.kafka.brokers.incoming_byte_rate metric with initial data. func (m *metricKafkaBrokersIncomingByteRate) init() { - m.data.SetName("kafka.brokers.incoming_byte_rate") + m.data.SetName("messaging.kafka.brokers.incoming_byte_rate") m.data.SetDescription("Average tncoming Byte Rate in bytes/second") m.data.SetUnit("1") m.data.SetEmptyGauge() @@ -219,9 +219,9 @@ type metricKafkaBrokersOutgoingByteRate struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.outgoing_byte_rate metric with initial data. +// init fills messaging.kafka.brokers.outgoing_byte_rate metric with initial data. func (m *metricKafkaBrokersOutgoingByteRate) init() { - m.data.SetName("kafka.brokers.outgoing_byte_rate") + m.data.SetName("messaging.kafka.brokers.outgoing_byte_rate") m.data.SetDescription("Average outgoing Byte Rate in bytes/second.") m.data.SetUnit("1") m.data.SetEmptyGauge() @@ -270,9 +270,9 @@ type metricKafkaBrokersRequestLatency struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.request_latency metric with initial data. +// init fills messaging.kafka.brokers.request_latency metric with initial data. func (m *metricKafkaBrokersRequestLatency) init() { - m.data.SetName("kafka.brokers.request_latency") + m.data.SetName("messaging.kafka.brokers.request_latency") m.data.SetDescription("Average request latency in ms") m.data.SetUnit("ms") m.data.SetEmptyGauge() @@ -321,9 +321,9 @@ type metricKafkaBrokersRequestRate struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.request_rate metric with initial data. +// init fills messaging.kafka.brokers.request_rate metric with initial data. func (m *metricKafkaBrokersRequestRate) init() { - m.data.SetName("kafka.brokers.request_rate") + m.data.SetName("messaging.kafka.brokers.request_rate") m.data.SetDescription("Average request rate per second.") m.data.SetUnit("{requests}/s") m.data.SetEmptyGauge() @@ -372,9 +372,9 @@ type metricKafkaBrokersRequestSize struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.request_size metric with initial data. +// init fills messaging.kafka.brokers.request_size metric with initial data. func (m *metricKafkaBrokersRequestSize) init() { - m.data.SetName("kafka.brokers.request_size") + m.data.SetName("messaging.kafka.brokers.request_size") m.data.SetDescription("Average request size in bytes") m.data.SetUnit("By") m.data.SetEmptyGauge() @@ -423,9 +423,9 @@ type metricKafkaBrokersRequestsInFlight struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.requests_in_flight metric with initial data. +// init fills messaging.kafka.brokers.requests_in_flight metric with initial data. 
func (m *metricKafkaBrokersRequestsInFlight) init() { - m.data.SetName("kafka.brokers.requests_in_flight") + m.data.SetName("messaging.kafka.brokers.requests_in_flight") m.data.SetDescription("Requests in flight") m.data.SetUnit("{requests}") m.data.SetEmptyGauge() @@ -474,9 +474,9 @@ type metricKafkaBrokersResponseRate struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.response_rate metric with initial data. +// init fills messaging.kafka.brokers.response_rate metric with initial data. func (m *metricKafkaBrokersResponseRate) init() { - m.data.SetName("kafka.brokers.response_rate") + m.data.SetName("messaging.kafka.brokers.response_rate") m.data.SetDescription("Average response rate per second") m.data.SetUnit("{response}/s") m.data.SetEmptyGauge() @@ -525,9 +525,9 @@ type metricKafkaBrokersResponseSize struct { capacity int // max observed number of data points added to the metric. } -// init fills kafka.brokers.response_size metric with initial data. +// init fills messaging.kafka.brokers.response_size metric with initial data. func (m *metricKafkaBrokersResponseSize) init() { - m.data.SetName("kafka.brokers.response_size") + m.data.SetName("messaging.kafka.brokers.response_size") m.data.SetDescription("Average response size in bytes") m.data.SetUnit("By") m.data.SetEmptyGauge() @@ -1140,37 +1140,37 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { if mbc.Metrics.KafkaBrokers.Enabled { - settings.Logger.Warn("[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. Use `kafka.brokers.count`") + settings.Logger.Warn("[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.brokers.count`") } if !mbc.Metrics.KafkaBrokersConsumerFetchRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersCount.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.count`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.count`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersIncomingByteRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersOutgoingByteRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersRequestLatency.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersRequestRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersRequestSize.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_size`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_size`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersRequestsInFlight.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersResponseRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_rate`: This metric 
will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.") } if !mbc.Metrics.KafkaBrokersResponseSize.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_size`: This metric will be enabled by default in the next versions.") + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_size`: This metric will be enabled by default in the next versions.") } mb := &MetricsBuilder{ startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -1299,52 +1299,52 @@ func (mb *MetricsBuilder) RecordKafkaBrokersDataPoint(ts pcommon.Timestamp, val mb.metricKafkaBrokers.recordDataPoint(mb.startTime, ts, val) } -// RecordKafkaBrokersConsumerFetchRateDataPoint adds a data point to kafka.brokers.consumer_fetch_rate metric. +// RecordKafkaBrokersConsumerFetchRateDataPoint adds a data point to messaging.kafka.brokers.consumer_fetch_rate metric. func (mb *MetricsBuilder) RecordKafkaBrokersConsumerFetchRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersConsumerFetchRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersCountDataPoint adds a data point to kafka.brokers.count metric. +// RecordKafkaBrokersCountDataPoint adds a data point to messaging.kafka.brokers.count metric. func (mb *MetricsBuilder) RecordKafkaBrokersCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricKafkaBrokersCount.recordDataPoint(mb.startTime, ts, val) } -// RecordKafkaBrokersIncomingByteRateDataPoint adds a data point to kafka.brokers.incoming_byte_rate metric. +// RecordKafkaBrokersIncomingByteRateDataPoint adds a data point to messaging.kafka.brokers.incoming_byte_rate metric. func (mb *MetricsBuilder) RecordKafkaBrokersIncomingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersIncomingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersOutgoingByteRateDataPoint adds a data point to kafka.brokers.outgoing_byte_rate metric. +// RecordKafkaBrokersOutgoingByteRateDataPoint adds a data point to messaging.kafka.brokers.outgoing_byte_rate metric. func (mb *MetricsBuilder) RecordKafkaBrokersOutgoingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersOutgoingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersRequestLatencyDataPoint adds a data point to kafka.brokers.request_latency metric. +// RecordKafkaBrokersRequestLatencyDataPoint adds a data point to messaging.kafka.brokers.request_latency metric. func (mb *MetricsBuilder) RecordKafkaBrokersRequestLatencyDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersRequestLatency.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersRequestRateDataPoint adds a data point to kafka.brokers.request_rate metric. +// RecordKafkaBrokersRequestRateDataPoint adds a data point to messaging.kafka.brokers.request_rate metric. 
func (mb *MetricsBuilder) RecordKafkaBrokersRequestRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersRequestRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersRequestSizeDataPoint adds a data point to kafka.brokers.request_size metric. +// RecordKafkaBrokersRequestSizeDataPoint adds a data point to messaging.kafka.brokers.request_size metric. func (mb *MetricsBuilder) RecordKafkaBrokersRequestSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersRequestSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersRequestsInFlightDataPoint adds a data point to kafka.brokers.requests_in_flight metric. +// RecordKafkaBrokersRequestsInFlightDataPoint adds a data point to messaging.kafka.brokers.requests_in_flight metric. func (mb *MetricsBuilder) RecordKafkaBrokersRequestsInFlightDataPoint(ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { mb.metricKafkaBrokersRequestsInFlight.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersResponseRateDataPoint adds a data point to kafka.brokers.response_rate metric. +// RecordKafkaBrokersResponseRateDataPoint adds a data point to messaging.kafka.brokers.response_rate metric. func (mb *MetricsBuilder) RecordKafkaBrokersResponseRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersResponseRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordKafkaBrokersResponseSizeDataPoint adds a data point to kafka.brokers.response_size metric. +// RecordKafkaBrokersResponseSizeDataPoint adds a data point to messaging.kafka.brokers.response_size metric. func (mb *MetricsBuilder) RecordKafkaBrokersResponseSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricKafkaBrokersResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index f935170c15f4..c486894f54d5 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -50,47 +50,47 @@ func TestMetricsBuilder(t *testing.T) { expectedWarnings := 0 if test.configSet == testSetDefault || test.configSet == testSetAll { - assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. Use `kafka.brokers.count`", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.brokers.count`", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.request_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) 
expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `kafka.brokers.response_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } assert.Equal(t, expectedWarnings, observedLogs.Len()) @@ -207,9 +207,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "kafka.brokers.consumer_fetch_rate": - assert.False(t, validatedMetrics["kafka.brokers.consumer_fetch_rate"], "Found a duplicate in the metrics slice: kafka.brokers.consumer_fetch_rate") - validatedMetrics["kafka.brokers.consumer_fetch_rate"] = true + case "messaging.kafka.brokers.consumer_fetch_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.consumer_fetch_rate") + validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average consumer fetch Rate", ms.At(i).Description()) @@ -222,9 +222,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.count": - assert.False(t, validatedMetrics["kafka.brokers.count"], "Found a duplicate in the metrics slice: kafka.brokers.count") - validatedMetrics["kafka.brokers.count"] = true + case "messaging.kafka.brokers.count": + assert.False(t, validatedMetrics["messaging.kafka.brokers.count"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.count") + validatedMetrics["messaging.kafka.brokers.count"] = true assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of brokers in the cluster.", ms.At(i).Description()) @@ -236,9 +236,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "kafka.brokers.incoming_byte_rate": - 
assert.False(t, validatedMetrics["kafka.brokers.incoming_byte_rate"], "Found a duplicate in the metrics slice: kafka.brokers.incoming_byte_rate") - validatedMetrics["kafka.brokers.incoming_byte_rate"] = true + case "messaging.kafka.brokers.incoming_byte_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.incoming_byte_rate") + validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average tncoming Byte Rate in bytes/second", ms.At(i).Description()) @@ -251,9 +251,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.outgoing_byte_rate": - assert.False(t, validatedMetrics["kafka.brokers.outgoing_byte_rate"], "Found a duplicate in the metrics slice: kafka.brokers.outgoing_byte_rate") - validatedMetrics["kafka.brokers.outgoing_byte_rate"] = true + case "messaging.kafka.brokers.outgoing_byte_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.outgoing_byte_rate") + validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average outgoing Byte Rate in bytes/second.", ms.At(i).Description()) @@ -266,9 +266,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.request_latency": - assert.False(t, validatedMetrics["kafka.brokers.request_latency"], "Found a duplicate in the metrics slice: kafka.brokers.request_latency") - validatedMetrics["kafka.brokers.request_latency"] = true + case "messaging.kafka.brokers.request_latency": + assert.False(t, validatedMetrics["messaging.kafka.brokers.request_latency"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_latency") + validatedMetrics["messaging.kafka.brokers.request_latency"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average request latency in ms", ms.At(i).Description()) @@ -281,9 +281,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.request_rate": - assert.False(t, validatedMetrics["kafka.brokers.request_rate"], "Found a duplicate in the metrics slice: kafka.brokers.request_rate") - validatedMetrics["kafka.brokers.request_rate"] = true + case "messaging.kafka.brokers.request_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.request_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_rate") + validatedMetrics["messaging.kafka.brokers.request_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average request rate per second.", ms.At(i).Description()) @@ -296,9 +296,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.request_size": - 
assert.False(t, validatedMetrics["kafka.brokers.request_size"], "Found a duplicate in the metrics slice: kafka.brokers.request_size") - validatedMetrics["kafka.brokers.request_size"] = true + case "messaging.kafka.brokers.request_size": + assert.False(t, validatedMetrics["messaging.kafka.brokers.request_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_size") + validatedMetrics["messaging.kafka.brokers.request_size"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average request size in bytes", ms.At(i).Description()) @@ -311,9 +311,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.requests_in_flight": - assert.False(t, validatedMetrics["kafka.brokers.requests_in_flight"], "Found a duplicate in the metrics slice: kafka.brokers.requests_in_flight") - validatedMetrics["kafka.brokers.requests_in_flight"] = true + case "messaging.kafka.brokers.requests_in_flight": + assert.False(t, validatedMetrics["messaging.kafka.brokers.requests_in_flight"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.requests_in_flight") + validatedMetrics["messaging.kafka.brokers.requests_in_flight"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Requests in flight", ms.At(i).Description()) @@ -326,9 +326,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.response_rate": - assert.False(t, validatedMetrics["kafka.brokers.response_rate"], "Found a duplicate in the metrics slice: kafka.brokers.response_rate") - validatedMetrics["kafka.brokers.response_rate"] = true + case "messaging.kafka.brokers.response_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.response_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_rate") + validatedMetrics["messaging.kafka.brokers.response_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average response rate per second", ms.At(i).Description()) @@ -341,9 +341,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "kafka.brokers.response_size": - assert.False(t, validatedMetrics["kafka.brokers.response_size"], "Found a duplicate in the metrics slice: kafka.brokers.response_size") - validatedMetrics["kafka.brokers.response_size"] = true + case "messaging.kafka.brokers.response_size": + assert.False(t, validatedMetrics["messaging.kafka.brokers.response_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_size") + validatedMetrics["messaging.kafka.brokers.response_size"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average response size in bytes", ms.At(i).Description()) diff --git a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml index 675b183ca2f0..efe5c647fcd4 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml +++ 
b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml @@ -3,25 +3,25 @@ all_set: metrics: kafka.brokers: enabled: true - kafka.brokers.consumer_fetch_rate: + messaging.kafka.brokers.consumer_fetch_rate: enabled: true - kafka.brokers.count: + messaging.kafka.brokers.count: enabled: true - kafka.brokers.incoming_byte_rate: + messaging.kafka.brokers.incoming_byte_rate: enabled: true - kafka.brokers.outgoing_byte_rate: + messaging.kafka.brokers.outgoing_byte_rate: enabled: true - kafka.brokers.request_latency: + messaging.kafka.brokers.request_latency: enabled: true - kafka.brokers.request_rate: + messaging.kafka.brokers.request_rate: enabled: true - kafka.brokers.request_size: + messaging.kafka.brokers.request_size: enabled: true - kafka.brokers.requests_in_flight: + messaging.kafka.brokers.requests_in_flight: enabled: true - kafka.brokers.response_rate: + messaging.kafka.brokers.response_rate: enabled: true - kafka.brokers.response_size: + messaging.kafka.brokers.response_size: enabled: true kafka.consumer_group.lag: enabled: true @@ -47,25 +47,25 @@ none_set: metrics: kafka.brokers: enabled: false - kafka.brokers.consumer_fetch_rate: + messaging.kafka.brokers.consumer_fetch_rate: enabled: false - kafka.brokers.count: + messaging.kafka.brokers.count: enabled: false - kafka.brokers.incoming_byte_rate: + messaging.kafka.brokers.incoming_byte_rate: enabled: false - kafka.brokers.outgoing_byte_rate: + messaging.kafka.brokers.outgoing_byte_rate: enabled: false - kafka.brokers.request_latency: + messaging.kafka.brokers.request_latency: enabled: false - kafka.brokers.request_rate: + messaging.kafka.brokers.request_rate: enabled: false - kafka.brokers.request_size: + messaging.kafka.brokers.request_size: enabled: false - kafka.brokers.requests_in_flight: + messaging.kafka.brokers.requests_in_flight: enabled: false - kafka.brokers.response_rate: + messaging.kafka.brokers.response_rate: enabled: false - kafka.brokers.response_size: + messaging.kafka.brokers.response_size: enabled: false kafka.consumer_group.lag: enabled: false diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 773dd8328557..69572f91faa1 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -31,8 +31,8 @@ metrics: gauge: value_type: int warnings: - if_enabled: The metric is deprecated and will be removed. Use `kafka.brokers.count` - kafka.brokers.count: + if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.brokers.count` + messaging.kafka.brokers.count: enabled: false description: Number of brokers in the cluster. unit: "{brokers}" @@ -42,7 +42,7 @@ metrics: aggregation: cumulative warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.consumer_fetch_rate: + messaging.kafka.brokers.consumer_fetch_rate: enabled: false description: Average consumer fetch Rate unit: "{fetches}/s" @@ -51,7 +51,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.incoming_byte_rate: + messaging.kafka.brokers.incoming_byte_rate: enabled: false description: Average tncoming Byte Rate in bytes/second unit: 1 @@ -60,7 +60,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. 
- kafka.brokers.outgoing_byte_rate: + messaging.kafka.brokers.outgoing_byte_rate: enabled: false description: Average outgoing Byte Rate in bytes/second. unit: 1 @@ -69,7 +69,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.request_latency: + messaging.kafka.brokers.request_latency: enabled: false description: Average request latency in ms unit: "ms" @@ -78,7 +78,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.response_rate: + messaging.kafka.brokers.response_rate: enabled: false description: Average response rate per second unit: "{response}/s" @@ -87,7 +87,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.response_size: + messaging.kafka.brokers.response_size: enabled: false description: Average response size in bytes unit: "By" @@ -96,7 +96,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.request_rate: + messaging.kafka.brokers.request_rate: enabled: false description: Average request rate per second. unit: "{requests}/s" @@ -105,7 +105,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.request_size: + messaging.kafka.brokers.request_size: enabled: false description: Average request size in bytes unit: "By" @@ -114,7 +114,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - kafka.brokers.requests_in_flight: + messaging.kafka.brokers.requests_in_flight: enabled: false description: Requests in flight unit: "{requests}" diff --git a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml index cff4331032c4..f332dc2f569d 100644 --- a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml +++ b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml @@ -3,7 +3,7 @@ resourceMetrics: scopeMetrics: - metrics: - description: Number of brokers in the cluster. - sum: + Gauge: aggregationTemporality: 2 dataPoints: - asInt: "1" From 075d14954df9529d08bf4593ba7ca2ee1dd329f6 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Mon, 31 Jul 2023 14:40:32 -0700 Subject: [PATCH 10/36] fix: ran make generate again --- .../internal/metadata/generated_config.go | 102 +- .../metadata/generated_config_test.go | 84 +- .../internal/metadata/generated_metrics.go | 902 +++++++++--------- .../metadata/generated_metrics_test.go | 358 +++---- .../internal/metadata/testdata/config.yaml | 66 +- .../testdata/integration/expected.yaml | 2 +- 6 files changed, 757 insertions(+), 757 deletions(-) diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index c8350898aba6..e679464972f0 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -25,27 +25,27 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for kafkametrics metrics. 
type MetricsConfig struct { - KafkaBrokers MetricConfig `mapstructure:"kafka.brokers"` - KafkaBrokersConsumerFetchRate MetricConfig `mapstructure:"messaging.kafka.brokers.consumer_fetch_rate"` - KafkaBrokersCount MetricConfig `mapstructure:"messaging.kafka.brokers.count"` - KafkaBrokersIncomingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.incoming_byte_rate"` - KafkaBrokersOutgoingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.outgoing_byte_rate"` - KafkaBrokersRequestLatency MetricConfig `mapstructure:"messaging.kafka.brokers.request_latency"` - KafkaBrokersRequestRate MetricConfig `mapstructure:"messaging.kafka.brokers.request_rate"` - KafkaBrokersRequestSize MetricConfig `mapstructure:"messaging.kafka.brokers.request_size"` - KafkaBrokersRequestsInFlight MetricConfig `mapstructure:"messaging.kafka.brokers.requests_in_flight"` - KafkaBrokersResponseRate MetricConfig `mapstructure:"messaging.kafka.brokers.response_rate"` - KafkaBrokersResponseSize MetricConfig `mapstructure:"messaging.kafka.brokers.response_size"` - KafkaConsumerGroupLag MetricConfig `mapstructure:"kafka.consumer_group.lag"` - KafkaConsumerGroupLagSum MetricConfig `mapstructure:"kafka.consumer_group.lag_sum"` - KafkaConsumerGroupMembers MetricConfig `mapstructure:"kafka.consumer_group.members"` - KafkaConsumerGroupOffset MetricConfig `mapstructure:"kafka.consumer_group.offset"` - KafkaConsumerGroupOffsetSum MetricConfig `mapstructure:"kafka.consumer_group.offset_sum"` - KafkaPartitionCurrentOffset MetricConfig `mapstructure:"kafka.partition.current_offset"` - KafkaPartitionOldestOffset MetricConfig `mapstructure:"kafka.partition.oldest_offset"` - KafkaPartitionReplicas MetricConfig `mapstructure:"kafka.partition.replicas"` - KafkaPartitionReplicasInSync MetricConfig `mapstructure:"kafka.partition.replicas_in_sync"` - KafkaTopicPartitions MetricConfig `mapstructure:"kafka.topic.partitions"` + KafkaBrokers MetricConfig `mapstructure:"kafka.brokers"` + KafkaConsumerGroupLag MetricConfig `mapstructure:"kafka.consumer_group.lag"` + KafkaConsumerGroupLagSum MetricConfig `mapstructure:"kafka.consumer_group.lag_sum"` + KafkaConsumerGroupMembers MetricConfig `mapstructure:"kafka.consumer_group.members"` + KafkaConsumerGroupOffset MetricConfig `mapstructure:"kafka.consumer_group.offset"` + KafkaConsumerGroupOffsetSum MetricConfig `mapstructure:"kafka.consumer_group.offset_sum"` + KafkaPartitionCurrentOffset MetricConfig `mapstructure:"kafka.partition.current_offset"` + KafkaPartitionOldestOffset MetricConfig `mapstructure:"kafka.partition.oldest_offset"` + KafkaPartitionReplicas MetricConfig `mapstructure:"kafka.partition.replicas"` + KafkaPartitionReplicasInSync MetricConfig `mapstructure:"kafka.partition.replicas_in_sync"` + KafkaTopicPartitions MetricConfig `mapstructure:"kafka.topic.partitions"` + MessagingKafkaBrokersConsumerFetchRate MetricConfig `mapstructure:"messaging.kafka.brokers.consumer_fetch_rate"` + MessagingKafkaBrokersCount MetricConfig `mapstructure:"messaging.kafka.brokers.count"` + MessagingKafkaBrokersIncomingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.incoming_byte_rate"` + MessagingKafkaBrokersOutgoingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.outgoing_byte_rate"` + MessagingKafkaBrokersRequestLatency MetricConfig `mapstructure:"messaging.kafka.brokers.request_latency"` + MessagingKafkaBrokersRequestRate MetricConfig `mapstructure:"messaging.kafka.brokers.request_rate"` + MessagingKafkaBrokersRequestSize MetricConfig 
`mapstructure:"messaging.kafka.brokers.request_size"` + MessagingKafkaBrokersRequestsInFlight MetricConfig `mapstructure:"messaging.kafka.brokers.requests_in_flight"` + MessagingKafkaBrokersResponseRate MetricConfig `mapstructure:"messaging.kafka.brokers.response_rate"` + MessagingKafkaBrokersResponseSize MetricConfig `mapstructure:"messaging.kafka.brokers.response_size"` } func DefaultMetricsConfig() MetricsConfig { @@ -53,36 +53,6 @@ func DefaultMetricsConfig() MetricsConfig { KafkaBrokers: MetricConfig{ Enabled: true, }, - KafkaBrokersConsumerFetchRate: MetricConfig{ - Enabled: false, - }, - KafkaBrokersCount: MetricConfig{ - Enabled: false, - }, - KafkaBrokersIncomingByteRate: MetricConfig{ - Enabled: false, - }, - KafkaBrokersOutgoingByteRate: MetricConfig{ - Enabled: false, - }, - KafkaBrokersRequestLatency: MetricConfig{ - Enabled: false, - }, - KafkaBrokersRequestRate: MetricConfig{ - Enabled: false, - }, - KafkaBrokersRequestSize: MetricConfig{ - Enabled: false, - }, - KafkaBrokersRequestsInFlight: MetricConfig{ - Enabled: false, - }, - KafkaBrokersResponseRate: MetricConfig{ - Enabled: false, - }, - KafkaBrokersResponseSize: MetricConfig{ - Enabled: false, - }, KafkaConsumerGroupLag: MetricConfig{ Enabled: true, }, @@ -113,6 +83,36 @@ func DefaultMetricsConfig() MetricsConfig { KafkaTopicPartitions: MetricConfig{ Enabled: true, }, + MessagingKafkaBrokersConsumerFetchRate: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersCount: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersIncomingByteRate: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersOutgoingByteRate: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersRequestLatency: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersRequestRate: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersRequestSize: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersRequestsInFlight: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersResponseRate: MetricConfig{ + Enabled: false, + }, + MessagingKafkaBrokersResponseSize: MetricConfig{ + Enabled: false, + }, } } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go index 957b4c475293..b43b3d6480fd 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go @@ -26,27 +26,27 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - KafkaBrokers: MetricConfig{Enabled: true}, - KafkaBrokersConsumerFetchRate: MetricConfig{Enabled: true}, - KafkaBrokersCount: MetricConfig{Enabled: true}, - KafkaBrokersIncomingByteRate: MetricConfig{Enabled: true}, - KafkaBrokersOutgoingByteRate: MetricConfig{Enabled: true}, - KafkaBrokersRequestLatency: MetricConfig{Enabled: true}, - KafkaBrokersRequestRate: MetricConfig{Enabled: true}, - KafkaBrokersRequestSize: MetricConfig{Enabled: true}, - KafkaBrokersRequestsInFlight: MetricConfig{Enabled: true}, - KafkaBrokersResponseRate: MetricConfig{Enabled: true}, - KafkaBrokersResponseSize: MetricConfig{Enabled: true}, - KafkaConsumerGroupLag: MetricConfig{Enabled: true}, - KafkaConsumerGroupLagSum: MetricConfig{Enabled: true}, - KafkaConsumerGroupMembers: MetricConfig{Enabled: true}, - KafkaConsumerGroupOffset: MetricConfig{Enabled: true}, - KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: true}, - KafkaPartitionCurrentOffset: 
MetricConfig{Enabled: true}, - KafkaPartitionOldestOffset: MetricConfig{Enabled: true}, - KafkaPartitionReplicas: MetricConfig{Enabled: true}, - KafkaPartitionReplicasInSync: MetricConfig{Enabled: true}, - KafkaTopicPartitions: MetricConfig{Enabled: true}, + KafkaBrokers: MetricConfig{Enabled: true}, + KafkaConsumerGroupLag: MetricConfig{Enabled: true}, + KafkaConsumerGroupLagSum: MetricConfig{Enabled: true}, + KafkaConsumerGroupMembers: MetricConfig{Enabled: true}, + KafkaConsumerGroupOffset: MetricConfig{Enabled: true}, + KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: true}, + KafkaPartitionCurrentOffset: MetricConfig{Enabled: true}, + KafkaPartitionOldestOffset: MetricConfig{Enabled: true}, + KafkaPartitionReplicas: MetricConfig{Enabled: true}, + KafkaPartitionReplicasInSync: MetricConfig{Enabled: true}, + KafkaTopicPartitions: MetricConfig{Enabled: true}, + MessagingKafkaBrokersConsumerFetchRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokersCount: MetricConfig{Enabled: true}, + MessagingKafkaBrokersIncomingByteRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokersOutgoingByteRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokersRequestLatency: MetricConfig{Enabled: true}, + MessagingKafkaBrokersRequestRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokersRequestSize: MetricConfig{Enabled: true}, + MessagingKafkaBrokersRequestsInFlight: MetricConfig{Enabled: true}, + MessagingKafkaBrokersResponseRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokersResponseSize: MetricConfig{Enabled: true}, }, }, }, @@ -54,27 +54,27 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - KafkaBrokers: MetricConfig{Enabled: false}, - KafkaBrokersConsumerFetchRate: MetricConfig{Enabled: false}, - KafkaBrokersCount: MetricConfig{Enabled: false}, - KafkaBrokersIncomingByteRate: MetricConfig{Enabled: false}, - KafkaBrokersOutgoingByteRate: MetricConfig{Enabled: false}, - KafkaBrokersRequestLatency: MetricConfig{Enabled: false}, - KafkaBrokersRequestRate: MetricConfig{Enabled: false}, - KafkaBrokersRequestSize: MetricConfig{Enabled: false}, - KafkaBrokersRequestsInFlight: MetricConfig{Enabled: false}, - KafkaBrokersResponseRate: MetricConfig{Enabled: false}, - KafkaBrokersResponseSize: MetricConfig{Enabled: false}, - KafkaConsumerGroupLag: MetricConfig{Enabled: false}, - KafkaConsumerGroupLagSum: MetricConfig{Enabled: false}, - KafkaConsumerGroupMembers: MetricConfig{Enabled: false}, - KafkaConsumerGroupOffset: MetricConfig{Enabled: false}, - KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: false}, - KafkaPartitionCurrentOffset: MetricConfig{Enabled: false}, - KafkaPartitionOldestOffset: MetricConfig{Enabled: false}, - KafkaPartitionReplicas: MetricConfig{Enabled: false}, - KafkaPartitionReplicasInSync: MetricConfig{Enabled: false}, - KafkaTopicPartitions: MetricConfig{Enabled: false}, + KafkaBrokers: MetricConfig{Enabled: false}, + KafkaConsumerGroupLag: MetricConfig{Enabled: false}, + KafkaConsumerGroupLagSum: MetricConfig{Enabled: false}, + KafkaConsumerGroupMembers: MetricConfig{Enabled: false}, + KafkaConsumerGroupOffset: MetricConfig{Enabled: false}, + KafkaConsumerGroupOffsetSum: MetricConfig{Enabled: false}, + KafkaPartitionCurrentOffset: MetricConfig{Enabled: false}, + KafkaPartitionOldestOffset: MetricConfig{Enabled: false}, + KafkaPartitionReplicas: MetricConfig{Enabled: false}, + KafkaPartitionReplicasInSync: MetricConfig{Enabled: false}, + KafkaTopicPartitions: MetricConfig{Enabled: false}, + 
MessagingKafkaBrokersConsumerFetchRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokersCount: MetricConfig{Enabled: false}, + MessagingKafkaBrokersIncomingByteRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokersOutgoingByteRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokersRequestLatency: MetricConfig{Enabled: false}, + MessagingKafkaBrokersRequestRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokersRequestSize: MetricConfig{Enabled: false}, + MessagingKafkaBrokersRequestsInFlight: MetricConfig{Enabled: false}, + MessagingKafkaBrokersResponseRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokersResponseSize: MetricConfig{Enabled: false}, }, }, }, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index 0f0cc9f1142b..6d5edec4d36c 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -60,41 +60,43 @@ func newMetricKafkaBrokers(cfg MetricConfig) metricKafkaBrokers { return m } -type metricKafkaBrokersConsumerFetchRate struct { +type metricKafkaConsumerGroupLag struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.consumer_fetch_rate metric with initial data. -func (m *metricKafkaBrokersConsumerFetchRate) init() { - m.data.SetName("messaging.kafka.brokers.consumer_fetch_rate") - m.data.SetDescription("Average consumer fetch Rate") - m.data.SetUnit("{fetches}/s") +// init fills kafka.consumer_group.lag metric with initial data. +func (m *metricKafkaConsumerGroupLag) init() { + m.data.SetName("kafka.consumer_group.lag") + m.data.SetDescription("Current approximate lag of consumer group at partition of topic") + m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersConsumerFetchRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaConsumerGroupLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("group", groupAttributeValue) + dp.Attributes().PutStr("topic", topicAttributeValue) + dp.Attributes().PutInt("partition", partitionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersConsumerFetchRate) updateCapacity() { +func (m *metricKafkaConsumerGroupLag) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersConsumerFetchRate) emit(metrics pmetric.MetricSlice) { +func (m *metricKafkaConsumerGroupLag) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -102,8 +104,8 @@ func (m *metricKafkaBrokersConsumerFetchRate) emit(metrics pmetric.MetricSlice) } } -func newMetricKafkaBrokersConsumerFetchRate(cfg MetricConfig) metricKafkaBrokersConsumerFetchRate { - m := metricKafkaBrokersConsumerFetchRate{config: cfg} +func newMetricKafkaConsumerGroupLag(cfg MetricConfig) metricKafkaConsumerGroupLag { + m := metricKafkaConsumerGroupLag{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -111,50 +113,51 @@ func newMetricKafkaBrokersConsumerFetchRate(cfg MetricConfig) metricKafkaBrokers return m } -type metricKafkaBrokersCount struct { +type metricKafkaConsumerGroupLagSum struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.count metric with initial data. -func (m *metricKafkaBrokersCount) init() { - m.data.SetName("messaging.kafka.brokers.count") - m.data.SetDescription("Number of brokers in the cluster.") - m.data.SetUnit("{brokers}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +// init fills kafka.consumer_group.lag_sum metric with initial data. +func (m *metricKafkaConsumerGroupLagSum) init() { + m.data.SetName("kafka.consumer_group.lag_sum") + m.data.SetDescription("Current approximate sum of consumer group lag across all partitions of topic") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { +func (m *metricKafkaConsumerGroupLagSum) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Sum().DataPoints().AppendEmpty() + dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) + dp.Attributes().PutStr("group", groupAttributeValue) + dp.Attributes().PutStr("topic", topicAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersCount) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() +func (m *metricKafkaConsumerGroupLagSum) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersCount) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { +func (m *metricKafkaConsumerGroupLagSum) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaBrokersCount(cfg MetricConfig) metricKafkaBrokersCount { - m := metricKafkaBrokersCount{config: cfg} +func newMetricKafkaConsumerGroupLagSum(cfg MetricConfig) metricKafkaConsumerGroupLagSum { + m := metricKafkaConsumerGroupLagSum{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -162,50 +165,52 @@ func newMetricKafkaBrokersCount(cfg MetricConfig) metricKafkaBrokersCount { return m } -type metricKafkaBrokersIncomingByteRate struct { +type metricKafkaConsumerGroupMembers struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.incoming_byte_rate metric with initial data. -func (m *metricKafkaBrokersIncomingByteRate) init() { - m.data.SetName("messaging.kafka.brokers.incoming_byte_rate") - m.data.SetDescription("Average tncoming Byte Rate in bytes/second") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills kafka.consumer_group.members metric with initial data. +func (m *metricKafkaConsumerGroupMembers) init() { + m.data.SetName("kafka.consumer_group.members") + m.data.SetDescription("Count of members in the consumer group") + m.data.SetUnit("{members}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersIncomingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaConsumerGroupMembers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("group", groupAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersIncomingByteRate) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricKafkaConsumerGroupMembers) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersIncomingByteRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricKafkaConsumerGroupMembers) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaBrokersIncomingByteRate(cfg MetricConfig) metricKafkaBrokersIncomingByteRate { - m := metricKafkaBrokersIncomingByteRate{config: cfg} +func newMetricKafkaConsumerGroupMembers(cfg MetricConfig) metricKafkaConsumerGroupMembers { + m := metricKafkaConsumerGroupMembers{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -213,41 +218,43 @@ func newMetricKafkaBrokersIncomingByteRate(cfg MetricConfig) metricKafkaBrokersI return m } -type metricKafkaBrokersOutgoingByteRate struct { +type metricKafkaConsumerGroupOffset struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.outgoing_byte_rate metric with initial data. -func (m *metricKafkaBrokersOutgoingByteRate) init() { - m.data.SetName("messaging.kafka.brokers.outgoing_byte_rate") - m.data.SetDescription("Average outgoing Byte Rate in bytes/second.") +// init fills kafka.consumer_group.offset metric with initial data. +func (m *metricKafkaConsumerGroupOffset) init() { + m.data.SetName("kafka.consumer_group.offset") + m.data.SetDescription("Current offset of the consumer group at partition of topic") m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersOutgoingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaConsumerGroupOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("group", groupAttributeValue) + dp.Attributes().PutStr("topic", topicAttributeValue) + dp.Attributes().PutInt("partition", partitionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersOutgoingByteRate) updateCapacity() { +func (m *metricKafkaConsumerGroupOffset) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersOutgoingByteRate) emit(metrics pmetric.MetricSlice) { +func (m *metricKafkaConsumerGroupOffset) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -255,8 +262,8 @@ func (m *metricKafkaBrokersOutgoingByteRate) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaBrokersOutgoingByteRate(cfg MetricConfig) metricKafkaBrokersOutgoingByteRate { - m := metricKafkaBrokersOutgoingByteRate{config: cfg} +func newMetricKafkaConsumerGroupOffset(cfg MetricConfig) metricKafkaConsumerGroupOffset { + m := metricKafkaConsumerGroupOffset{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -264,41 +271,42 @@ func newMetricKafkaBrokersOutgoingByteRate(cfg MetricConfig) metricKafkaBrokersO return m } -type metricKafkaBrokersRequestLatency struct { +type metricKafkaConsumerGroupOffsetSum struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.request_latency metric with initial data. -func (m *metricKafkaBrokersRequestLatency) init() { - m.data.SetName("messaging.kafka.brokers.request_latency") - m.data.SetDescription("Average request latency in ms") - m.data.SetUnit("ms") +// init fills kafka.consumer_group.offset_sum metric with initial data. +func (m *metricKafkaConsumerGroupOffsetSum) init() { + m.data.SetName("kafka.consumer_group.offset_sum") + m.data.SetDescription("Sum of consumer group offset across partitions of topic") + m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersRequestLatency) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaConsumerGroupOffsetSum) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("group", groupAttributeValue) + dp.Attributes().PutStr("topic", topicAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersRequestLatency) updateCapacity() { +func (m *metricKafkaConsumerGroupOffsetSum) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersRequestLatency) emit(metrics pmetric.MetricSlice) { +func (m *metricKafkaConsumerGroupOffsetSum) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -306,8 +314,8 @@ func (m *metricKafkaBrokersRequestLatency) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaBrokersRequestLatency(cfg MetricConfig) metricKafkaBrokersRequestLatency { - m := metricKafkaBrokersRequestLatency{config: cfg} +func newMetricKafkaConsumerGroupOffsetSum(cfg MetricConfig) metricKafkaConsumerGroupOffsetSum { + m := metricKafkaConsumerGroupOffsetSum{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -315,41 +323,42 @@ func newMetricKafkaBrokersRequestLatency(cfg MetricConfig) metricKafkaBrokersReq return m } -type metricKafkaBrokersRequestRate struct { +type metricKafkaPartitionCurrentOffset struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.request_rate metric with initial data. -func (m *metricKafkaBrokersRequestRate) init() { - m.data.SetName("messaging.kafka.brokers.request_rate") - m.data.SetDescription("Average request rate per second.") - m.data.SetUnit("{requests}/s") +// init fills kafka.partition.current_offset metric with initial data. +func (m *metricKafkaPartitionCurrentOffset) init() { + m.data.SetName("kafka.partition.current_offset") + m.data.SetDescription("Current offset of partition of topic.") + m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersRequestRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaPartitionCurrentOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("topic", topicAttributeValue) + dp.Attributes().PutInt("partition", partitionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersRequestRate) updateCapacity() { +func (m *metricKafkaPartitionCurrentOffset) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersRequestRate) emit(metrics pmetric.MetricSlice) { +func (m *metricKafkaPartitionCurrentOffset) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -357,8 +366,8 @@ func (m *metricKafkaBrokersRequestRate) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaBrokersRequestRate(cfg MetricConfig) metricKafkaBrokersRequestRate { - m := metricKafkaBrokersRequestRate{config: cfg} +func newMetricKafkaPartitionCurrentOffset(cfg MetricConfig) metricKafkaPartitionCurrentOffset { + m := metricKafkaPartitionCurrentOffset{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -366,41 +375,42 @@ func newMetricKafkaBrokersRequestRate(cfg MetricConfig) metricKafkaBrokersReques return m } -type metricKafkaBrokersRequestSize struct { +type metricKafkaPartitionOldestOffset struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.request_size metric with initial data. -func (m *metricKafkaBrokersRequestSize) init() { - m.data.SetName("messaging.kafka.brokers.request_size") - m.data.SetDescription("Average request size in bytes") - m.data.SetUnit("By") +// init fills kafka.partition.oldest_offset metric with initial data. +func (m *metricKafkaPartitionOldestOffset) init() { + m.data.SetName("kafka.partition.oldest_offset") + m.data.SetDescription("Oldest offset of partition of topic") + m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersRequestSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaPartitionOldestOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("topic", topicAttributeValue) + dp.Attributes().PutInt("partition", partitionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersRequestSize) updateCapacity() { +func (m *metricKafkaPartitionOldestOffset) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersRequestSize) emit(metrics pmetric.MetricSlice) { +func (m *metricKafkaPartitionOldestOffset) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -408,8 +418,8 @@ func (m *metricKafkaBrokersRequestSize) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaBrokersRequestSize(cfg MetricConfig) metricKafkaBrokersRequestSize { - m := metricKafkaBrokersRequestSize{config: cfg} +func newMetricKafkaPartitionOldestOffset(cfg MetricConfig) metricKafkaPartitionOldestOffset { + m := metricKafkaPartitionOldestOffset{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -417,50 +427,53 @@ func newMetricKafkaBrokersRequestSize(cfg MetricConfig) metricKafkaBrokersReques return m } -type metricKafkaBrokersRequestsInFlight struct { +type metricKafkaPartitionReplicas struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.requests_in_flight metric with initial data. -func (m *metricKafkaBrokersRequestsInFlight) init() { - m.data.SetName("messaging.kafka.brokers.requests_in_flight") - m.data.SetDescription("Requests in flight") - m.data.SetUnit("{requests}") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills kafka.partition.replicas metric with initial data. +func (m *metricKafkaPartitionReplicas) init() { + m.data.SetName("kafka.partition.replicas") + m.data.SetDescription("Number of replicas for partition of topic") + m.data.SetUnit("{replicas}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersRequestsInFlight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { +func (m *metricKafkaPartitionReplicas) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.Attributes().PutStr("topic", topicAttributeValue) + dp.Attributes().PutInt("partition", partitionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersRequestsInFlight) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricKafkaPartitionReplicas) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersRequestsInFlight) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricKafkaPartitionReplicas) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaBrokersRequestsInFlight(cfg MetricConfig) metricKafkaBrokersRequestsInFlight { - m := metricKafkaBrokersRequestsInFlight{config: cfg} +func newMetricKafkaPartitionReplicas(cfg MetricConfig) metricKafkaPartitionReplicas { + m := metricKafkaPartitionReplicas{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -468,50 +481,53 @@ func newMetricKafkaBrokersRequestsInFlight(cfg MetricConfig) metricKafkaBrokersR return m } -type metricKafkaBrokersResponseRate struct { +type metricKafkaPartitionReplicasInSync struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.response_rate metric with initial data. -func (m *metricKafkaBrokersResponseRate) init() { - m.data.SetName("messaging.kafka.brokers.response_rate") - m.data.SetDescription("Average response rate per second") - m.data.SetUnit("{response}/s") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills kafka.partition.replicas_in_sync metric with initial data. +func (m *metricKafkaPartitionReplicasInSync) init() { + m.data.SetName("kafka.partition.replicas_in_sync") + m.data.SetDescription("Number of synchronized replicas of partition") + m.data.SetUnit("{replicas}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersResponseRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaPartitionReplicasInSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("topic", topicAttributeValue) + dp.Attributes().PutInt("partition", partitionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersResponseRate) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricKafkaPartitionReplicasInSync) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersResponseRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricKafkaPartitionReplicasInSync) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaBrokersResponseRate(cfg MetricConfig) metricKafkaBrokersResponseRate { - m := metricKafkaBrokersResponseRate{config: cfg} +func newMetricKafkaPartitionReplicasInSync(cfg MetricConfig) metricKafkaPartitionReplicasInSync { + m := metricKafkaPartitionReplicasInSync{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -519,50 +535,52 @@ func newMetricKafkaBrokersResponseRate(cfg MetricConfig) metricKafkaBrokersRespo return m } -type metricKafkaBrokersResponseSize struct { +type metricKafkaTopicPartitions struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.response_size metric with initial data. -func (m *metricKafkaBrokersResponseSize) init() { - m.data.SetName("messaging.kafka.brokers.response_size") - m.data.SetDescription("Average response size in bytes") - m.data.SetUnit("By") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills kafka.topic.partitions metric with initial data. +func (m *metricKafkaTopicPartitions) init() { + m.data.SetName("kafka.topic.partitions") + m.data.SetDescription("Number of partitions in topic.") + m.data.SetUnit("{partitions}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaBrokersResponseSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricKafkaTopicPartitions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) - dp.Attributes().PutInt("broker", brokerAttributeValue) + dp.SetIntValue(val) + dp.Attributes().PutStr("topic", topicAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaBrokersResponseSize) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricKafkaTopicPartitions) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaBrokersResponseSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricKafkaTopicPartitions) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaBrokersResponseSize(cfg MetricConfig) metricKafkaBrokersResponseSize { - m := metricKafkaBrokersResponseSize{config: cfg} +func newMetricKafkaTopicPartitions(cfg MetricConfig) metricKafkaTopicPartitions { + m := metricKafkaTopicPartitions{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -570,43 +588,41 @@ func newMetricKafkaBrokersResponseSize(cfg MetricConfig) metricKafkaBrokersRespo return m } -type metricKafkaConsumerGroupLag struct { +type metricMessagingKafkaBrokersConsumerFetchRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.consumer_group.lag metric with initial data. -func (m *metricKafkaConsumerGroupLag) init() { - m.data.SetName("kafka.consumer_group.lag") - m.data.SetDescription("Current approximate lag of consumer group at partition of topic") - m.data.SetUnit("1") +// init fills messaging.kafka.brokers.consumer_fetch_rate metric with initial data. +func (m *metricMessagingKafkaBrokersConsumerFetchRate) init() { + m.data.SetName("messaging.kafka.brokers.consumer_fetch_rate") + m.data.SetDescription("Average consumer fetch Rate") + m.data.SetUnit("{fetches}/s") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaConsumerGroupLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { +func (m *metricMessagingKafkaBrokersConsumerFetchRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("group", groupAttributeValue) - dp.Attributes().PutStr("topic", topicAttributeValue) - dp.Attributes().PutInt("partition", partitionAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaConsumerGroupLag) updateCapacity() { +func (m *metricMessagingKafkaBrokersConsumerFetchRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaConsumerGroupLag) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokersConsumerFetchRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -614,8 +630,8 @@ func (m *metricKafkaConsumerGroupLag) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaConsumerGroupLag(cfg MetricConfig) metricKafkaConsumerGroupLag { - m := metricKafkaConsumerGroupLag{config: cfg} +func newMetricMessagingKafkaBrokersConsumerFetchRate(cfg MetricConfig) metricMessagingKafkaBrokersConsumerFetchRate { + m := metricMessagingKafkaBrokersConsumerFetchRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -623,51 +639,50 @@ func newMetricKafkaConsumerGroupLag(cfg MetricConfig) metricKafkaConsumerGroupLa return m } -type metricKafkaConsumerGroupLagSum struct { +type metricMessagingKafkaBrokersCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.consumer_group.lag_sum metric with initial data. -func (m *metricKafkaConsumerGroupLagSum) init() { - m.data.SetName("kafka.consumer_group.lag_sum") - m.data.SetDescription("Current approximate sum of consumer group lag across all partitions of topic") - m.data.SetUnit("1") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.brokers.count metric with initial data. +func (m *metricMessagingKafkaBrokersCount) init() { + m.data.SetName("messaging.kafka.brokers.count") + m.data.SetDescription("Number of brokers in the cluster.") + m.data.SetUnit("{brokers}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } -func (m *metricKafkaConsumerGroupLagSum) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { +func (m *metricMessagingKafkaBrokersCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("group", groupAttributeValue) - dp.Attributes().PutStr("topic", topicAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaConsumerGroupLagSum) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricMessagingKafkaBrokersCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaConsumerGroupLagSum) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokersCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaConsumerGroupLagSum(cfg MetricConfig) metricKafkaConsumerGroupLagSum { - m := metricKafkaConsumerGroupLagSum{config: cfg} +func newMetricMessagingKafkaBrokersCount(cfg MetricConfig) metricMessagingKafkaBrokersCount { + m := metricMessagingKafkaBrokersCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -675,52 +690,50 @@ func newMetricKafkaConsumerGroupLagSum(cfg MetricConfig) metricKafkaConsumerGrou return m } -type metricKafkaConsumerGroupMembers struct { +type metricMessagingKafkaBrokersIncomingByteRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.consumer_group.members metric with initial data. -func (m *metricKafkaConsumerGroupMembers) init() { - m.data.SetName("kafka.consumer_group.members") - m.data.SetDescription("Count of members in the consumer group") - m.data.SetUnit("{members}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.brokers.incoming_byte_rate metric with initial data. +func (m *metricMessagingKafkaBrokersIncomingByteRate) init() { + m.data.SetName("messaging.kafka.brokers.incoming_byte_rate") + m.data.SetDescription("Average tncoming Byte Rate in bytes/second") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaConsumerGroupMembers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string) { +func (m *metricMessagingKafkaBrokersIncomingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Sum().DataPoints().AppendEmpty() + dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("group", groupAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaConsumerGroupMembers) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() +func (m *metricMessagingKafkaBrokersIncomingByteRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaConsumerGroupMembers) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokersIncomingByteRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaConsumerGroupMembers(cfg MetricConfig) metricKafkaConsumerGroupMembers { - m := metricKafkaConsumerGroupMembers{config: cfg} +func newMetricMessagingKafkaBrokersIncomingByteRate(cfg MetricConfig) metricMessagingKafkaBrokersIncomingByteRate { + m := metricMessagingKafkaBrokersIncomingByteRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -728,43 +741,41 @@ func newMetricKafkaConsumerGroupMembers(cfg MetricConfig) metricKafkaConsumerGro return m } -type metricKafkaConsumerGroupOffset struct { +type metricMessagingKafkaBrokersOutgoingByteRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.consumer_group.offset metric with initial data. -func (m *metricKafkaConsumerGroupOffset) init() { - m.data.SetName("kafka.consumer_group.offset") - m.data.SetDescription("Current offset of the consumer group at partition of topic") +// init fills messaging.kafka.brokers.outgoing_byte_rate metric with initial data. +func (m *metricMessagingKafkaBrokersOutgoingByteRate) init() { + m.data.SetName("messaging.kafka.brokers.outgoing_byte_rate") + m.data.SetDescription("Average outgoing Byte Rate in bytes/second.") m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaConsumerGroupOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { +func (m *metricMessagingKafkaBrokersOutgoingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("group", groupAttributeValue) - dp.Attributes().PutStr("topic", topicAttributeValue) - dp.Attributes().PutInt("partition", partitionAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaConsumerGroupOffset) updateCapacity() { +func (m *metricMessagingKafkaBrokersOutgoingByteRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaConsumerGroupOffset) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokersOutgoingByteRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -772,8 +783,8 @@ func (m *metricKafkaConsumerGroupOffset) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaConsumerGroupOffset(cfg MetricConfig) metricKafkaConsumerGroupOffset { - m := metricKafkaConsumerGroupOffset{config: cfg} +func newMetricMessagingKafkaBrokersOutgoingByteRate(cfg MetricConfig) metricMessagingKafkaBrokersOutgoingByteRate { + m := metricMessagingKafkaBrokersOutgoingByteRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -781,42 +792,41 @@ func newMetricKafkaConsumerGroupOffset(cfg MetricConfig) metricKafkaConsumerGrou return m } -type metricKafkaConsumerGroupOffsetSum struct { +type metricMessagingKafkaBrokersRequestLatency struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.consumer_group.offset_sum metric with initial data. -func (m *metricKafkaConsumerGroupOffsetSum) init() { - m.data.SetName("kafka.consumer_group.offset_sum") - m.data.SetDescription("Sum of consumer group offset across partitions of topic") - m.data.SetUnit("1") +// init fills messaging.kafka.brokers.request_latency metric with initial data. +func (m *metricMessagingKafkaBrokersRequestLatency) init() { + m.data.SetName("messaging.kafka.brokers.request_latency") + m.data.SetDescription("Average request latency in ms") + m.data.SetUnit("ms") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaConsumerGroupOffsetSum) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string) { +func (m *metricMessagingKafkaBrokersRequestLatency) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("group", groupAttributeValue) - dp.Attributes().PutStr("topic", topicAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaConsumerGroupOffsetSum) updateCapacity() { +func (m *metricMessagingKafkaBrokersRequestLatency) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaConsumerGroupOffsetSum) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokersRequestLatency) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -824,8 +834,8 @@ func (m *metricKafkaConsumerGroupOffsetSum) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaConsumerGroupOffsetSum(cfg MetricConfig) metricKafkaConsumerGroupOffsetSum { - m := metricKafkaConsumerGroupOffsetSum{config: cfg} +func newMetricMessagingKafkaBrokersRequestLatency(cfg MetricConfig) metricMessagingKafkaBrokersRequestLatency { + m := metricMessagingKafkaBrokersRequestLatency{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -833,42 +843,41 @@ func newMetricKafkaConsumerGroupOffsetSum(cfg MetricConfig) metricKafkaConsumerG return m } -type metricKafkaPartitionCurrentOffset struct { +type metricMessagingKafkaBrokersRequestRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.partition.current_offset metric with initial data. -func (m *metricKafkaPartitionCurrentOffset) init() { - m.data.SetName("kafka.partition.current_offset") - m.data.SetDescription("Current offset of partition of topic.") - m.data.SetUnit("1") +// init fills messaging.kafka.brokers.request_rate metric with initial data. +func (m *metricMessagingKafkaBrokersRequestRate) init() { + m.data.SetName("messaging.kafka.brokers.request_rate") + m.data.SetDescription("Average request rate per second.") + m.data.SetUnit("{requests}/s") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaPartitionCurrentOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { +func (m *metricMessagingKafkaBrokersRequestRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("topic", topicAttributeValue) - dp.Attributes().PutInt("partition", partitionAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaPartitionCurrentOffset) updateCapacity() { +func (m *metricMessagingKafkaBrokersRequestRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaPartitionCurrentOffset) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokersRequestRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -876,8 +885,8 @@ func (m *metricKafkaPartitionCurrentOffset) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaPartitionCurrentOffset(cfg MetricConfig) metricKafkaPartitionCurrentOffset { - m := metricKafkaPartitionCurrentOffset{config: cfg} +func newMetricMessagingKafkaBrokersRequestRate(cfg MetricConfig) metricMessagingKafkaBrokersRequestRate { + m := metricMessagingKafkaBrokersRequestRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -885,42 +894,41 @@ func newMetricKafkaPartitionCurrentOffset(cfg MetricConfig) metricKafkaPartition return m } -type metricKafkaPartitionOldestOffset struct { +type metricMessagingKafkaBrokersRequestSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.partition.oldest_offset metric with initial data. -func (m *metricKafkaPartitionOldestOffset) init() { - m.data.SetName("kafka.partition.oldest_offset") - m.data.SetDescription("Oldest offset of partition of topic") - m.data.SetUnit("1") +// init fills messaging.kafka.brokers.request_size metric with initial data. +func (m *metricMessagingKafkaBrokersRequestSize) init() { + m.data.SetName("messaging.kafka.brokers.request_size") + m.data.SetDescription("Average request size in bytes") + m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaPartitionOldestOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { +func (m *metricMessagingKafkaBrokersRequestSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("topic", topicAttributeValue) - dp.Attributes().PutInt("partition", partitionAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaPartitionOldestOffset) updateCapacity() { +func (m *metricMessagingKafkaBrokersRequestSize) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaPartitionOldestOffset) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokersRequestSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -928,8 +936,8 @@ func (m *metricKafkaPartitionOldestOffset) emit(metrics pmetric.MetricSlice) { } } -func newMetricKafkaPartitionOldestOffset(cfg MetricConfig) metricKafkaPartitionOldestOffset { - m := metricKafkaPartitionOldestOffset{config: cfg} +func newMetricMessagingKafkaBrokersRequestSize(cfg MetricConfig) metricMessagingKafkaBrokersRequestSize { + m := metricMessagingKafkaBrokersRequestSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -937,53 +945,50 @@ func newMetricKafkaPartitionOldestOffset(cfg MetricConfig) metricKafkaPartitionO return m } -type metricKafkaPartitionReplicas struct { +type metricMessagingKafkaBrokersRequestsInFlight struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.partition.replicas metric with initial data. -func (m *metricKafkaPartitionReplicas) init() { - m.data.SetName("kafka.partition.replicas") - m.data.SetDescription("Number of replicas for partition of topic") - m.data.SetUnit("{replicas}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.brokers.requests_in_flight metric with initial data. +func (m *metricMessagingKafkaBrokersRequestsInFlight) init() { + m.data.SetName("messaging.kafka.brokers.requests_in_flight") + m.data.SetDescription("Requests in flight") + m.data.SetUnit("{requests}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaPartitionReplicas) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { +func (m *metricMessagingKafkaBrokersRequestsInFlight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Sum().DataPoints().AppendEmpty() + dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("topic", topicAttributeValue) - dp.Attributes().PutInt("partition", partitionAttributeValue) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaPartitionReplicas) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() +func (m *metricMessagingKafkaBrokersRequestsInFlight) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaPartitionReplicas) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokersRequestsInFlight) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaPartitionReplicas(cfg MetricConfig) metricKafkaPartitionReplicas { - m := metricKafkaPartitionReplicas{config: cfg} +func newMetricMessagingKafkaBrokersRequestsInFlight(cfg MetricConfig) metricMessagingKafkaBrokersRequestsInFlight { + m := metricMessagingKafkaBrokersRequestsInFlight{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -991,53 +996,50 @@ func newMetricKafkaPartitionReplicas(cfg MetricConfig) metricKafkaPartitionRepli return m } -type metricKafkaPartitionReplicasInSync struct { +type metricMessagingKafkaBrokersResponseRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.partition.replicas_in_sync metric with initial data. -func (m *metricKafkaPartitionReplicasInSync) init() { - m.data.SetName("kafka.partition.replicas_in_sync") - m.data.SetDescription("Number of synchronized replicas of partition") - m.data.SetUnit("{replicas}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.brokers.response_rate metric with initial data. +func (m *metricMessagingKafkaBrokersResponseRate) init() { + m.data.SetName("messaging.kafka.brokers.response_rate") + m.data.SetDescription("Average response rate per second") + m.data.SetUnit("{response}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaPartitionReplicasInSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string, partitionAttributeValue int64) { +func (m *metricMessagingKafkaBrokersResponseRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Sum().DataPoints().AppendEmpty() + dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("topic", topicAttributeValue) - dp.Attributes().PutInt("partition", partitionAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaPartitionReplicasInSync) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() +func (m *metricMessagingKafkaBrokersResponseRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaPartitionReplicasInSync) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokersResponseRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaPartitionReplicasInSync(cfg MetricConfig) metricKafkaPartitionReplicasInSync { - m := metricKafkaPartitionReplicasInSync{config: cfg} +func newMetricMessagingKafkaBrokersResponseRate(cfg MetricConfig) metricMessagingKafkaBrokersResponseRate { + m := metricMessagingKafkaBrokersResponseRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -1045,52 +1047,50 @@ func newMetricKafkaPartitionReplicasInSync(cfg MetricConfig) metricKafkaPartitio return m } -type metricKafkaTopicPartitions struct { +type metricMessagingKafkaBrokersResponseSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills kafka.topic.partitions metric with initial data. -func (m *metricKafkaTopicPartitions) init() { - m.data.SetName("kafka.topic.partitions") - m.data.SetDescription("Number of partitions in topic.") - m.data.SetUnit("{partitions}") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.brokers.response_size metric with initial data. +func (m *metricMessagingKafkaBrokersResponseSize) init() { + m.data.SetName("messaging.kafka.brokers.response_size") + m.data.SetDescription("Average response size in bytes") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricKafkaTopicPartitions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, topicAttributeValue string) { +func (m *metricMessagingKafkaBrokersResponseSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Sum().DataPoints().AppendEmpty() + dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("topic", topicAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricKafkaTopicPartitions) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() +func (m *metricMessagingKafkaBrokersResponseSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricKafkaTopicPartitions) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokersResponseSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricKafkaTopicPartitions(cfg MetricConfig) metricKafkaTopicPartitions { - m := metricKafkaTopicPartitions{config: cfg} +func newMetricMessagingKafkaBrokersResponseSize(cfg MetricConfig) metricMessagingKafkaBrokersResponseSize { + m := metricMessagingKafkaBrokersResponseSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -1101,31 +1101,31 @@ func newMetricKafkaTopicPartitions(cfg MetricConfig) metricKafkaTopicPartitions // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information - metricKafkaBrokers metricKafkaBrokers - metricKafkaBrokersConsumerFetchRate metricKafkaBrokersConsumerFetchRate - metricKafkaBrokersCount metricKafkaBrokersCount - metricKafkaBrokersIncomingByteRate metricKafkaBrokersIncomingByteRate - metricKafkaBrokersOutgoingByteRate metricKafkaBrokersOutgoingByteRate - metricKafkaBrokersRequestLatency metricKafkaBrokersRequestLatency - metricKafkaBrokersRequestRate metricKafkaBrokersRequestRate - metricKafkaBrokersRequestSize metricKafkaBrokersRequestSize - metricKafkaBrokersRequestsInFlight metricKafkaBrokersRequestsInFlight - metricKafkaBrokersResponseRate metricKafkaBrokersResponseRate - metricKafkaBrokersResponseSize metricKafkaBrokersResponseSize - metricKafkaConsumerGroupLag metricKafkaConsumerGroupLag - metricKafkaConsumerGroupLagSum metricKafkaConsumerGroupLagSum - metricKafkaConsumerGroupMembers metricKafkaConsumerGroupMembers - metricKafkaConsumerGroupOffset metricKafkaConsumerGroupOffset - metricKafkaConsumerGroupOffsetSum metricKafkaConsumerGroupOffsetSum - metricKafkaPartitionCurrentOffset metricKafkaPartitionCurrentOffset - metricKafkaPartitionOldestOffset metricKafkaPartitionOldestOffset - metricKafkaPartitionReplicas metricKafkaPartitionReplicas - metricKafkaPartitionReplicasInSync metricKafkaPartitionReplicasInSync - metricKafkaTopicPartitions metricKafkaTopicPartitions + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
+ buildInfo component.BuildInfo // contains version information + metricKafkaBrokers metricKafkaBrokers + metricKafkaConsumerGroupLag metricKafkaConsumerGroupLag + metricKafkaConsumerGroupLagSum metricKafkaConsumerGroupLagSum + metricKafkaConsumerGroupMembers metricKafkaConsumerGroupMembers + metricKafkaConsumerGroupOffset metricKafkaConsumerGroupOffset + metricKafkaConsumerGroupOffsetSum metricKafkaConsumerGroupOffsetSum + metricKafkaPartitionCurrentOffset metricKafkaPartitionCurrentOffset + metricKafkaPartitionOldestOffset metricKafkaPartitionOldestOffset + metricKafkaPartitionReplicas metricKafkaPartitionReplicas + metricKafkaPartitionReplicasInSync metricKafkaPartitionReplicasInSync + metricKafkaTopicPartitions metricKafkaTopicPartitions + metricMessagingKafkaBrokersConsumerFetchRate metricMessagingKafkaBrokersConsumerFetchRate + metricMessagingKafkaBrokersCount metricMessagingKafkaBrokersCount + metricMessagingKafkaBrokersIncomingByteRate metricMessagingKafkaBrokersIncomingByteRate + metricMessagingKafkaBrokersOutgoingByteRate metricMessagingKafkaBrokersOutgoingByteRate + metricMessagingKafkaBrokersRequestLatency metricMessagingKafkaBrokersRequestLatency + metricMessagingKafkaBrokersRequestRate metricMessagingKafkaBrokersRequestRate + metricMessagingKafkaBrokersRequestSize metricMessagingKafkaBrokersRequestSize + metricMessagingKafkaBrokersRequestsInFlight metricMessagingKafkaBrokersRequestsInFlight + metricMessagingKafkaBrokersResponseRate metricMessagingKafkaBrokersResponseRate + metricMessagingKafkaBrokersResponseSize metricMessagingKafkaBrokersResponseSize } // metricBuilderOption applies changes to default metrics builder. @@ -1142,61 +1142,61 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting if mbc.Metrics.KafkaBrokers.Enabled { settings.Logger.Warn("[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.brokers.count`") } - if !mbc.Metrics.KafkaBrokersConsumerFetchRate.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersConsumerFetchRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersCount.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersCount.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.count`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersIncomingByteRate.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersIncomingByteRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersOutgoingByteRate.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersOutgoingByteRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersRequestLatency.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersRequestLatency.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersRequestRate.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersRequestRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersRequestSize.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersRequestSize.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_size`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersRequestsInFlight.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersRequestsInFlight.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersResponseRate.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersResponseRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.KafkaBrokersResponseSize.enabledSetByUser { + if !mbc.Metrics.MessagingKafkaBrokersResponseSize.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_size`: This metric will be enabled by default in the next versions.") } mb := &MetricsBuilder{ - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricKafkaBrokers: newMetricKafkaBrokers(mbc.Metrics.KafkaBrokers), - metricKafkaBrokersConsumerFetchRate: newMetricKafkaBrokersConsumerFetchRate(mbc.Metrics.KafkaBrokersConsumerFetchRate), - 
metricKafkaBrokersCount: newMetricKafkaBrokersCount(mbc.Metrics.KafkaBrokersCount), - metricKafkaBrokersIncomingByteRate: newMetricKafkaBrokersIncomingByteRate(mbc.Metrics.KafkaBrokersIncomingByteRate), - metricKafkaBrokersOutgoingByteRate: newMetricKafkaBrokersOutgoingByteRate(mbc.Metrics.KafkaBrokersOutgoingByteRate), - metricKafkaBrokersRequestLatency: newMetricKafkaBrokersRequestLatency(mbc.Metrics.KafkaBrokersRequestLatency), - metricKafkaBrokersRequestRate: newMetricKafkaBrokersRequestRate(mbc.Metrics.KafkaBrokersRequestRate), - metricKafkaBrokersRequestSize: newMetricKafkaBrokersRequestSize(mbc.Metrics.KafkaBrokersRequestSize), - metricKafkaBrokersRequestsInFlight: newMetricKafkaBrokersRequestsInFlight(mbc.Metrics.KafkaBrokersRequestsInFlight), - metricKafkaBrokersResponseRate: newMetricKafkaBrokersResponseRate(mbc.Metrics.KafkaBrokersResponseRate), - metricKafkaBrokersResponseSize: newMetricKafkaBrokersResponseSize(mbc.Metrics.KafkaBrokersResponseSize), - metricKafkaConsumerGroupLag: newMetricKafkaConsumerGroupLag(mbc.Metrics.KafkaConsumerGroupLag), - metricKafkaConsumerGroupLagSum: newMetricKafkaConsumerGroupLagSum(mbc.Metrics.KafkaConsumerGroupLagSum), - metricKafkaConsumerGroupMembers: newMetricKafkaConsumerGroupMembers(mbc.Metrics.KafkaConsumerGroupMembers), - metricKafkaConsumerGroupOffset: newMetricKafkaConsumerGroupOffset(mbc.Metrics.KafkaConsumerGroupOffset), - metricKafkaConsumerGroupOffsetSum: newMetricKafkaConsumerGroupOffsetSum(mbc.Metrics.KafkaConsumerGroupOffsetSum), - metricKafkaPartitionCurrentOffset: newMetricKafkaPartitionCurrentOffset(mbc.Metrics.KafkaPartitionCurrentOffset), - metricKafkaPartitionOldestOffset: newMetricKafkaPartitionOldestOffset(mbc.Metrics.KafkaPartitionOldestOffset), - metricKafkaPartitionReplicas: newMetricKafkaPartitionReplicas(mbc.Metrics.KafkaPartitionReplicas), - metricKafkaPartitionReplicasInSync: newMetricKafkaPartitionReplicasInSync(mbc.Metrics.KafkaPartitionReplicasInSync), - metricKafkaTopicPartitions: newMetricKafkaTopicPartitions(mbc.Metrics.KafkaTopicPartitions), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricKafkaBrokers: newMetricKafkaBrokers(mbc.Metrics.KafkaBrokers), + metricKafkaConsumerGroupLag: newMetricKafkaConsumerGroupLag(mbc.Metrics.KafkaConsumerGroupLag), + metricKafkaConsumerGroupLagSum: newMetricKafkaConsumerGroupLagSum(mbc.Metrics.KafkaConsumerGroupLagSum), + metricKafkaConsumerGroupMembers: newMetricKafkaConsumerGroupMembers(mbc.Metrics.KafkaConsumerGroupMembers), + metricKafkaConsumerGroupOffset: newMetricKafkaConsumerGroupOffset(mbc.Metrics.KafkaConsumerGroupOffset), + metricKafkaConsumerGroupOffsetSum: newMetricKafkaConsumerGroupOffsetSum(mbc.Metrics.KafkaConsumerGroupOffsetSum), + metricKafkaPartitionCurrentOffset: newMetricKafkaPartitionCurrentOffset(mbc.Metrics.KafkaPartitionCurrentOffset), + metricKafkaPartitionOldestOffset: newMetricKafkaPartitionOldestOffset(mbc.Metrics.KafkaPartitionOldestOffset), + metricKafkaPartitionReplicas: newMetricKafkaPartitionReplicas(mbc.Metrics.KafkaPartitionReplicas), + metricKafkaPartitionReplicasInSync: newMetricKafkaPartitionReplicasInSync(mbc.Metrics.KafkaPartitionReplicasInSync), + metricKafkaTopicPartitions: newMetricKafkaTopicPartitions(mbc.Metrics.KafkaTopicPartitions), + metricMessagingKafkaBrokersConsumerFetchRate: newMetricMessagingKafkaBrokersConsumerFetchRate(mbc.Metrics.MessagingKafkaBrokersConsumerFetchRate), + metricMessagingKafkaBrokersCount: 
newMetricMessagingKafkaBrokersCount(mbc.Metrics.MessagingKafkaBrokersCount), + metricMessagingKafkaBrokersIncomingByteRate: newMetricMessagingKafkaBrokersIncomingByteRate(mbc.Metrics.MessagingKafkaBrokersIncomingByteRate), + metricMessagingKafkaBrokersOutgoingByteRate: newMetricMessagingKafkaBrokersOutgoingByteRate(mbc.Metrics.MessagingKafkaBrokersOutgoingByteRate), + metricMessagingKafkaBrokersRequestLatency: newMetricMessagingKafkaBrokersRequestLatency(mbc.Metrics.MessagingKafkaBrokersRequestLatency), + metricMessagingKafkaBrokersRequestRate: newMetricMessagingKafkaBrokersRequestRate(mbc.Metrics.MessagingKafkaBrokersRequestRate), + metricMessagingKafkaBrokersRequestSize: newMetricMessagingKafkaBrokersRequestSize(mbc.Metrics.MessagingKafkaBrokersRequestSize), + metricMessagingKafkaBrokersRequestsInFlight: newMetricMessagingKafkaBrokersRequestsInFlight(mbc.Metrics.MessagingKafkaBrokersRequestsInFlight), + metricMessagingKafkaBrokersResponseRate: newMetricMessagingKafkaBrokersResponseRate(mbc.Metrics.MessagingKafkaBrokersResponseRate), + metricMessagingKafkaBrokersResponseSize: newMetricMessagingKafkaBrokersResponseSize(mbc.Metrics.MessagingKafkaBrokersResponseSize), } for _, op := range options { op(mb) @@ -1254,16 +1254,6 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricKafkaBrokers.emit(ils.Metrics()) - mb.metricKafkaBrokersConsumerFetchRate.emit(ils.Metrics()) - mb.metricKafkaBrokersCount.emit(ils.Metrics()) - mb.metricKafkaBrokersIncomingByteRate.emit(ils.Metrics()) - mb.metricKafkaBrokersOutgoingByteRate.emit(ils.Metrics()) - mb.metricKafkaBrokersRequestLatency.emit(ils.Metrics()) - mb.metricKafkaBrokersRequestRate.emit(ils.Metrics()) - mb.metricKafkaBrokersRequestSize.emit(ils.Metrics()) - mb.metricKafkaBrokersRequestsInFlight.emit(ils.Metrics()) - mb.metricKafkaBrokersResponseRate.emit(ils.Metrics()) - mb.metricKafkaBrokersResponseSize.emit(ils.Metrics()) mb.metricKafkaConsumerGroupLag.emit(ils.Metrics()) mb.metricKafkaConsumerGroupLagSum.emit(ils.Metrics()) mb.metricKafkaConsumerGroupMembers.emit(ils.Metrics()) @@ -1274,6 +1264,16 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricKafkaPartitionReplicas.emit(ils.Metrics()) mb.metricKafkaPartitionReplicasInSync.emit(ils.Metrics()) mb.metricKafkaTopicPartitions.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersConsumerFetchRate.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersCount.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersIncomingByteRate.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersOutgoingByteRate.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersRequestLatency.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersRequestRate.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersRequestSize.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersRequestsInFlight.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersResponseRate.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokersResponseSize.emit(ils.Metrics()) for _, op := range rmo { op(rm) @@ -1299,56 +1299,6 @@ func (mb *MetricsBuilder) RecordKafkaBrokersDataPoint(ts pcommon.Timestamp, val mb.metricKafkaBrokers.recordDataPoint(mb.startTime, ts, val) } -// RecordKafkaBrokersConsumerFetchRateDataPoint adds a data point to messaging.kafka.brokers.consumer_fetch_rate metric. 
-func (mb *MetricsBuilder) RecordKafkaBrokersConsumerFetchRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersConsumerFetchRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersCountDataPoint adds a data point to messaging.kafka.brokers.count metric. -func (mb *MetricsBuilder) RecordKafkaBrokersCountDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricKafkaBrokersCount.recordDataPoint(mb.startTime, ts, val) -} - -// RecordKafkaBrokersIncomingByteRateDataPoint adds a data point to messaging.kafka.brokers.incoming_byte_rate metric. -func (mb *MetricsBuilder) RecordKafkaBrokersIncomingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersIncomingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersOutgoingByteRateDataPoint adds a data point to messaging.kafka.brokers.outgoing_byte_rate metric. -func (mb *MetricsBuilder) RecordKafkaBrokersOutgoingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersOutgoingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersRequestLatencyDataPoint adds a data point to messaging.kafka.brokers.request_latency metric. -func (mb *MetricsBuilder) RecordKafkaBrokersRequestLatencyDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersRequestLatency.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersRequestRateDataPoint adds a data point to messaging.kafka.brokers.request_rate metric. -func (mb *MetricsBuilder) RecordKafkaBrokersRequestRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersRequestRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersRequestSizeDataPoint adds a data point to messaging.kafka.brokers.request_size metric. -func (mb *MetricsBuilder) RecordKafkaBrokersRequestSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersRequestSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersRequestsInFlightDataPoint adds a data point to messaging.kafka.brokers.requests_in_flight metric. -func (mb *MetricsBuilder) RecordKafkaBrokersRequestsInFlightDataPoint(ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { - mb.metricKafkaBrokersRequestsInFlight.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersResponseRateDataPoint adds a data point to messaging.kafka.brokers.response_rate metric. -func (mb *MetricsBuilder) RecordKafkaBrokersResponseRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersResponseRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - -// RecordKafkaBrokersResponseSizeDataPoint adds a data point to messaging.kafka.brokers.response_size metric. -func (mb *MetricsBuilder) RecordKafkaBrokersResponseSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricKafkaBrokersResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) -} - // RecordKafkaConsumerGroupLagDataPoint adds a data point to kafka.consumer_group.lag metric. 
func (mb *MetricsBuilder) RecordKafkaConsumerGroupLagDataPoint(ts pcommon.Timestamp, val int64, groupAttributeValue string, topicAttributeValue string, partitionAttributeValue int64) { mb.metricKafkaConsumerGroupLag.recordDataPoint(mb.startTime, ts, val, groupAttributeValue, topicAttributeValue, partitionAttributeValue) @@ -1399,6 +1349,56 @@ func (mb *MetricsBuilder) RecordKafkaTopicPartitionsDataPoint(ts pcommon.Timesta mb.metricKafkaTopicPartitions.recordDataPoint(mb.startTime, ts, val, topicAttributeValue) } +// RecordMessagingKafkaBrokersConsumerFetchRateDataPoint adds a data point to messaging.kafka.brokers.consumer_fetch_rate metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersConsumerFetchRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersConsumerFetchRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersCountDataPoint adds a data point to messaging.kafka.brokers.count metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMessagingKafkaBrokersCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMessagingKafkaBrokersIncomingByteRateDataPoint adds a data point to messaging.kafka.brokers.incoming_byte_rate metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersIncomingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersIncomingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersOutgoingByteRateDataPoint adds a data point to messaging.kafka.brokers.outgoing_byte_rate metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersOutgoingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersOutgoingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersRequestLatencyDataPoint adds a data point to messaging.kafka.brokers.request_latency metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestLatencyDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersRequestLatency.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersRequestRateDataPoint adds a data point to messaging.kafka.brokers.request_rate metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersRequestRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersRequestSizeDataPoint adds a data point to messaging.kafka.brokers.request_size metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersRequestSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersRequestsInFlightDataPoint adds a data point to messaging.kafka.brokers.requests_in_flight metric. 
+func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestsInFlightDataPoint(ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersRequestsInFlight.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersResponseRateDataPoint adds a data point to messaging.kafka.brokers.response_rate metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersResponseRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersResponseRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + +// RecordMessagingKafkaBrokersResponseSizeDataPoint adds a data point to messaging.kafka.brokers.response_size metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokersResponseSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokersResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index c486894f54d5..f4b5deb476cd 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -102,36 +102,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordKafkaBrokersDataPoint(ts, 1) - allMetricsCount++ - mb.RecordKafkaBrokersConsumerFetchRateDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersCountDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordKafkaBrokersIncomingByteRateDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersOutgoingByteRateDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersRequestLatencyDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersRequestRateDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersRequestSizeDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersRequestsInFlightDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersResponseRateDataPoint(ts, 1, 6) - - allMetricsCount++ - mb.RecordKafkaBrokersResponseSizeDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordKafkaConsumerGroupLagDataPoint(ts, 1, "group-val", "topic-val", 9) @@ -172,6 +142,36 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordKafkaTopicPartitionsDataPoint(ts, 1, "topic-val") + allMetricsCount++ + mb.RecordMessagingKafkaBrokersConsumerFetchRateDataPoint(ts, 1, 6) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersCountDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersIncomingByteRateDataPoint(ts, 1, 6) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersOutgoingByteRateDataPoint(ts, 1, 6) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersRequestLatencyDataPoint(ts, 1, 6) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersRequestRateDataPoint(ts, 1, 6) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersRequestSizeDataPoint(ts, 1, 6) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersRequestsInFlightDataPoint(ts, 1, 6) + + allMetricsCount++ + 
mb.RecordMessagingKafkaBrokersResponseRateDataPoint(ts, 1, 6) + + allMetricsCount++ + mb.RecordMessagingKafkaBrokersResponseSizeDataPoint(ts, 1, 6) + res := pcommon.NewResource() res.Attributes().PutStr("k1", "v1") metrics := mb.Emit(WithResource(res)) @@ -207,155 +207,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "messaging.kafka.brokers.consumer_fetch_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.consumer_fetch_rate") - validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average consumer fetch Rate", ms.At(i).Description()) - assert.Equal(t, "{fetches}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.count": - assert.False(t, validatedMetrics["messaging.kafka.brokers.count"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.count") - validatedMetrics["messaging.kafka.brokers.count"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "Number of brokers in the cluster.", ms.At(i).Description()) - assert.Equal(t, "{brokers}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "messaging.kafka.brokers.incoming_byte_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.incoming_byte_rate") - validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average tncoming Byte Rate in bytes/second", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.outgoing_byte_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.outgoing_byte_rate") - validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - 
assert.Equal(t, "Average outgoing Byte Rate in bytes/second.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.request_latency": - assert.False(t, validatedMetrics["messaging.kafka.brokers.request_latency"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_latency") - validatedMetrics["messaging.kafka.brokers.request_latency"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average request latency in ms", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.request_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.request_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_rate") - validatedMetrics["messaging.kafka.brokers.request_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average request rate per second.", ms.At(i).Description()) - assert.Equal(t, "{requests}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.request_size": - assert.False(t, validatedMetrics["messaging.kafka.brokers.request_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_size") - validatedMetrics["messaging.kafka.brokers.request_size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average request size in bytes", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.requests_in_flight": - assert.False(t, validatedMetrics["messaging.kafka.brokers.requests_in_flight"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.requests_in_flight") - validatedMetrics["messaging.kafka.brokers.requests_in_flight"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Requests in flight", 
ms.At(i).Description()) - assert.Equal(t, "{requests}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.response_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.response_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_rate") - validatedMetrics["messaging.kafka.brokers.response_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average response rate per second", ms.At(i).Description()) - assert.Equal(t, "{response}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.response_size": - assert.False(t, validatedMetrics["messaging.kafka.brokers.response_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_size") - validatedMetrics["messaging.kafka.brokers.response_size"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average response size in bytes", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - attrVal, ok := dp.Attributes().Get("broker") - assert.True(t, ok) - assert.EqualValues(t, 6, attrVal.Int()) case "kafka.consumer_group.lag": assert.False(t, validatedMetrics["kafka.consumer_group.lag"], "Found a duplicate in the metrics slice: kafka.consumer_group.lag") validatedMetrics["kafka.consumer_group.lag"] = true @@ -544,6 +395,155 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("topic") assert.True(t, ok) assert.EqualValues(t, "topic-val", attrVal.Str()) + case "messaging.kafka.brokers.consumer_fetch_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.consumer_fetch_rate") + validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average consumer fetch Rate", ms.At(i).Description()) + assert.Equal(t, "{fetches}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.count": + assert.False(t, 
validatedMetrics["messaging.kafka.brokers.count"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.count") + validatedMetrics["messaging.kafka.brokers.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of brokers in the cluster.", ms.At(i).Description()) + assert.Equal(t, "{brokers}", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "messaging.kafka.brokers.incoming_byte_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.incoming_byte_rate") + validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average tncoming Byte Rate in bytes/second", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.outgoing_byte_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.outgoing_byte_rate") + validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average outgoing Byte Rate in bytes/second.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.request_latency": + assert.False(t, validatedMetrics["messaging.kafka.brokers.request_latency"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_latency") + validatedMetrics["messaging.kafka.brokers.request_latency"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average request latency in ms", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.request_rate": + 
assert.False(t, validatedMetrics["messaging.kafka.brokers.request_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_rate") + validatedMetrics["messaging.kafka.brokers.request_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average request rate per second.", ms.At(i).Description()) + assert.Equal(t, "{requests}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.request_size": + assert.False(t, validatedMetrics["messaging.kafka.brokers.request_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_size") + validatedMetrics["messaging.kafka.brokers.request_size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average request size in bytes", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.requests_in_flight": + assert.False(t, validatedMetrics["messaging.kafka.brokers.requests_in_flight"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.requests_in_flight") + validatedMetrics["messaging.kafka.brokers.requests_in_flight"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Requests in flight", ms.At(i).Description()) + assert.Equal(t, "{requests}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.response_rate": + assert.False(t, validatedMetrics["messaging.kafka.brokers.response_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_rate") + validatedMetrics["messaging.kafka.brokers.response_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average response rate per second", ms.At(i).Description()) + assert.Equal(t, "{response}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) + case "messaging.kafka.brokers.response_size": + assert.False(t, 
validatedMetrics["messaging.kafka.brokers.response_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_size") + validatedMetrics["messaging.kafka.brokers.response_size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average response size in bytes", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) } } }) diff --git a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml index efe5c647fcd4..293ab2825f3b 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml @@ -3,26 +3,6 @@ all_set: metrics: kafka.brokers: enabled: true - messaging.kafka.brokers.consumer_fetch_rate: - enabled: true - messaging.kafka.brokers.count: - enabled: true - messaging.kafka.brokers.incoming_byte_rate: - enabled: true - messaging.kafka.brokers.outgoing_byte_rate: - enabled: true - messaging.kafka.brokers.request_latency: - enabled: true - messaging.kafka.brokers.request_rate: - enabled: true - messaging.kafka.brokers.request_size: - enabled: true - messaging.kafka.brokers.requests_in_flight: - enabled: true - messaging.kafka.brokers.response_rate: - enabled: true - messaging.kafka.brokers.response_size: - enabled: true kafka.consumer_group.lag: enabled: true kafka.consumer_group.lag_sum: @@ -43,29 +23,29 @@ all_set: enabled: true kafka.topic.partitions: enabled: true -none_set: - metrics: - kafka.brokers: - enabled: false messaging.kafka.brokers.consumer_fetch_rate: - enabled: false + enabled: true messaging.kafka.brokers.count: - enabled: false + enabled: true messaging.kafka.brokers.incoming_byte_rate: - enabled: false + enabled: true messaging.kafka.brokers.outgoing_byte_rate: - enabled: false + enabled: true messaging.kafka.brokers.request_latency: - enabled: false + enabled: true messaging.kafka.brokers.request_rate: - enabled: false + enabled: true messaging.kafka.brokers.request_size: - enabled: false + enabled: true messaging.kafka.brokers.requests_in_flight: - enabled: false + enabled: true messaging.kafka.brokers.response_rate: - enabled: false + enabled: true messaging.kafka.brokers.response_size: + enabled: true +none_set: + metrics: + kafka.brokers: enabled: false kafka.consumer_group.lag: enabled: false @@ -87,3 +67,23 @@ none_set: enabled: false kafka.topic.partitions: enabled: false + messaging.kafka.brokers.consumer_fetch_rate: + enabled: false + messaging.kafka.brokers.count: + enabled: false + messaging.kafka.brokers.incoming_byte_rate: + enabled: false + messaging.kafka.brokers.outgoing_byte_rate: + enabled: false + messaging.kafka.brokers.request_latency: + enabled: false + messaging.kafka.brokers.request_rate: + enabled: false + messaging.kafka.brokers.request_size: + enabled: false + messaging.kafka.brokers.requests_in_flight: + enabled: false + messaging.kafka.brokers.response_rate: + enabled: false + messaging.kafka.brokers.response_size: + enabled: false diff --git 
a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml index f332dc2f569d..d62aa90d2939 100644 --- a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml +++ b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml @@ -3,7 +3,7 @@ resourceMetrics: scopeMetrics: - metrics: - description: Number of brokers in the cluster. - Gauge: + gauge: aggregationTemporality: 2 dataPoints: - asInt: "1" From 25d0e7166102ae2020178f6317b7949669a2aba3 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Tue, 1 Aug 2023 10:05:06 -0700 Subject: [PATCH 11/36] chore: add messaging to scraper --- .../kafkametricsreceiver/broker_scraper.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index d8aa43fd3087..e4e9779c1b14 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -63,39 +63,39 @@ func (s *brokerScraper) scrapeMetric(now pcommon.Timestamp, allMetrics saramaMet switch prefix { case "consumer-fetch-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordKafkaBrokersConsumerFetchRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersConsumerFetchRateDataPoint(now, v, brokerID) } case "incoming-byte-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordKafkaBrokersIncomingByteRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersIncomingByteRateDataPoint(now, v, brokerID) } case "outgoing-byte-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordKafkaBrokersOutgoingByteRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersOutgoingByteRateDataPoint(now, v, brokerID) } case "request-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordKafkaBrokersRequestRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersRequestRateDataPoint(now, v, brokerID) } case "response-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordKafkaBrokersResponseRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersResponseRateDataPoint(now, v, brokerID) } case "response-size-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordKafkaBrokersResponseSizeDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersResponseSizeDataPoint(now, v, brokerID) } case "request-size-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordKafkaBrokersRequestSizeDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersRequestSizeDataPoint(now, v, brokerID) } case "requests-in-flight-for-broker-": if v, ok := metric["count"].(int64); ok { - s.mb.RecordKafkaBrokersRequestsInFlightDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersRequestsInFlightDataPoint(now, v, brokerID) } case "request-latency-in-ms-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordKafkaBrokersRequestLatencyDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokersRequestLatencyDataPoint(now, v, brokerID) } default: fmt.Printf("undefined for prefix %s\n", prefix) @@ -133,7 +133,7 @@ func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { s.mb.RecordKafkaBrokersDataPoint(now, brokerCount) // messaging.kafka.brokers.count should replace kafka.brokers. 
- s.mb.RecordKafkaBrokersCountDataPoint(now, brokerCount) + s.mb.RecordMessagingKafkaBrokersCountDataPoint(now, brokerCount) return s.mb.Emit(), nil } From 4a3a14986ca2c054d8795ddd3aaccef4ab48ee33 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Wed, 2 Aug 2023 08:46:11 -0700 Subject: [PATCH 12/36] chore: run make gen --- .../internal/metadata/generated_metrics.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index 6d5edec4d36c..905b121e60dd 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -1101,10 +1101,11 @@ func newMetricMessagingKafkaBrokersResponseSize(cfg MetricConfig) metricMessagin // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. metricKafkaBrokers metricKafkaBrokers metricKafkaConsumerGroupLag metricKafkaConsumerGroupLag metricKafkaConsumerGroupLagSum metricKafkaConsumerGroupLagSum @@ -1173,6 +1174,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_size`: This metric will be enabled by default in the next versions.") } mb := &MetricsBuilder{ + config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), metricsBuffer: pmetric.NewMetrics(), buildInfo: settings.BuildInfo, From 46cc504d72c45c2fe503b5e52d48a4f3c05ce5b8 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 3 Aug 2023 07:27:30 -0700 Subject: [PATCH 13/36] chore: resolve merge conflict/description --- .../kafkametricsreceiver/testdata/integration/expected.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml index d62aa90d2939..6f02cc262f3d 100644 --- a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml +++ b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml @@ -2,7 +2,7 @@ resourceMetrics: - resource: {} scopeMetrics: - metrics: - - description: Number of brokers in the cluster. + - description: "[DEPRECATED] Number of brokers in the cluster." 
gauge: aggregationTemporality: 2 dataPoints: From 0c09dd1081e0c00fca1d950fabdd0156836adfb9 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 3 Aug 2023 21:39:55 -0700 Subject: [PATCH 14/36] chore: make updates based on semantic convention --- .../kafkametricsreceiver/broker_scraper.go | 40 +- .../kafkametricsreceiver/documentation.md | 52 +-- .../internal/metadata/generated_config.go | 40 +- .../metadata/generated_config_test.go | 40 +- .../internal/metadata/generated_metrics.go | 402 +++++++++--------- .../metadata/generated_metrics_test.go | 144 ++++--- .../internal/metadata/testdata/config.yaml | 40 +- receiver/kafkametricsreceiver/metadata.yaml | 54 ++- 8 files changed, 416 insertions(+), 396 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index e4e9779c1b14..6fe3f958903d 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -29,11 +29,11 @@ type brokerScraper struct { type saramaMetrics map[string]map[string]interface{} // saramaMetrics is a map of metric name to tags var nrMetricsPrefix = [...]string{ - "consumer-fetch-rate-for-broker-", + "consumer-fetch-count-for-broker-", "incoming-byte-rate-for-broker-", "outgoing-byte-rate-for-broker-", - "request-rate-for-broker-", - "response-rate-for-broker-", + "request-count-for-broker-", + "response-count-for-broker-", "response-size-for-broker-", "request-size-for-broker-", "requests-in-flight-for-broker-", @@ -61,41 +61,41 @@ func (s *brokerScraper) scrapeMetric(now pcommon.Timestamp, allMetrics saramaMet if metric, ok := allMetrics[key]; ok { switch prefix { - case "consumer-fetch-rate-for-broker-": - if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersConsumerFetchRateDataPoint(now, v, brokerID) + case "consumer-fetch-count-for-broker-": + if v, ok := metric["mean"].(float64); ok { + s.mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(now, v, brokerID) } case "incoming-byte-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersIncomingByteRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerIncomingByteRateDataPoint(now, v, brokerID) } case "outgoing-byte-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersOutgoingByteRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(now, v, brokerID) } - case "request-rate-for-broker-": - if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersRequestRateDataPoint(now, v, brokerID) + case "request-count-for-broker-": + if v, ok := metric["mean"].(float64); ok { + s.mb.RecordMessagingKafkaBrokerRequestCountDataPoint(now, v, brokerID) } - case "response-rate-for-broker-": - if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersResponseRateDataPoint(now, v, brokerID) + case "response-count-for-broker-": + if v, ok := metric["mean"].(float64); ok { + s.mb.RecordMessagingKafkaBrokerResponseCountDataPoint(now, v, brokerID) } case "response-size-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersResponseSizeDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerResponseSizeDataPoint(now, v, brokerID) } case "request-size-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersRequestSizeDataPoint(now, v, 
brokerID) + s.mb.RecordMessagingKafkaBrokerRequestSizeDataPoint(now, v, brokerID) } case "requests-in-flight-for-broker-": if v, ok := metric["count"].(int64); ok { - s.mb.RecordMessagingKafkaBrokersRequestsInFlightDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerRequestsInFlightDataPoint(now, v, brokerID) } case "request-latency-in-ms-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersRequestLatencyDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerRequestLatencyDataPoint(now, v, brokerID) } default: fmt.Printf("undefined for prefix %s\n", prefix) @@ -132,8 +132,8 @@ func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { // kafka.brokers is deprecated. This should be removed in a future release. s.mb.RecordKafkaBrokersDataPoint(now, brokerCount) - // messaging.kafka.brokers.count should replace kafka.brokers. - s.mb.RecordMessagingKafkaBrokersCountDataPoint(now, brokerCount) + // messaging.kafka.broker.count should replace kafka.brokers. + s.mb.RecordMessagingKafkaBrokerCountDataPoint(now, brokerCount) return s.mb.Emit(), nil } diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index bc3607c21ee1..a0d561f70be8 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -180,13 +180,13 @@ metrics: enabled: true ``` -### messaging.kafka.brokers.consumer_fetch_rate +### messaging.kafka.broker.consumer_fetch_count -Average consumer fetch Rate +Count of consumer fetches -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {fetches}/s | Gauge | Double | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {fetches} | Sum | Double | Cumulative | false | #### Attributes @@ -194,7 +194,7 @@ Average consumer fetch Rate | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.count +### messaging.kafka.broker.count Number of brokers in the cluster. @@ -202,9 +202,9 @@ Number of brokers in the cluster. | ---- | ----------- | ---------- | ----------------------- | --------- | | {brokers} | Sum | Int | Cumulative | false | -### messaging.kafka.brokers.incoming_byte_rate +### messaging.kafka.broker.incoming_byte_rate -Average tncoming Byte Rate in bytes/second +Average Bytes received per second | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -216,9 +216,9 @@ Average tncoming Byte Rate in bytes/second | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.outgoing_byte_rate +### messaging.kafka.broker.outgoing_byte_rate -Average outgoing Byte Rate in bytes/second. +Average Bytes sent per second | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -230,13 +230,13 @@ Average outgoing Byte Rate in bytes/second. 
| ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.request_latency +### messaging.kafka.broker.request_count -Average request latency in ms +Number of requests in the broker -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| ms | Gauge | Double | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {requests} | Sum | Double | Cumulative | false | #### Attributes @@ -244,13 +244,13 @@ Average request latency in ms | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.request_rate +### messaging.kafka.broker.request_latency -Average request rate per second. +Average request latency in ms | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| {requests}/s | Gauge | Double | +| ms | Gauge | Double | #### Attributes @@ -258,7 +258,7 @@ Average request rate per second. | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.request_size +### messaging.kafka.broker.request_size Average request size in bytes @@ -272,7 +272,7 @@ Average request size in bytes | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.requests_in_flight +### messaging.kafka.broker.requests_in_flight Requests in flight @@ -286,13 +286,13 @@ Requests in flight | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.response_rate +### messaging.kafka.broker.response_count -Average response rate per second +Number of responses from the broker -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {response}/s | Gauge | Double | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {response} | Sum | Double | Cumulative | false | #### Attributes @@ -300,7 +300,7 @@ Average response rate per second | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.brokers.response_size +### messaging.kafka.broker.response_size Average response size in bytes diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index e679464972f0..913deee20fcc 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -36,16 +36,16 @@ type MetricsConfig struct { KafkaPartitionReplicas MetricConfig `mapstructure:"kafka.partition.replicas"` KafkaPartitionReplicasInSync MetricConfig `mapstructure:"kafka.partition.replicas_in_sync"` KafkaTopicPartitions MetricConfig `mapstructure:"kafka.topic.partitions"` - MessagingKafkaBrokersConsumerFetchRate MetricConfig `mapstructure:"messaging.kafka.brokers.consumer_fetch_rate"` - MessagingKafkaBrokersCount MetricConfig `mapstructure:"messaging.kafka.brokers.count"` - MessagingKafkaBrokersIncomingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.incoming_byte_rate"` - MessagingKafkaBrokersOutgoingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.outgoing_byte_rate"` - MessagingKafkaBrokersRequestLatency MetricConfig `mapstructure:"messaging.kafka.brokers.request_latency"` - MessagingKafkaBrokersRequestRate 
MetricConfig `mapstructure:"messaging.kafka.brokers.request_rate"` - MessagingKafkaBrokersRequestSize MetricConfig `mapstructure:"messaging.kafka.brokers.request_size"` - MessagingKafkaBrokersRequestsInFlight MetricConfig `mapstructure:"messaging.kafka.brokers.requests_in_flight"` - MessagingKafkaBrokersResponseRate MetricConfig `mapstructure:"messaging.kafka.brokers.response_rate"` - MessagingKafkaBrokersResponseSize MetricConfig `mapstructure:"messaging.kafka.brokers.response_size"` + MessagingKafkaBrokerConsumerFetchCount MetricConfig `mapstructure:"messaging.kafka.broker.consumer_fetch_count"` + MessagingKafkaBrokerCount MetricConfig `mapstructure:"messaging.kafka.broker.count"` + MessagingKafkaBrokerIncomingByteRate MetricConfig `mapstructure:"messaging.kafka.broker.incoming_byte_rate"` + MessagingKafkaBrokerOutgoingByteRate MetricConfig `mapstructure:"messaging.kafka.broker.outgoing_byte_rate"` + MessagingKafkaBrokerRequestCount MetricConfig `mapstructure:"messaging.kafka.broker.request_count"` + MessagingKafkaBrokerRequestLatency MetricConfig `mapstructure:"messaging.kafka.broker.request_latency"` + MessagingKafkaBrokerRequestSize MetricConfig `mapstructure:"messaging.kafka.broker.request_size"` + MessagingKafkaBrokerRequestsInFlight MetricConfig `mapstructure:"messaging.kafka.broker.requests_in_flight"` + MessagingKafkaBrokerResponseCount MetricConfig `mapstructure:"messaging.kafka.broker.response_count"` + MessagingKafkaBrokerResponseSize MetricConfig `mapstructure:"messaging.kafka.broker.response_size"` } func DefaultMetricsConfig() MetricsConfig { @@ -83,34 +83,34 @@ func DefaultMetricsConfig() MetricsConfig { KafkaTopicPartitions: MetricConfig{ Enabled: true, }, - MessagingKafkaBrokersConsumerFetchRate: MetricConfig{ + MessagingKafkaBrokerConsumerFetchCount: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersCount: MetricConfig{ + MessagingKafkaBrokerCount: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersIncomingByteRate: MetricConfig{ + MessagingKafkaBrokerIncomingByteRate: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersOutgoingByteRate: MetricConfig{ + MessagingKafkaBrokerOutgoingByteRate: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersRequestLatency: MetricConfig{ + MessagingKafkaBrokerRequestCount: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersRequestRate: MetricConfig{ + MessagingKafkaBrokerRequestLatency: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersRequestSize: MetricConfig{ + MessagingKafkaBrokerRequestSize: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersRequestsInFlight: MetricConfig{ + MessagingKafkaBrokerRequestsInFlight: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersResponseRate: MetricConfig{ + MessagingKafkaBrokerResponseCount: MetricConfig{ Enabled: false, }, - MessagingKafkaBrokersResponseSize: MetricConfig{ + MessagingKafkaBrokerResponseSize: MetricConfig{ Enabled: false, }, } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go index b43b3d6480fd..2bf4063f40b3 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go @@ -37,16 +37,16 @@ func TestMetricsBuilderConfig(t *testing.T) { KafkaPartitionReplicas: MetricConfig{Enabled: true}, KafkaPartitionReplicasInSync: MetricConfig{Enabled: true}, KafkaTopicPartitions: MetricConfig{Enabled: true}, - 
MessagingKafkaBrokersConsumerFetchRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokersCount: MetricConfig{Enabled: true}, - MessagingKafkaBrokersIncomingByteRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokersOutgoingByteRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokersRequestLatency: MetricConfig{Enabled: true}, - MessagingKafkaBrokersRequestRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokersRequestSize: MetricConfig{Enabled: true}, - MessagingKafkaBrokersRequestsInFlight: MetricConfig{Enabled: true}, - MessagingKafkaBrokersResponseRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokersResponseSize: MetricConfig{Enabled: true}, + MessagingKafkaBrokerConsumerFetchCount: MetricConfig{Enabled: true}, + MessagingKafkaBrokerCount: MetricConfig{Enabled: true}, + MessagingKafkaBrokerIncomingByteRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokerOutgoingByteRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestCount: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestLatency: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestSize: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestsInFlight: MetricConfig{Enabled: true}, + MessagingKafkaBrokerResponseCount: MetricConfig{Enabled: true}, + MessagingKafkaBrokerResponseSize: MetricConfig{Enabled: true}, }, }, }, @@ -65,16 +65,16 @@ func TestMetricsBuilderConfig(t *testing.T) { KafkaPartitionReplicas: MetricConfig{Enabled: false}, KafkaPartitionReplicasInSync: MetricConfig{Enabled: false}, KafkaTopicPartitions: MetricConfig{Enabled: false}, - MessagingKafkaBrokersConsumerFetchRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokersCount: MetricConfig{Enabled: false}, - MessagingKafkaBrokersIncomingByteRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokersOutgoingByteRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokersRequestLatency: MetricConfig{Enabled: false}, - MessagingKafkaBrokersRequestRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokersRequestSize: MetricConfig{Enabled: false}, - MessagingKafkaBrokersRequestsInFlight: MetricConfig{Enabled: false}, - MessagingKafkaBrokersResponseRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokersResponseSize: MetricConfig{Enabled: false}, + MessagingKafkaBrokerConsumerFetchCount: MetricConfig{Enabled: false}, + MessagingKafkaBrokerCount: MetricConfig{Enabled: false}, + MessagingKafkaBrokerIncomingByteRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokerOutgoingByteRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestCount: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestLatency: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestSize: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestsInFlight: MetricConfig{Enabled: false}, + MessagingKafkaBrokerResponseCount: MetricConfig{Enabled: false}, + MessagingKafkaBrokerResponseSize: MetricConfig{Enabled: false}, }, }, }, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index 905b121e60dd..2478f4caff1a 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -588,26 +588,28 @@ func newMetricKafkaTopicPartitions(cfg MetricConfig) metricKafkaTopicPartitions return m } -type metricMessagingKafkaBrokersConsumerFetchRate struct { +type metricMessagingKafkaBrokerConsumerFetchCount struct { data pmetric.Metric // data buffer for 
generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.consumer_fetch_rate metric with initial data. -func (m *metricMessagingKafkaBrokersConsumerFetchRate) init() { - m.data.SetName("messaging.kafka.brokers.consumer_fetch_rate") - m.data.SetDescription("Average consumer fetch Rate") - m.data.SetUnit("{fetches}/s") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.broker.consumer_fetch_count metric with initial data. +func (m *metricMessagingKafkaBrokerConsumerFetchCount) init() { + m.data.SetName("messaging.kafka.broker.consumer_fetch_count") + m.data.SetDescription("Count of consumer fetches") + m.data.SetUnit("{fetches}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersConsumerFetchRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerConsumerFetchCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) @@ -615,23 +617,23 @@ func (m *metricMessagingKafkaBrokersConsumerFetchRate) recordDataPoint(start pco } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersConsumerFetchRate) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricMessagingKafkaBrokerConsumerFetchCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMessagingKafkaBrokersConsumerFetchRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokerConsumerFetchCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricMessagingKafkaBrokersConsumerFetchRate(cfg MetricConfig) metricMessagingKafkaBrokersConsumerFetchRate { - m := metricMessagingKafkaBrokersConsumerFetchRate{config: cfg} +func newMetricMessagingKafkaBrokerConsumerFetchCount(cfg MetricConfig) metricMessagingKafkaBrokerConsumerFetchCount { + m := metricMessagingKafkaBrokerConsumerFetchCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -639,15 +641,15 @@ func newMetricMessagingKafkaBrokersConsumerFetchRate(cfg MetricConfig) metricMes return m } -type metricMessagingKafkaBrokersCount struct { +type metricMessagingKafkaBrokerCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.count metric with initial data. 
-func (m *metricMessagingKafkaBrokersCount) init() { - m.data.SetName("messaging.kafka.brokers.count") +// init fills messaging.kafka.broker.count metric with initial data. +func (m *metricMessagingKafkaBrokerCount) init() { + m.data.SetName("messaging.kafka.broker.count") m.data.SetDescription("Number of brokers in the cluster.") m.data.SetUnit("{brokers}") m.data.SetEmptySum() @@ -655,7 +657,7 @@ func (m *metricMessagingKafkaBrokersCount) init() { m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } -func (m *metricMessagingKafkaBrokersCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { +func (m *metricMessagingKafkaBrokerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } @@ -666,14 +668,14 @@ func (m *metricMessagingKafkaBrokersCount) recordDataPoint(start pcommon.Timesta } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersCount) updateCapacity() { +func (m *metricMessagingKafkaBrokerCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMessagingKafkaBrokersCount) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokerCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -681,8 +683,8 @@ func (m *metricMessagingKafkaBrokersCount) emit(metrics pmetric.MetricSlice) { } } -func newMetricMessagingKafkaBrokersCount(cfg MetricConfig) metricMessagingKafkaBrokersCount { - m := metricMessagingKafkaBrokersCount{config: cfg} +func newMetricMessagingKafkaBrokerCount(cfg MetricConfig) metricMessagingKafkaBrokerCount { + m := metricMessagingKafkaBrokerCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -690,22 +692,22 @@ func newMetricMessagingKafkaBrokersCount(cfg MetricConfig) metricMessagingKafkaB return m } -type metricMessagingKafkaBrokersIncomingByteRate struct { +type metricMessagingKafkaBrokerIncomingByteRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.incoming_byte_rate metric with initial data. -func (m *metricMessagingKafkaBrokersIncomingByteRate) init() { - m.data.SetName("messaging.kafka.brokers.incoming_byte_rate") - m.data.SetDescription("Average tncoming Byte Rate in bytes/second") +// init fills messaging.kafka.broker.incoming_byte_rate metric with initial data. 
+func (m *metricMessagingKafkaBrokerIncomingByteRate) init() { + m.data.SetName("messaging.kafka.broker.incoming_byte_rate") + m.data.SetDescription("Average Bytes received per second") m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersIncomingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerIncomingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } @@ -717,14 +719,14 @@ func (m *metricMessagingKafkaBrokersIncomingByteRate) recordDataPoint(start pcom } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersIncomingByteRate) updateCapacity() { +func (m *metricMessagingKafkaBrokerIncomingByteRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMessagingKafkaBrokersIncomingByteRate) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokerIncomingByteRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -732,8 +734,8 @@ func (m *metricMessagingKafkaBrokersIncomingByteRate) emit(metrics pmetric.Metri } } -func newMetricMessagingKafkaBrokersIncomingByteRate(cfg MetricConfig) metricMessagingKafkaBrokersIncomingByteRate { - m := metricMessagingKafkaBrokersIncomingByteRate{config: cfg} +func newMetricMessagingKafkaBrokerIncomingByteRate(cfg MetricConfig) metricMessagingKafkaBrokerIncomingByteRate { + m := metricMessagingKafkaBrokerIncomingByteRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -741,22 +743,22 @@ func newMetricMessagingKafkaBrokersIncomingByteRate(cfg MetricConfig) metricMess return m } -type metricMessagingKafkaBrokersOutgoingByteRate struct { +type metricMessagingKafkaBrokerOutgoingByteRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.outgoing_byte_rate metric with initial data. -func (m *metricMessagingKafkaBrokersOutgoingByteRate) init() { - m.data.SetName("messaging.kafka.brokers.outgoing_byte_rate") - m.data.SetDescription("Average outgoing Byte Rate in bytes/second.") +// init fills messaging.kafka.broker.outgoing_byte_rate metric with initial data. 
+func (m *metricMessagingKafkaBrokerOutgoingByteRate) init() { + m.data.SetName("messaging.kafka.broker.outgoing_byte_rate") + m.data.SetDescription("Average Bytes sent per second") m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersOutgoingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerOutgoingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } @@ -768,14 +770,14 @@ func (m *metricMessagingKafkaBrokersOutgoingByteRate) recordDataPoint(start pcom } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersOutgoingByteRate) updateCapacity() { +func (m *metricMessagingKafkaBrokerOutgoingByteRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMessagingKafkaBrokersOutgoingByteRate) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokerOutgoingByteRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -783,8 +785,8 @@ func (m *metricMessagingKafkaBrokersOutgoingByteRate) emit(metrics pmetric.Metri } } -func newMetricMessagingKafkaBrokersOutgoingByteRate(cfg MetricConfig) metricMessagingKafkaBrokersOutgoingByteRate { - m := metricMessagingKafkaBrokersOutgoingByteRate{config: cfg} +func newMetricMessagingKafkaBrokerOutgoingByteRate(cfg MetricConfig) metricMessagingKafkaBrokerOutgoingByteRate { + m := metricMessagingKafkaBrokerOutgoingByteRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -792,26 +794,28 @@ func newMetricMessagingKafkaBrokersOutgoingByteRate(cfg MetricConfig) metricMess return m } -type metricMessagingKafkaBrokersRequestLatency struct { +type metricMessagingKafkaBrokerRequestCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.request_latency metric with initial data. -func (m *metricMessagingKafkaBrokersRequestLatency) init() { - m.data.SetName("messaging.kafka.brokers.request_latency") - m.data.SetDescription("Average request latency in ms") - m.data.SetUnit("ms") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.broker.request_count metric with initial data. 
+func (m *metricMessagingKafkaBrokerRequestCount) init() { + m.data.SetName("messaging.kafka.broker.request_count") + m.data.SetDescription("Number of requests in the broker") + m.data.SetUnit("{requests}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersRequestLatency) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) @@ -819,23 +823,23 @@ func (m *metricMessagingKafkaBrokersRequestLatency) recordDataPoint(start pcommo } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersRequestLatency) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricMessagingKafkaBrokerRequestCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMessagingKafkaBrokersRequestLatency) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokerRequestCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricMessagingKafkaBrokersRequestLatency(cfg MetricConfig) metricMessagingKafkaBrokersRequestLatency { - m := metricMessagingKafkaBrokersRequestLatency{config: cfg} +func newMetricMessagingKafkaBrokerRequestCount(cfg MetricConfig) metricMessagingKafkaBrokerRequestCount { + m := metricMessagingKafkaBrokerRequestCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -843,22 +847,22 @@ func newMetricMessagingKafkaBrokersRequestLatency(cfg MetricConfig) metricMessag return m } -type metricMessagingKafkaBrokersRequestRate struct { +type metricMessagingKafkaBrokerRequestLatency struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.request_rate metric with initial data. -func (m *metricMessagingKafkaBrokersRequestRate) init() { - m.data.SetName("messaging.kafka.brokers.request_rate") - m.data.SetDescription("Average request rate per second.") - m.data.SetUnit("{requests}/s") +// init fills messaging.kafka.broker.request_latency metric with initial data. 
+func (m *metricMessagingKafkaBrokerRequestLatency) init() { + m.data.SetName("messaging.kafka.broker.request_latency") + m.data.SetDescription("Average request latency in ms") + m.data.SetUnit("ms") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersRequestRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerRequestLatency) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } @@ -870,14 +874,14 @@ func (m *metricMessagingKafkaBrokersRequestRate) recordDataPoint(start pcommon.T } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersRequestRate) updateCapacity() { +func (m *metricMessagingKafkaBrokerRequestLatency) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMessagingKafkaBrokersRequestRate) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokerRequestLatency) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -885,8 +889,8 @@ func (m *metricMessagingKafkaBrokersRequestRate) emit(metrics pmetric.MetricSlic } } -func newMetricMessagingKafkaBrokersRequestRate(cfg MetricConfig) metricMessagingKafkaBrokersRequestRate { - m := metricMessagingKafkaBrokersRequestRate{config: cfg} +func newMetricMessagingKafkaBrokerRequestLatency(cfg MetricConfig) metricMessagingKafkaBrokerRequestLatency { + m := metricMessagingKafkaBrokerRequestLatency{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -894,22 +898,22 @@ func newMetricMessagingKafkaBrokersRequestRate(cfg MetricConfig) metricMessaging return m } -type metricMessagingKafkaBrokersRequestSize struct { +type metricMessagingKafkaBrokerRequestSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.request_size metric with initial data. -func (m *metricMessagingKafkaBrokersRequestSize) init() { - m.data.SetName("messaging.kafka.brokers.request_size") +// init fills messaging.kafka.broker.request_size metric with initial data. +func (m *metricMessagingKafkaBrokerRequestSize) init() { + m.data.SetName("messaging.kafka.broker.request_size") m.data.SetDescription("Average request size in bytes") m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersRequestSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerRequestSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } @@ -921,14 +925,14 @@ func (m *metricMessagingKafkaBrokersRequestSize) recordDataPoint(start pcommon.T } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricMessagingKafkaBrokersRequestSize) updateCapacity() { +func (m *metricMessagingKafkaBrokerRequestSize) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMessagingKafkaBrokersRequestSize) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokerRequestSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -936,8 +940,8 @@ func (m *metricMessagingKafkaBrokersRequestSize) emit(metrics pmetric.MetricSlic } } -func newMetricMessagingKafkaBrokersRequestSize(cfg MetricConfig) metricMessagingKafkaBrokersRequestSize { - m := metricMessagingKafkaBrokersRequestSize{config: cfg} +func newMetricMessagingKafkaBrokerRequestSize(cfg MetricConfig) metricMessagingKafkaBrokerRequestSize { + m := metricMessagingKafkaBrokerRequestSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -945,22 +949,22 @@ func newMetricMessagingKafkaBrokersRequestSize(cfg MetricConfig) metricMessaging return m } -type metricMessagingKafkaBrokersRequestsInFlight struct { +type metricMessagingKafkaBrokerRequestsInFlight struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.requests_in_flight metric with initial data. -func (m *metricMessagingKafkaBrokersRequestsInFlight) init() { - m.data.SetName("messaging.kafka.brokers.requests_in_flight") +// init fills messaging.kafka.broker.requests_in_flight metric with initial data. +func (m *metricMessagingKafkaBrokerRequestsInFlight) init() { + m.data.SetName("messaging.kafka.broker.requests_in_flight") m.data.SetDescription("Requests in flight") m.data.SetUnit("{requests}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersRequestsInFlight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerRequestsInFlight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { if !m.config.Enabled { return } @@ -972,14 +976,14 @@ func (m *metricMessagingKafkaBrokersRequestsInFlight) recordDataPoint(start pcom } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersRequestsInFlight) updateCapacity() { +func (m *metricMessagingKafkaBrokerRequestsInFlight) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMessagingKafkaBrokersRequestsInFlight) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokerRequestsInFlight) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -987,8 +991,8 @@ func (m *metricMessagingKafkaBrokersRequestsInFlight) emit(metrics pmetric.Metri } } -func newMetricMessagingKafkaBrokersRequestsInFlight(cfg MetricConfig) metricMessagingKafkaBrokersRequestsInFlight { - m := metricMessagingKafkaBrokersRequestsInFlight{config: cfg} +func newMetricMessagingKafkaBrokerRequestsInFlight(cfg MetricConfig) metricMessagingKafkaBrokerRequestsInFlight { + m := metricMessagingKafkaBrokerRequestsInFlight{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -996,26 +1000,28 @@ func newMetricMessagingKafkaBrokersRequestsInFlight(cfg MetricConfig) metricMess return m } -type metricMessagingKafkaBrokersResponseRate struct { +type metricMessagingKafkaBrokerResponseCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.response_rate metric with initial data. -func (m *metricMessagingKafkaBrokersResponseRate) init() { - m.data.SetName("messaging.kafka.brokers.response_rate") - m.data.SetDescription("Average response rate per second") - m.data.SetUnit("{response}/s") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +// init fills messaging.kafka.broker.response_count metric with initial data. +func (m *metricMessagingKafkaBrokerResponseCount) init() { + m.data.SetName("messaging.kafka.broker.response_count") + m.data.SetDescription("Number of responses from the broker") + m.data.SetUnit("{response}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersResponseRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerResponseCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) @@ -1023,23 +1029,23 @@ func (m *metricMessagingKafkaBrokersResponseRate) recordDataPoint(start pcommon. } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersResponseRate) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() +func (m *metricMessagingKafkaBrokerResponseCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMessagingKafkaBrokersResponseRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { +func (m *metricMessagingKafkaBrokerResponseCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } -func newMetricMessagingKafkaBrokersResponseRate(cfg MetricConfig) metricMessagingKafkaBrokersResponseRate { - m := metricMessagingKafkaBrokersResponseRate{config: cfg} +func newMetricMessagingKafkaBrokerResponseCount(cfg MetricConfig) metricMessagingKafkaBrokerResponseCount { + m := metricMessagingKafkaBrokerResponseCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -1047,22 +1053,22 @@ func newMetricMessagingKafkaBrokersResponseRate(cfg MetricConfig) metricMessagin return m } -type metricMessagingKafkaBrokersResponseSize struct { +type metricMessagingKafkaBrokerResponseSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills messaging.kafka.brokers.response_size metric with initial data. -func (m *metricMessagingKafkaBrokersResponseSize) init() { - m.data.SetName("messaging.kafka.brokers.response_size") +// init fills messaging.kafka.broker.response_size metric with initial data. +func (m *metricMessagingKafkaBrokerResponseSize) init() { + m.data.SetName("messaging.kafka.broker.response_size") m.data.SetDescription("Average response size in bytes") m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokersResponseSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerResponseSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return } @@ -1074,14 +1080,14 @@ func (m *metricMessagingKafkaBrokersResponseSize) recordDataPoint(start pcommon. } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMessagingKafkaBrokersResponseSize) updateCapacity() { +func (m *metricMessagingKafkaBrokerResponseSize) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMessagingKafkaBrokersResponseSize) emit(metrics pmetric.MetricSlice) { +func (m *metricMessagingKafkaBrokerResponseSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1089,8 +1095,8 @@ func (m *metricMessagingKafkaBrokersResponseSize) emit(metrics pmetric.MetricSli } } -func newMetricMessagingKafkaBrokersResponseSize(cfg MetricConfig) metricMessagingKafkaBrokersResponseSize { - m := metricMessagingKafkaBrokersResponseSize{config: cfg} +func newMetricMessagingKafkaBrokerResponseSize(cfg MetricConfig) metricMessagingKafkaBrokerResponseSize { + m := metricMessagingKafkaBrokerResponseSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -1117,16 +1123,16 @@ type MetricsBuilder struct { metricKafkaPartitionReplicas metricKafkaPartitionReplicas metricKafkaPartitionReplicasInSync metricKafkaPartitionReplicasInSync metricKafkaTopicPartitions metricKafkaTopicPartitions - metricMessagingKafkaBrokersConsumerFetchRate metricMessagingKafkaBrokersConsumerFetchRate - metricMessagingKafkaBrokersCount metricMessagingKafkaBrokersCount - metricMessagingKafkaBrokersIncomingByteRate metricMessagingKafkaBrokersIncomingByteRate - metricMessagingKafkaBrokersOutgoingByteRate metricMessagingKafkaBrokersOutgoingByteRate - metricMessagingKafkaBrokersRequestLatency metricMessagingKafkaBrokersRequestLatency - metricMessagingKafkaBrokersRequestRate metricMessagingKafkaBrokersRequestRate - metricMessagingKafkaBrokersRequestSize metricMessagingKafkaBrokersRequestSize - metricMessagingKafkaBrokersRequestsInFlight metricMessagingKafkaBrokersRequestsInFlight - metricMessagingKafkaBrokersResponseRate metricMessagingKafkaBrokersResponseRate - metricMessagingKafkaBrokersResponseSize metricMessagingKafkaBrokersResponseSize + metricMessagingKafkaBrokerConsumerFetchCount metricMessagingKafkaBrokerConsumerFetchCount + metricMessagingKafkaBrokerCount metricMessagingKafkaBrokerCount + metricMessagingKafkaBrokerIncomingByteRate metricMessagingKafkaBrokerIncomingByteRate + metricMessagingKafkaBrokerOutgoingByteRate metricMessagingKafkaBrokerOutgoingByteRate + metricMessagingKafkaBrokerRequestCount metricMessagingKafkaBrokerRequestCount + metricMessagingKafkaBrokerRequestLatency metricMessagingKafkaBrokerRequestLatency + metricMessagingKafkaBrokerRequestSize metricMessagingKafkaBrokerRequestSize + metricMessagingKafkaBrokerRequestsInFlight metricMessagingKafkaBrokerRequestsInFlight + metricMessagingKafkaBrokerResponseCount metricMessagingKafkaBrokerResponseCount + metricMessagingKafkaBrokerResponseSize metricMessagingKafkaBrokerResponseSize } // metricBuilderOption applies changes to default metrics builder. @@ -1143,35 +1149,35 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting if mbc.Metrics.KafkaBrokers.Enabled { settings.Logger.Warn("[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.brokers.count`") } - if !mbc.Metrics.MessagingKafkaBrokersConsumerFetchRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerConsumerFetchCount.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersCount.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.count`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerCount.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.count`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersIncomingByteRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerIncomingByteRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.incoming_byte_rate`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersOutgoingByteRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerOutgoingByteRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.outgoing_byte_rate`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersRequestLatency.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerRequestCount.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.request_count`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersRequestRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerRequestLatency.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.request_latency`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersRequestSize.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_size`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerRequestSize.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.request_size`: This metric will be enabled by 
default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersRequestsInFlight.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerRequestsInFlight.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.requests_in_flight`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersResponseRate.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerResponseCount.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.response_count`: This metric will be enabled by default in the next versions.") } - if !mbc.Metrics.MessagingKafkaBrokersResponseSize.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_size`: This metric will be enabled by default in the next versions.") + if !mbc.Metrics.MessagingKafkaBrokerResponseSize.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.response_size`: This metric will be enabled by default in the next versions.") } mb := &MetricsBuilder{ config: mbc, @@ -1189,16 +1195,16 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricKafkaPartitionReplicas: newMetricKafkaPartitionReplicas(mbc.Metrics.KafkaPartitionReplicas), metricKafkaPartitionReplicasInSync: newMetricKafkaPartitionReplicasInSync(mbc.Metrics.KafkaPartitionReplicasInSync), metricKafkaTopicPartitions: newMetricKafkaTopicPartitions(mbc.Metrics.KafkaTopicPartitions), - metricMessagingKafkaBrokersConsumerFetchRate: newMetricMessagingKafkaBrokersConsumerFetchRate(mbc.Metrics.MessagingKafkaBrokersConsumerFetchRate), - metricMessagingKafkaBrokersCount: newMetricMessagingKafkaBrokersCount(mbc.Metrics.MessagingKafkaBrokersCount), - metricMessagingKafkaBrokersIncomingByteRate: newMetricMessagingKafkaBrokersIncomingByteRate(mbc.Metrics.MessagingKafkaBrokersIncomingByteRate), - metricMessagingKafkaBrokersOutgoingByteRate: newMetricMessagingKafkaBrokersOutgoingByteRate(mbc.Metrics.MessagingKafkaBrokersOutgoingByteRate), - metricMessagingKafkaBrokersRequestLatency: newMetricMessagingKafkaBrokersRequestLatency(mbc.Metrics.MessagingKafkaBrokersRequestLatency), - metricMessagingKafkaBrokersRequestRate: newMetricMessagingKafkaBrokersRequestRate(mbc.Metrics.MessagingKafkaBrokersRequestRate), - metricMessagingKafkaBrokersRequestSize: newMetricMessagingKafkaBrokersRequestSize(mbc.Metrics.MessagingKafkaBrokersRequestSize), - metricMessagingKafkaBrokersRequestsInFlight: newMetricMessagingKafkaBrokersRequestsInFlight(mbc.Metrics.MessagingKafkaBrokersRequestsInFlight), - metricMessagingKafkaBrokersResponseRate: newMetricMessagingKafkaBrokersResponseRate(mbc.Metrics.MessagingKafkaBrokersResponseRate), - metricMessagingKafkaBrokersResponseSize: newMetricMessagingKafkaBrokersResponseSize(mbc.Metrics.MessagingKafkaBrokersResponseSize), + metricMessagingKafkaBrokerConsumerFetchCount: newMetricMessagingKafkaBrokerConsumerFetchCount(mbc.Metrics.MessagingKafkaBrokerConsumerFetchCount), + 
metricMessagingKafkaBrokerCount: newMetricMessagingKafkaBrokerCount(mbc.Metrics.MessagingKafkaBrokerCount), + metricMessagingKafkaBrokerIncomingByteRate: newMetricMessagingKafkaBrokerIncomingByteRate(mbc.Metrics.MessagingKafkaBrokerIncomingByteRate), + metricMessagingKafkaBrokerOutgoingByteRate: newMetricMessagingKafkaBrokerOutgoingByteRate(mbc.Metrics.MessagingKafkaBrokerOutgoingByteRate), + metricMessagingKafkaBrokerRequestCount: newMetricMessagingKafkaBrokerRequestCount(mbc.Metrics.MessagingKafkaBrokerRequestCount), + metricMessagingKafkaBrokerRequestLatency: newMetricMessagingKafkaBrokerRequestLatency(mbc.Metrics.MessagingKafkaBrokerRequestLatency), + metricMessagingKafkaBrokerRequestSize: newMetricMessagingKafkaBrokerRequestSize(mbc.Metrics.MessagingKafkaBrokerRequestSize), + metricMessagingKafkaBrokerRequestsInFlight: newMetricMessagingKafkaBrokerRequestsInFlight(mbc.Metrics.MessagingKafkaBrokerRequestsInFlight), + metricMessagingKafkaBrokerResponseCount: newMetricMessagingKafkaBrokerResponseCount(mbc.Metrics.MessagingKafkaBrokerResponseCount), + metricMessagingKafkaBrokerResponseSize: newMetricMessagingKafkaBrokerResponseSize(mbc.Metrics.MessagingKafkaBrokerResponseSize), } for _, op := range options { op(mb) @@ -1266,16 +1272,16 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricKafkaPartitionReplicas.emit(ils.Metrics()) mb.metricKafkaPartitionReplicasInSync.emit(ils.Metrics()) mb.metricKafkaTopicPartitions.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersConsumerFetchRate.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersCount.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersIncomingByteRate.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersOutgoingByteRate.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersRequestLatency.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersRequestRate.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersRequestSize.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersRequestsInFlight.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersResponseRate.emit(ils.Metrics()) - mb.metricMessagingKafkaBrokersResponseSize.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerConsumerFetchCount.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerCount.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerIncomingByteRate.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerOutgoingByteRate.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerRequestCount.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerRequestLatency.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerRequestSize.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerRequestsInFlight.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerResponseCount.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerResponseSize.emit(ils.Metrics()) for _, op := range rmo { op(rm) @@ -1351,54 +1357,54 @@ func (mb *MetricsBuilder) RecordKafkaTopicPartitionsDataPoint(ts pcommon.Timesta mb.metricKafkaTopicPartitions.recordDataPoint(mb.startTime, ts, val, topicAttributeValue) } -// RecordMessagingKafkaBrokersConsumerFetchRateDataPoint adds a data point to messaging.kafka.brokers.consumer_fetch_rate metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersConsumerFetchRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersConsumerFetchRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerConsumerFetchCountDataPoint adds a data point to messaging.kafka.broker.consumer_fetch_count metric. 
+func (mb *MetricsBuilder) RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerConsumerFetchCount.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersCountDataPoint adds a data point to messaging.kafka.brokers.count metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersCountDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricMessagingKafkaBrokersCount.recordDataPoint(mb.startTime, ts, val) +// RecordMessagingKafkaBrokerCountDataPoint adds a data point to messaging.kafka.broker.count metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerCountDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMessagingKafkaBrokerCount.recordDataPoint(mb.startTime, ts, val) } -// RecordMessagingKafkaBrokersIncomingByteRateDataPoint adds a data point to messaging.kafka.brokers.incoming_byte_rate metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersIncomingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersIncomingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerIncomingByteRateDataPoint adds a data point to messaging.kafka.broker.incoming_byte_rate metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerIncomingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerIncomingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersOutgoingByteRateDataPoint adds a data point to messaging.kafka.brokers.outgoing_byte_rate metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersOutgoingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersOutgoingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerOutgoingByteRateDataPoint adds a data point to messaging.kafka.broker.outgoing_byte_rate metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerOutgoingByteRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersRequestLatencyDataPoint adds a data point to messaging.kafka.brokers.request_latency metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestLatencyDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersRequestLatency.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerRequestCountDataPoint adds a data point to messaging.kafka.broker.request_count metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerRequestCountDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerRequestCount.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersRequestRateDataPoint adds a data point to messaging.kafka.brokers.request_rate metric. 
-func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersRequestRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerRequestLatencyDataPoint adds a data point to messaging.kafka.broker.request_latency metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerRequestLatencyDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerRequestLatency.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersRequestSizeDataPoint adds a data point to messaging.kafka.brokers.request_size metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersRequestSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerRequestSizeDataPoint adds a data point to messaging.kafka.broker.request_size metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerRequestSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerRequestSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersRequestsInFlightDataPoint adds a data point to messaging.kafka.brokers.requests_in_flight metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersRequestsInFlightDataPoint(ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersRequestsInFlight.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerRequestsInFlightDataPoint adds a data point to messaging.kafka.broker.requests_in_flight metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerRequestsInFlightDataPoint(ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerRequestsInFlight.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersResponseRateDataPoint adds a data point to messaging.kafka.brokers.response_rate metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersResponseRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersResponseRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerResponseCountDataPoint adds a data point to messaging.kafka.broker.response_count metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerResponseCountDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerResponseCount.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } -// RecordMessagingKafkaBrokersResponseSizeDataPoint adds a data point to messaging.kafka.brokers.response_size metric. -func (mb *MetricsBuilder) RecordMessagingKafkaBrokersResponseSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - mb.metricMessagingKafkaBrokersResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +// RecordMessagingKafkaBrokerResponseSizeDataPoint adds a data point to messaging.kafka.broker.response_size metric. 
+func (mb *MetricsBuilder) RecordMessagingKafkaBrokerResponseSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 15f908357dc6..358be918eb09 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -54,43 +54,43 @@ func TestMetricsBuilder(t *testing.T) { expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.consumer_fetch_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.incoming_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.incoming_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.outgoing_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.outgoing_byte_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_latency`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.request_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_rate`: This metric will be enabled by default in the next versions.", 
observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.request_latency`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.request_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.request_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.requests_in_flight`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.requests_in_flight`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.response_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.brokers.response_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.response_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } assert.Equal(t, expectedWarnings, observedLogs.Len()) @@ -143,34 +143,34 @@ func TestMetricsBuilder(t *testing.T) { mb.RecordKafkaTopicPartitionsDataPoint(ts, 1, "topic-val") allMetricsCount++ - mb.RecordMessagingKafkaBrokersConsumerFetchRateDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordMessagingKafkaBrokersCountDataPoint(ts, 1) + mb.RecordMessagingKafkaBrokerCountDataPoint(ts, 1) allMetricsCount++ - mb.RecordMessagingKafkaBrokersIncomingByteRateDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerIncomingByteRateDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordMessagingKafkaBrokersOutgoingByteRateDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordMessagingKafkaBrokersRequestLatencyDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerRequestCountDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordMessagingKafkaBrokersRequestRateDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerRequestLatencyDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordMessagingKafkaBrokersRequestSizeDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerRequestSizeDataPoint(ts, 1, 6) allMetricsCount++ - 
mb.RecordMessagingKafkaBrokersRequestsInFlightDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerRequestsInFlightDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordMessagingKafkaBrokersResponseRateDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerResponseCountDataPoint(ts, 1, 6) allMetricsCount++ - mb.RecordMessagingKafkaBrokersResponseSizeDataPoint(ts, 1, 6) + mb.RecordMessagingKafkaBrokerResponseSizeDataPoint(ts, 1, 6) res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) @@ -394,14 +394,16 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("topic") assert.True(t, ok) assert.EqualValues(t, "topic-val", attrVal.Str()) - case "messaging.kafka.brokers.consumer_fetch_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.consumer_fetch_rate") - validatedMetrics["messaging.kafka.brokers.consumer_fetch_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average consumer fetch Rate", ms.At(i).Description()) - assert.Equal(t, "{fetches}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + case "messaging.kafka.broker.consumer_fetch_count": + assert.False(t, validatedMetrics["messaging.kafka.broker.consumer_fetch_count"], "Found a duplicate in the metrics slice: messaging.kafka.broker.consumer_fetch_count") + validatedMetrics["messaging.kafka.broker.consumer_fetch_count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Count of consumer fetches", ms.At(i).Description()) + assert.Equal(t, "{fetches}", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) @@ -409,9 +411,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.count": - assert.False(t, validatedMetrics["messaging.kafka.brokers.count"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.count") - validatedMetrics["messaging.kafka.brokers.count"] = true + case "messaging.kafka.broker.count": + assert.False(t, validatedMetrics["messaging.kafka.broker.count"], "Found a duplicate in the metrics slice: messaging.kafka.broker.count") + validatedMetrics["messaging.kafka.broker.count"] = true assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of brokers in the cluster.", ms.At(i).Description()) @@ -423,12 +425,12 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "messaging.kafka.brokers.incoming_byte_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.incoming_byte_rate") - validatedMetrics["messaging.kafka.brokers.incoming_byte_rate"] = true + case "messaging.kafka.broker.incoming_byte_rate": + assert.False(t, 
validatedMetrics["messaging.kafka.broker.incoming_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.broker.incoming_byte_rate") + validatedMetrics["messaging.kafka.broker.incoming_byte_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average tncoming Byte Rate in bytes/second", ms.At(i).Description()) + assert.Equal(t, "Average Bytes received per second", ms.At(i).Description()) assert.Equal(t, "1", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -438,12 +440,12 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.outgoing_byte_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.outgoing_byte_rate") - validatedMetrics["messaging.kafka.brokers.outgoing_byte_rate"] = true + case "messaging.kafka.broker.outgoing_byte_rate": + assert.False(t, validatedMetrics["messaging.kafka.broker.outgoing_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.broker.outgoing_byte_rate") + validatedMetrics["messaging.kafka.broker.outgoing_byte_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average outgoing Byte Rate in bytes/second.", ms.At(i).Description()) + assert.Equal(t, "Average Bytes sent per second", ms.At(i).Description()) assert.Equal(t, "1", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -453,14 +455,16 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.request_latency": - assert.False(t, validatedMetrics["messaging.kafka.brokers.request_latency"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_latency") - validatedMetrics["messaging.kafka.brokers.request_latency"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average request latency in ms", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + case "messaging.kafka.broker.request_count": + assert.False(t, validatedMetrics["messaging.kafka.broker.request_count"], "Found a duplicate in the metrics slice: messaging.kafka.broker.request_count") + validatedMetrics["messaging.kafka.broker.request_count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of requests in the broker", ms.At(i).Description()) + assert.Equal(t, "{requests}", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) @@ -468,13 +472,13 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case 
"messaging.kafka.brokers.request_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.request_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_rate") - validatedMetrics["messaging.kafka.brokers.request_rate"] = true + case "messaging.kafka.broker.request_latency": + assert.False(t, validatedMetrics["messaging.kafka.broker.request_latency"], "Found a duplicate in the metrics slice: messaging.kafka.broker.request_latency") + validatedMetrics["messaging.kafka.broker.request_latency"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average request rate per second.", ms.At(i).Description()) - assert.Equal(t, "{requests}/s", ms.At(i).Unit()) + assert.Equal(t, "Average request latency in ms", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -483,9 +487,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.request_size": - assert.False(t, validatedMetrics["messaging.kafka.brokers.request_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.request_size") - validatedMetrics["messaging.kafka.brokers.request_size"] = true + case "messaging.kafka.broker.request_size": + assert.False(t, validatedMetrics["messaging.kafka.broker.request_size"], "Found a duplicate in the metrics slice: messaging.kafka.broker.request_size") + validatedMetrics["messaging.kafka.broker.request_size"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average request size in bytes", ms.At(i).Description()) @@ -498,9 +502,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.requests_in_flight": - assert.False(t, validatedMetrics["messaging.kafka.brokers.requests_in_flight"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.requests_in_flight") - validatedMetrics["messaging.kafka.brokers.requests_in_flight"] = true + case "messaging.kafka.broker.requests_in_flight": + assert.False(t, validatedMetrics["messaging.kafka.broker.requests_in_flight"], "Found a duplicate in the metrics slice: messaging.kafka.broker.requests_in_flight") + validatedMetrics["messaging.kafka.broker.requests_in_flight"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Requests in flight", ms.At(i).Description()) @@ -513,14 +517,16 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.response_rate": - assert.False(t, validatedMetrics["messaging.kafka.brokers.response_rate"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_rate") - validatedMetrics["messaging.kafka.brokers.response_rate"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average response rate per second", ms.At(i).Description()) - assert.Equal(t, "{response}/s", ms.At(i).Unit()) - dp := 
ms.At(i).Gauge().DataPoints().At(0) + case "messaging.kafka.broker.response_count": + assert.False(t, validatedMetrics["messaging.kafka.broker.response_count"], "Found a duplicate in the metrics slice: messaging.kafka.broker.response_count") + validatedMetrics["messaging.kafka.broker.response_count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from the broker", ms.At(i).Description()) + assert.Equal(t, "{response}", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) @@ -528,9 +534,9 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.brokers.response_size": - assert.False(t, validatedMetrics["messaging.kafka.brokers.response_size"], "Found a duplicate in the metrics slice: messaging.kafka.brokers.response_size") - validatedMetrics["messaging.kafka.brokers.response_size"] = true + case "messaging.kafka.broker.response_size": + assert.False(t, validatedMetrics["messaging.kafka.broker.response_size"], "Found a duplicate in the metrics slice: messaging.kafka.broker.response_size") + validatedMetrics["messaging.kafka.broker.response_size"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average response size in bytes", ms.At(i).Description()) diff --git a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml index 293ab2825f3b..e6dff421481f 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml @@ -23,25 +23,25 @@ all_set: enabled: true kafka.topic.partitions: enabled: true - messaging.kafka.brokers.consumer_fetch_rate: + messaging.kafka.broker.consumer_fetch_count: enabled: true - messaging.kafka.brokers.count: + messaging.kafka.broker.count: enabled: true - messaging.kafka.brokers.incoming_byte_rate: + messaging.kafka.broker.incoming_byte_rate: enabled: true - messaging.kafka.brokers.outgoing_byte_rate: + messaging.kafka.broker.outgoing_byte_rate: enabled: true - messaging.kafka.brokers.request_latency: + messaging.kafka.broker.request_count: enabled: true - messaging.kafka.brokers.request_rate: + messaging.kafka.broker.request_latency: enabled: true - messaging.kafka.brokers.request_size: + messaging.kafka.broker.request_size: enabled: true - messaging.kafka.brokers.requests_in_flight: + messaging.kafka.broker.requests_in_flight: enabled: true - messaging.kafka.brokers.response_rate: + messaging.kafka.broker.response_count: enabled: true - messaging.kafka.brokers.response_size: + messaging.kafka.broker.response_size: enabled: true none_set: metrics: @@ -67,23 +67,23 @@ none_set: enabled: false kafka.topic.partitions: enabled: false - messaging.kafka.brokers.consumer_fetch_rate: + messaging.kafka.broker.consumer_fetch_count: enabled: false - messaging.kafka.brokers.count: + messaging.kafka.broker.count: enabled: false - messaging.kafka.brokers.incoming_byte_rate: + 
messaging.kafka.broker.incoming_byte_rate: enabled: false - messaging.kafka.brokers.outgoing_byte_rate: + messaging.kafka.broker.outgoing_byte_rate: enabled: false - messaging.kafka.brokers.request_latency: + messaging.kafka.broker.request_count: enabled: false - messaging.kafka.brokers.request_rate: + messaging.kafka.broker.request_latency: enabled: false - messaging.kafka.brokers.request_size: + messaging.kafka.broker.request_size: enabled: false - messaging.kafka.brokers.requests_in_flight: + messaging.kafka.broker.requests_in_flight: enabled: false - messaging.kafka.brokers.response_rate: + messaging.kafka.broker.response_count: enabled: false - messaging.kafka.brokers.response_size: + messaging.kafka.broker.response_size: enabled: false diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 2620310b55aa..eaf07b886e94 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -23,6 +23,7 @@ attributes: type: int metrics: + # brokers scraper kafka.brokers: enabled: true @@ -32,44 +33,45 @@ metrics: value_type: int warnings: if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.brokers.count` - messaging.kafka.brokers.count: + messaging.kafka.broker.count: enabled: false description: Number of brokers in the cluster. unit: "{brokers}" sum: monotonic: false value_type: int - aggregation: cumulative + aggregation_temporality: cumulative warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.consumer_fetch_rate: + messaging.kafka.broker.consumer_fetch_count: enabled: false - description: Average consumer fetch Rate - unit: "{fetches}/s" - gauge: + description: Count of consumer fetches + unit: "{fetches}" + sum: value_type: double + aggregation_temporality: cumulative attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.incoming_byte_rate: + messaging.kafka.broker.incoming_byte_rate: enabled: false - description: Average tncoming Byte Rate in bytes/second + description: Average Bytes received per second unit: 1 gauge: value_type: double attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.outgoing_byte_rate: + messaging.kafka.broker.outgoing_byte_rate: enabled: false - description: Average outgoing Byte Rate in bytes/second. + description: Average Bytes sent per second unit: 1 gauge: value_type: double attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.request_latency: + messaging.kafka.broker.request_latency: enabled: false description: Average request latency in ms unit: "ms" @@ -78,16 +80,17 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.response_rate: + messaging.kafka.broker.response_count: enabled: false - description: Average response rate per second - unit: "{response}/s" - gauge: + description: Number of responses from the broker + unit: "{response}" + sum: value_type: double + aggregation_temporality: cumulative attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. 
- messaging.kafka.brokers.response_size: + messaging.kafka.broker.response_size: enabled: false description: Average response size in bytes unit: "By" @@ -96,16 +99,18 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.request_rate: + messaging.kafka.broker.request_count: enabled: false - description: Average request rate per second. - unit: "{requests}/s" - gauge: + description: Number of requests in the broker + unit: "{requests}" + sum: value_type: double + + aggregation_temporality: cumulative attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.request_size: + messaging.kafka.broker.request_size: enabled: false description: Average request size in bytes unit: "By" @@ -114,7 +119,7 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.brokers.requests_in_flight: + messaging.kafka.broker.requests_in_flight: enabled: false description: Requests in flight unit: "{requests}" @@ -123,7 +128,8 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - aggregation_temporality: cumulative + + # topics scraper kafka.topic.partitions: enabled: true @@ -166,6 +172,8 @@ metrics: value_type: int aggregation_temporality: cumulative attributes: [topic, partition] + + # consumers scraper kafka.consumer_group.members: enabled: true From e137f3d66af5b6b8c32e5dca9fdf777bbbfd7801 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Mon, 7 Aug 2023 12:53:54 -0700 Subject: [PATCH 15/36] chore: revert to metrics based on sarama --- .../kafkametricsreceiver/broker_scraper.go | 20 +-- .../kafkametricsreceiver/documentation.md | 45 +++-- .../internal/metadata/generated_config.go | 26 +-- .../metadata/generated_config_test.go | 44 ++--- .../internal/metadata/generated_metrics.go | 166 +++++++++--------- .../metadata/generated_metrics_test.go | 35 ++-- .../internal/metadata/testdata/config.yaml | 4 + receiver/kafkametricsreceiver/metadata.yaml | 82 +-------- 8 files changed, 181 insertions(+), 241 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index ac4fa3755a77..d13cc617d4d1 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -34,7 +34,6 @@ var nrMetricsPrefix = [...]string{ "outgoing-byte-rate-for-broker-", "request-rate-for-broker-", "response-rate-for-broker-", - "response-size-for-broker-", "request-size-for-broker-", "requests-in-flight-for-broker-", @@ -64,39 +63,39 @@ func (s *brokerScraper) scrapeMetric(now pcommon.Timestamp, allMetrics saramaMet switch prefix { case "consumer-fetch-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersConsumerFetchRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(now, v, brokerID) } case "incoming-byte-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersIncomingByteRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerIncomingByteRateDataPoint(now, v, brokerID) } case "outgoing-byte-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersOutgoingByteRateDataPoint(now, 
v, brokerID) + s.mb.RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(now, v, brokerID) } case "request-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersRequestRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerRequestRateDataPoint(now, v, brokerID) } case "response-rate-for-broker-": if v, ok := metric["mean.rate"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersResponseRateDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerResponseRateDataPoint(now, v, brokerID) } case "response-size-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersResponseSizeDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerResponseSizeDataPoint(now, v, brokerID) } case "request-size-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersRequestSizeDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerRequestSizeDataPoint(now, v, brokerID) } case "requests-in-flight-for-broker-": if v, ok := metric["count"].(int64); ok { - s.mb.RecordMessagingKafkaBrokersRequestsInFlightDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerRequestsInFlightDataPoint(now, v, brokerID) } case "request-latency-in-ms-for-broker-": if v, ok := metric["mean"].(float64); ok { - s.mb.RecordMessagingKafkaBrokersRequestLatencyDataPoint(now, v, brokerID) + s.mb.RecordMessagingKafkaBrokerRequestLatencyDataPoint(now, v, brokerID) } default: fmt.Printf("undefined for prefix %s\n", prefix) @@ -136,7 +135,6 @@ func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { // messaging.kafka.broker.count should replace kafka.brokers. s.mb.RecordMessagingKafkaBrokerCountDataPoint(now, brokerCount) - return s.mb.Emit(), nil } diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 9e328f97e2c1..e23dacb86563 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -180,6 +180,20 @@ metrics: enabled: true ``` +### messaging.kafka.broker.consumer_fetch_count + +Count of consumer fetches + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {fetches} | Sum | Double | Cumulative | false | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| broker | The ID (integer) of a broker | Any Int | + ### messaging.kafka.broker.consumer_fetch_rate Average consumer fetch Rate @@ -188,14 +202,12 @@ Average consumer fetch Rate | ---- | ----------- | ---------- | | {fetches}/s | Gauge | Double | - #### Attributes | Name | Description | Values | | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.count Number of brokers in the cluster. @@ -204,11 +216,9 @@ Number of brokers in the cluster. | ---- | ----------- | ---------- | ----------------------- | --------- | | {brokers} | Sum | Int | Cumulative | false | - ### messaging.kafka.broker.incoming_byte_rate -Average Bytes received per second - +Average tncoming Byte Rate in bytes/second | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -220,11 +230,9 @@ Average Bytes received per second | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.outgoing_byte_rate -Average Bytes sent per second - +Average outgoing Byte Rate in bytes/second. 
| Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -244,14 +252,12 @@ Average request latency in ms | ---- | ----------- | ---------- | | ms | Gauge | Double | - #### Attributes | Name | Description | Values | | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.request_rate Average request rate per second. @@ -260,17 +266,14 @@ Average request rate per second. | ---- | ----------- | ---------- | | {requests}/s | Gauge | Double | - #### Attributes | Name | Description | Values | | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.request_size - Average request size in bytes | Unit | Metric Type | Value Type | @@ -283,10 +286,8 @@ Average request size in bytes | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.requests_in_flight - Requests in flight | Unit | Metric Type | Value Type | @@ -299,15 +300,13 @@ Requests in flight | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | +### messaging.kafka.broker.response_rate -### messaging.kafka.broker.response_count - -Number of responses from the broker - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {response} | Sum | Double | Cumulative | false | +Average response rate per second +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {response}/s | Gauge | Double | #### Attributes @@ -315,10 +314,8 @@ Number of responses from the broker | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.response_size - Average response size in bytes | Unit | Metric Type | Value Type | diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index 2c510fe3eec8..d37dfcc66105 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -36,17 +36,17 @@ type MetricsConfig struct { KafkaPartitionReplicas MetricConfig `mapstructure:"kafka.partition.replicas"` KafkaPartitionReplicasInSync MetricConfig `mapstructure:"kafka.partition.replicas_in_sync"` KafkaTopicPartitions MetricConfig `mapstructure:"kafka.topic.partitions"` - MessagingKafkaBrokerConsumerFetchRate MetricConfig `mapstructure:"messaging.kafka.brokers.consumer_fetch_rate"` - MessagingKafkaBrokerCount MetricConfig `mapstructure:"messaging.kafka.brokers.count"` - MessagingKafkaBrokerIncomingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.incoming_byte_rate"` - MessagingKafkaBrokerOutgoingByteRate MetricConfig `mapstructure:"messaging.kafka.brokers.outgoing_byte_rate"` - MessagingKafkaBrokerRequestLatency MetricConfig `mapstructure:"messaging.kafka.brokers.request_latency"` - MessagingKafkaBrokerRequestRate MetricConfig `mapstructure:"messaging.kafka.brokers.request_rate"` - MessagingKafkaBrokerRequestSize MetricConfig `mapstructure:"messaging.kafka.brokers.request_size"` - MessagingKafkaBrokerRequestsInFlight MetricConfig `mapstructure:"messaging.kafka.brokers.requests_in_flight"` - MessagingKafkaBrokerResponseRate MetricConfig `mapstructure:"messaging.kafka.brokers.response_rate"` - MessagingKafkaBrokerResponseSize MetricConfig `mapstructure:"messaging.kafka.brokers.response_size"` - + 
MessagingKafkaBrokerConsumerFetchCount MetricConfig `mapstructure:"messaging.kafka.broker.consumer_fetch_count"` + MessagingKafkaBrokerConsumerFetchRate MetricConfig `mapstructure:"messaging.kafka.broker.consumer_fetch_rate"` + MessagingKafkaBrokerCount MetricConfig `mapstructure:"messaging.kafka.broker.count"` + MessagingKafkaBrokerIncomingByteRate MetricConfig `mapstructure:"messaging.kafka.broker.incoming_byte_rate"` + MessagingKafkaBrokerOutgoingByteRate MetricConfig `mapstructure:"messaging.kafka.broker.outgoing_byte_rate"` + MessagingKafkaBrokerRequestLatency MetricConfig `mapstructure:"messaging.kafka.broker.request_latency"` + MessagingKafkaBrokerRequestRate MetricConfig `mapstructure:"messaging.kafka.broker.request_rate"` + MessagingKafkaBrokerRequestSize MetricConfig `mapstructure:"messaging.kafka.broker.request_size"` + MessagingKafkaBrokerRequestsInFlight MetricConfig `mapstructure:"messaging.kafka.broker.requests_in_flight"` + MessagingKafkaBrokerResponseRate MetricConfig `mapstructure:"messaging.kafka.broker.response_rate"` + MessagingKafkaBrokerResponseSize MetricConfig `mapstructure:"messaging.kafka.broker.response_size"` } func DefaultMetricsConfig() MetricsConfig { @@ -84,6 +84,9 @@ func DefaultMetricsConfig() MetricsConfig { KafkaTopicPartitions: MetricConfig{ Enabled: true, }, + MessagingKafkaBrokerConsumerFetchCount: MetricConfig{ + Enabled: false, + }, MessagingKafkaBrokerConsumerFetchRate: MetricConfig{ Enabled: false, }, @@ -112,7 +115,6 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MessagingKafkaBrokerResponseSize: MetricConfig{ - Enabled: false, }, } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go index 4554f45d5a6f..85fa4d151357 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config_test.go @@ -37,17 +37,17 @@ func TestMetricsBuilderConfig(t *testing.T) { KafkaPartitionReplicas: MetricConfig{Enabled: true}, KafkaPartitionReplicasInSync: MetricConfig{Enabled: true}, KafkaTopicPartitions: MetricConfig{Enabled: true}, - MessagingKafkaBrokerConsumerFetchRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokerCount: MetricConfig{Enabled: true}, - MessagingKafkaBrokerIncomingByteRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokerOutgoingByteRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokerRequestLatency: MetricConfig{Enabled: true}, - MessagingKafkaBrokerRequestRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokerRequestSize: MetricConfig{Enabled: true}, - MessagingKafkaBrokerRequestsInFlight: MetricConfig{Enabled: true}, - MessagingKafkaBrokerResponseRate: MetricConfig{Enabled: true}, - MessagingKafkaBrokerResponseSize: MetricConfig{Enabled: true}, - + MessagingKafkaBrokerConsumerFetchCount: MetricConfig{Enabled: true}, + MessagingKafkaBrokerConsumerFetchRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokerCount: MetricConfig{Enabled: true}, + MessagingKafkaBrokerIncomingByteRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokerOutgoingByteRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestLatency: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestRate: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestSize: MetricConfig{Enabled: true}, + MessagingKafkaBrokerRequestsInFlight: MetricConfig{Enabled: true}, + MessagingKafkaBrokerResponseRate: MetricConfig{Enabled: true}, + 
MessagingKafkaBrokerResponseSize: MetricConfig{Enabled: true}, }, }, }, @@ -66,17 +66,17 @@ func TestMetricsBuilderConfig(t *testing.T) { KafkaPartitionReplicas: MetricConfig{Enabled: false}, KafkaPartitionReplicasInSync: MetricConfig{Enabled: false}, KafkaTopicPartitions: MetricConfig{Enabled: false}, - MessagingKafkaBrokerConsumerFetchRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokerCount: MetricConfig{Enabled: false}, - MessagingKafkaBrokerIncomingByteRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokerOutgoingByteRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokerRequestLatency: MetricConfig{Enabled: false}, - MessagingKafkaBrokerRequestRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokerRequestSize: MetricConfig{Enabled: false}, - MessagingKafkaBrokerRequestsInFlight: MetricConfig{Enabled: false}, - MessagingKafkaBrokerResponseRate: MetricConfig{Enabled: false}, - MessagingKafkaBrokerResponseSize: MetricConfig{Enabled: false}, - + MessagingKafkaBrokerConsumerFetchCount: MetricConfig{Enabled: false}, + MessagingKafkaBrokerConsumerFetchRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokerCount: MetricConfig{Enabled: false}, + MessagingKafkaBrokerIncomingByteRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokerOutgoingByteRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestLatency: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestSize: MetricConfig{Enabled: false}, + MessagingKafkaBrokerRequestsInFlight: MetricConfig{Enabled: false}, + MessagingKafkaBrokerResponseRate: MetricConfig{Enabled: false}, + MessagingKafkaBrokerResponseSize: MetricConfig{Enabled: false}, }, }, }, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index d978667729f8..cdec46b37f4e 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -588,14 +588,64 @@ func newMetricKafkaTopicPartitions(cfg MetricConfig) metricKafkaTopicPartitions return m } - type metricMessagingKafkaBrokerConsumerFetchCount struct { - data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } +// init fills messaging.kafka.broker.consumer_fetch_count metric with initial data. +func (m *metricMessagingKafkaBrokerConsumerFetchCount) init() { + m.data.SetName("messaging.kafka.broker.consumer_fetch_count") + m.data.SetDescription("Count of consumer fetches") + m.data.SetUnit("{fetches}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMessagingKafkaBrokerConsumerFetchCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutInt("broker", brokerAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMessagingKafkaBrokerConsumerFetchCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMessagingKafkaBrokerConsumerFetchCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMessagingKafkaBrokerConsumerFetchCount(cfg MetricConfig) metricMessagingKafkaBrokerConsumerFetchCount { + m := metricMessagingKafkaBrokerConsumerFetchCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMessagingKafkaBrokerConsumerFetchRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} // init fills messaging.kafka.broker.consumer_fetch_rate metric with initial data. func (m *metricMessagingKafkaBrokerConsumerFetchRate) init() { @@ -611,7 +661,6 @@ func (m *metricMessagingKafkaBrokerConsumerFetchRate) recordDataPoint(start pcom return } dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) @@ -619,18 +668,15 @@ func (m *metricMessagingKafkaBrokerConsumerFetchRate) recordDataPoint(start pcom } // updateCapacity saves max length of data point slices that will be used for the slice capacity. - func (m *metricMessagingKafkaBrokerConsumerFetchRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() - } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMessagingKafkaBrokerConsumerFetchRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() @@ -639,7 +685,6 @@ func (m *metricMessagingKafkaBrokerConsumerFetchRate) emit(metrics pmetric.Metri func newMetricMessagingKafkaBrokerConsumerFetchRate(cfg MetricConfig) metricMessagingKafkaBrokerConsumerFetchRate { m := metricMessagingKafkaBrokerConsumerFetchRate{config: cfg} - if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -647,14 +692,12 @@ func newMetricMessagingKafkaBrokerConsumerFetchRate(cfg MetricConfig) metricMess return m } - type metricMessagingKafkaBrokerCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.count metric with initial data. func (m *metricMessagingKafkaBrokerCount) init() { m.data.SetName("messaging.kafka.broker.count") @@ -665,7 +708,6 @@ func (m *metricMessagingKafkaBrokerCount) init() { m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } - func (m *metricMessagingKafkaBrokerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return @@ -677,7 +719,6 @@ func (m *metricMessagingKafkaBrokerCount) recordDataPoint(start pcommon.Timestam } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
- func (m *metricMessagingKafkaBrokerCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() @@ -685,7 +726,6 @@ func (m *metricMessagingKafkaBrokerCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. - func (m *metricMessagingKafkaBrokerCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() @@ -694,7 +734,6 @@ func (m *metricMessagingKafkaBrokerCount) emit(metrics pmetric.MetricSlice) { } } - func newMetricMessagingKafkaBrokerCount(cfg MetricConfig) metricMessagingKafkaBrokerCount { m := metricMessagingKafkaBrokerCount{config: cfg} if cfg.Enabled { @@ -704,25 +743,21 @@ func newMetricMessagingKafkaBrokerCount(cfg MetricConfig) metricMessagingKafkaBr return m } - type metricMessagingKafkaBrokerIncomingByteRate struct { - data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.incoming_byte_rate metric with initial data. func (m *metricMessagingKafkaBrokerIncomingByteRate) init() { m.data.SetName("messaging.kafka.broker.incoming_byte_rate") - m.data.SetDescription("Average Bytes received per second") + m.data.SetDescription("Average tncoming Byte Rate in bytes/second") m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } - func (m *metricMessagingKafkaBrokerIncomingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return @@ -735,7 +770,6 @@ func (m *metricMessagingKafkaBrokerIncomingByteRate) recordDataPoint(start pcomm } // updateCapacity saves max length of data point slices that will be used for the slice capacity. - func (m *metricMessagingKafkaBrokerIncomingByteRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() @@ -744,7 +778,6 @@ func (m *metricMessagingKafkaBrokerIncomingByteRate) updateCapacity() { // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMessagingKafkaBrokerIncomingByteRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -761,7 +794,6 @@ func newMetricMessagingKafkaBrokerIncomingByteRate(cfg MetricConfig) metricMessa return m } - type metricMessagingKafkaBrokerOutgoingByteRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -771,14 +803,13 @@ type metricMessagingKafkaBrokerOutgoingByteRate struct { // init fills messaging.kafka.broker.outgoing_byte_rate metric with initial data. 
func (m *metricMessagingKafkaBrokerOutgoingByteRate) init() { m.data.SetName("messaging.kafka.broker.outgoing_byte_rate") - m.data.SetDescription("Average Bytes sent per second") + m.data.SetDescription("Average outgoing Byte Rate in bytes/second.") m.data.SetUnit("1") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMessagingKafkaBrokerOutgoingByteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - if !m.config.Enabled { return } @@ -790,7 +821,6 @@ func (m *metricMessagingKafkaBrokerOutgoingByteRate) recordDataPoint(start pcomm } // updateCapacity saves max length of data point slices that will be used for the slice capacity. - func (m *metricMessagingKafkaBrokerOutgoingByteRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() @@ -798,7 +828,6 @@ func (m *metricMessagingKafkaBrokerOutgoingByteRate) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. - func (m *metricMessagingKafkaBrokerOutgoingByteRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() @@ -817,13 +846,11 @@ func newMetricMessagingKafkaBrokerOutgoingByteRate(cfg MetricConfig) metricMessa } type metricMessagingKafkaBrokerRequestLatency struct { - data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.request_latency metric with initial data. func (m *metricMessagingKafkaBrokerRequestLatency) init() { m.data.SetName("messaging.kafka.broker.request_latency") @@ -848,14 +875,12 @@ func (m *metricMessagingKafkaBrokerRequestLatency) recordDataPoint(start pcommon func (m *metricMessagingKafkaBrokerRequestLatency) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() - } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMessagingKafkaBrokerRequestLatency) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() @@ -864,7 +889,6 @@ func (m *metricMessagingKafkaBrokerRequestLatency) emit(metrics pmetric.MetricSl func newMetricMessagingKafkaBrokerRequestLatency(cfg MetricConfig) metricMessagingKafkaBrokerRequestLatency { m := metricMessagingKafkaBrokerRequestLatency{config: cfg} - if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -873,13 +897,11 @@ func newMetricMessagingKafkaBrokerRequestLatency(cfg MetricConfig) metricMessagi } type metricMessagingKafkaBrokerRequestRate struct { - data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.request_rate metric with initial data. 
func (m *metricMessagingKafkaBrokerRequestRate) init() { m.data.SetName("messaging.kafka.broker.request_rate") @@ -889,9 +911,7 @@ func (m *metricMessagingKafkaBrokerRequestRate) init() { m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } - func (m *metricMessagingKafkaBrokerRequestRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { - if !m.config.Enabled { return } @@ -904,7 +924,6 @@ func (m *metricMessagingKafkaBrokerRequestRate) recordDataPoint(start pcommon.Ti // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMessagingKafkaBrokerRequestRate) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } @@ -912,7 +931,6 @@ func (m *metricMessagingKafkaBrokerRequestRate) updateCapacity() { // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMessagingKafkaBrokerRequestRate) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -922,7 +940,6 @@ func (m *metricMessagingKafkaBrokerRequestRate) emit(metrics pmetric.MetricSlice func newMetricMessagingKafkaBrokerRequestRate(cfg MetricConfig) metricMessagingKafkaBrokerRequestRate { m := metricMessagingKafkaBrokerRequestRate{config: cfg} - if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -931,13 +948,11 @@ func newMetricMessagingKafkaBrokerRequestRate(cfg MetricConfig) metricMessagingK } type metricMessagingKafkaBrokerRequestSize struct { - data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.request_size metric with initial data. func (m *metricMessagingKafkaBrokerRequestSize) init() { m.data.SetName("messaging.kafka.broker.request_size") @@ -947,7 +962,6 @@ func (m *metricMessagingKafkaBrokerRequestSize) init() { m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } - func (m *metricMessagingKafkaBrokerRequestSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return @@ -968,7 +982,6 @@ func (m *metricMessagingKafkaBrokerRequestSize) updateCapacity() { // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMessagingKafkaBrokerRequestSize) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -985,15 +998,12 @@ func newMetricMessagingKafkaBrokerRequestSize(cfg MetricConfig) metricMessagingK return m } - type metricMessagingKafkaBrokerRequestsInFlight struct { - data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.requests_in_flight metric with initial data. 
func (m *metricMessagingKafkaBrokerRequestsInFlight) init() { m.data.SetName("messaging.kafka.broker.requests_in_flight") @@ -1003,7 +1013,6 @@ func (m *metricMessagingKafkaBrokerRequestsInFlight) init() { m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } - func (m *metricMessagingKafkaBrokerRequestsInFlight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { if !m.config.Enabled { return @@ -1016,7 +1025,6 @@ func (m *metricMessagingKafkaBrokerRequestsInFlight) recordDataPoint(start pcomm } // updateCapacity saves max length of data point slices that will be used for the slice capacity. - func (m *metricMessagingKafkaBrokerRequestsInFlight) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() @@ -1024,7 +1032,6 @@ func (m *metricMessagingKafkaBrokerRequestsInFlight) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. - func (m *metricMessagingKafkaBrokerRequestsInFlight) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() @@ -1033,7 +1040,6 @@ func (m *metricMessagingKafkaBrokerRequestsInFlight) emit(metrics pmetric.Metric } } - func newMetricMessagingKafkaBrokerRequestsInFlight(cfg MetricConfig) metricMessagingKafkaBrokerRequestsInFlight { m := metricMessagingKafkaBrokerRequestsInFlight{config: cfg} if cfg.Enabled { @@ -1049,7 +1055,6 @@ type metricMessagingKafkaBrokerResponseRate struct { capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.response_rate metric with initial data. func (m *metricMessagingKafkaBrokerResponseRate) init() { m.data.SetName("messaging.kafka.broker.response_rate") @@ -1071,7 +1076,6 @@ func (m *metricMessagingKafkaBrokerResponseRate) recordDataPoint(start pcommon.T } // updateCapacity saves max length of data point slices that will be used for the slice capacity. - func (m *metricMessagingKafkaBrokerResponseRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() @@ -1087,7 +1091,6 @@ func (m *metricMessagingKafkaBrokerResponseRate) emit(metrics pmetric.MetricSlic } } - func newMetricMessagingKafkaBrokerResponseRate(cfg MetricConfig) metricMessagingKafkaBrokerResponseRate { m := metricMessagingKafkaBrokerResponseRate{config: cfg} if cfg.Enabled { @@ -1097,15 +1100,12 @@ func newMetricMessagingKafkaBrokerResponseRate(cfg MetricConfig) metricMessaging return m } - type metricMessagingKafkaBrokerResponseSize struct { - data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } - // init fills messaging.kafka.broker.response_size metric with initial data. 
func (m *metricMessagingKafkaBrokerResponseSize) init() { m.data.SetName("messaging.kafka.broker.response_size") @@ -1115,7 +1115,6 @@ func (m *metricMessagingKafkaBrokerResponseSize) init() { m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } - func (m *metricMessagingKafkaBrokerResponseSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { if !m.config.Enabled { return @@ -1135,7 +1134,6 @@ func (m *metricMessagingKafkaBrokerResponseSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. - func (m *metricMessagingKafkaBrokerResponseSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() @@ -1144,7 +1142,6 @@ func (m *metricMessagingKafkaBrokerResponseSize) emit(metrics pmetric.MetricSlic } } - func newMetricMessagingKafkaBrokerResponseSize(cfg MetricConfig) metricMessagingKafkaBrokerResponseSize { m := metricMessagingKafkaBrokerResponseSize{config: cfg} if cfg.Enabled { @@ -1173,17 +1170,17 @@ type MetricsBuilder struct { metricKafkaPartitionReplicas metricKafkaPartitionReplicas metricKafkaPartitionReplicasInSync metricKafkaPartitionReplicasInSync metricKafkaTopicPartitions metricKafkaTopicPartitions - metricMessagingKafkaBrokerConsumerFetchRate metricMessagingKafkaBrokerConsumerFetchRate - metricMessagingKafkaBrokerCount metricMessagingKafkaBrokerCount - metricMessagingKafkaBrokerIncomingByteRate metricMessagingKafkaBrokerIncomingByteRate - metricMessagingKafkaBrokerOutgoingByteRate metricMessagingKafkaBrokerOutgoingByteRate - metricMessagingKafkaBrokerRequestLatency metricMessagingKafkaBrokerRequestLatency - metricMessagingKafkaBrokerRequestRate metricMessagingKafkaBrokerRequestRate - metricMessagingKafkaBrokerRequestSize metricMessagingKafkaBrokerRequestSize - metricMessagingKafkaBrokerRequestsInFlight metricMessagingKafkaBrokerRequestsInFlight - metricMessagingKafkaBrokerResponseRate metricMessagingKafkaBrokerResponseRate - metricMessagingKafkaBrokerResponseSize metricMessagingKafkaBrokerResponseSize - + metricMessagingKafkaBrokerConsumerFetchCount metricMessagingKafkaBrokerConsumerFetchCount + metricMessagingKafkaBrokerConsumerFetchRate metricMessagingKafkaBrokerConsumerFetchRate + metricMessagingKafkaBrokerCount metricMessagingKafkaBrokerCount + metricMessagingKafkaBrokerIncomingByteRate metricMessagingKafkaBrokerIncomingByteRate + metricMessagingKafkaBrokerOutgoingByteRate metricMessagingKafkaBrokerOutgoingByteRate + metricMessagingKafkaBrokerRequestLatency metricMessagingKafkaBrokerRequestLatency + metricMessagingKafkaBrokerRequestRate metricMessagingKafkaBrokerRequestRate + metricMessagingKafkaBrokerRequestSize metricMessagingKafkaBrokerRequestSize + metricMessagingKafkaBrokerRequestsInFlight metricMessagingKafkaBrokerRequestsInFlight + metricMessagingKafkaBrokerResponseRate metricMessagingKafkaBrokerResponseRate + metricMessagingKafkaBrokerResponseSize metricMessagingKafkaBrokerResponseSize } // metricBuilderOption applies changes to default metrics builder. @@ -1200,7 +1197,9 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting if mbc.Metrics.KafkaBrokers.Enabled { settings.Logger.Warn("[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.broker.count`") } - + if !mbc.Metrics.MessagingKafkaBrokerConsumerFetchCount.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.") + } if !mbc.Metrics.MessagingKafkaBrokerConsumerFetchRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_rate`: This metric will be enabled by default in the next versions.") } @@ -1247,17 +1246,17 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricKafkaPartitionReplicas: newMetricKafkaPartitionReplicas(mbc.Metrics.KafkaPartitionReplicas), metricKafkaPartitionReplicasInSync: newMetricKafkaPartitionReplicasInSync(mbc.Metrics.KafkaPartitionReplicasInSync), metricKafkaTopicPartitions: newMetricKafkaTopicPartitions(mbc.Metrics.KafkaTopicPartitions), - metricMessagingKafkaBrokerConsumerFetchRate: newMetricMessagingKafkaBrokerConsumerFetchRate(mbc.Metrics.MessagingKafkaBrokerConsumerFetchRate), - metricMessagingKafkaBrokerCount: newMetricMessagingKafkaBrokerCount(mbc.Metrics.MessagingKafkaBrokerCount), - metricMessagingKafkaBrokerIncomingByteRate: newMetricMessagingKafkaBrokerIncomingByteRate(mbc.Metrics.MessagingKafkaBrokerIncomingByteRate), - metricMessagingKafkaBrokerOutgoingByteRate: newMetricMessagingKafkaBrokerOutgoingByteRate(mbc.Metrics.MessagingKafkaBrokerOutgoingByteRate), - metricMessagingKafkaBrokerRequestLatency: newMetricMessagingKafkaBrokerRequestLatency(mbc.Metrics.MessagingKafkaBrokerRequestLatency), - metricMessagingKafkaBrokerRequestRate: newMetricMessagingKafkaBrokerRequestRate(mbc.Metrics.MessagingKafkaBrokerRequestRate), - metricMessagingKafkaBrokerRequestSize: newMetricMessagingKafkaBrokerRequestSize(mbc.Metrics.MessagingKafkaBrokerRequestSize), - metricMessagingKafkaBrokerRequestsInFlight: newMetricMessagingKafkaBrokerRequestsInFlight(mbc.Metrics.MessagingKafkaBrokerRequestsInFlight), - metricMessagingKafkaBrokerResponseRate: newMetricMessagingKafkaBrokerResponseRate(mbc.Metrics.MessagingKafkaBrokerResponseRate), - metricMessagingKafkaBrokerResponseSize: newMetricMessagingKafkaBrokerResponseSize(mbc.Metrics.MessagingKafkaBrokerResponseSize), - + metricMessagingKafkaBrokerConsumerFetchCount: newMetricMessagingKafkaBrokerConsumerFetchCount(mbc.Metrics.MessagingKafkaBrokerConsumerFetchCount), + metricMessagingKafkaBrokerConsumerFetchRate: newMetricMessagingKafkaBrokerConsumerFetchRate(mbc.Metrics.MessagingKafkaBrokerConsumerFetchRate), + metricMessagingKafkaBrokerCount: newMetricMessagingKafkaBrokerCount(mbc.Metrics.MessagingKafkaBrokerCount), + metricMessagingKafkaBrokerIncomingByteRate: newMetricMessagingKafkaBrokerIncomingByteRate(mbc.Metrics.MessagingKafkaBrokerIncomingByteRate), + metricMessagingKafkaBrokerOutgoingByteRate: newMetricMessagingKafkaBrokerOutgoingByteRate(mbc.Metrics.MessagingKafkaBrokerOutgoingByteRate), + metricMessagingKafkaBrokerRequestLatency: newMetricMessagingKafkaBrokerRequestLatency(mbc.Metrics.MessagingKafkaBrokerRequestLatency), + metricMessagingKafkaBrokerRequestRate: newMetricMessagingKafkaBrokerRequestRate(mbc.Metrics.MessagingKafkaBrokerRequestRate), + metricMessagingKafkaBrokerRequestSize: newMetricMessagingKafkaBrokerRequestSize(mbc.Metrics.MessagingKafkaBrokerRequestSize), + metricMessagingKafkaBrokerRequestsInFlight: newMetricMessagingKafkaBrokerRequestsInFlight(mbc.Metrics.MessagingKafkaBrokerRequestsInFlight), + 
metricMessagingKafkaBrokerResponseRate: newMetricMessagingKafkaBrokerResponseRate(mbc.Metrics.MessagingKafkaBrokerResponseRate), + metricMessagingKafkaBrokerResponseSize: newMetricMessagingKafkaBrokerResponseSize(mbc.Metrics.MessagingKafkaBrokerResponseSize), } for _, op := range options { op(mb) @@ -1325,6 +1324,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricKafkaPartitionReplicas.emit(ils.Metrics()) mb.metricKafkaPartitionReplicasInSync.emit(ils.Metrics()) mb.metricKafkaTopicPartitions.emit(ils.Metrics()) + mb.metricMessagingKafkaBrokerConsumerFetchCount.emit(ils.Metrics()) mb.metricMessagingKafkaBrokerConsumerFetchRate.emit(ils.Metrics()) mb.metricMessagingKafkaBrokerCount.emit(ils.Metrics()) mb.metricMessagingKafkaBrokerIncomingByteRate.emit(ils.Metrics()) @@ -1410,6 +1410,11 @@ func (mb *MetricsBuilder) RecordKafkaTopicPartitionsDataPoint(ts pcommon.Timesta mb.metricKafkaTopicPartitions.recordDataPoint(mb.startTime, ts, val, topicAttributeValue) } +// RecordMessagingKafkaBrokerConsumerFetchCountDataPoint adds a data point to messaging.kafka.broker.consumer_fetch_count metric. +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { + mb.metricMessagingKafkaBrokerConsumerFetchCount.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) +} + // RecordMessagingKafkaBrokerConsumerFetchRateDataPoint adds a data point to messaging.kafka.broker.consumer_fetch_rate metric. func (mb *MetricsBuilder) RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricMessagingKafkaBrokerConsumerFetchRate.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) @@ -1458,7 +1463,6 @@ func (mb *MetricsBuilder) RecordMessagingKafkaBrokerResponseRateDataPoint(ts pco // RecordMessagingKafkaBrokerResponseSizeDataPoint adds a data point to messaging.kafka.broker.response_size metric. func (mb *MetricsBuilder) RecordMessagingKafkaBrokerResponseSizeDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { mb.metricMessagingKafkaBrokerResponseSize.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) - } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index ff8b65e9a666..dcb02051c03f 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -53,6 +53,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.broker.count`", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } if test.configSet == testSetDefault { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_rate`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ @@ -91,7 +95,6 @@ func TestMetricsBuilder(t *testing.T) { } if test.configSet == testSetDefault { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.response_size`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ } assert.Equal(t, expectedWarnings, observedLogs.Len()) @@ -143,6 +146,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordKafkaTopicPartitionsDataPoint(ts, 1, "topic-val") + allMetricsCount++ + mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts, 1, 6) + allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts, 1, 6) @@ -412,7 +418,21 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - + case "messaging.kafka.broker.consumer_fetch_rate": + assert.False(t, validatedMetrics["messaging.kafka.broker.consumer_fetch_rate"], "Found a duplicate in the metrics slice: messaging.kafka.broker.consumer_fetch_rate") + validatedMetrics["messaging.kafka.broker.consumer_fetch_rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average consumer fetch Rate", ms.At(i).Description()) + assert.Equal(t, "{fetches}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("broker") + assert.True(t, ok) + assert.EqualValues(t, 6, attrVal.Int()) case "messaging.kafka.broker.count": assert.False(t, validatedMetrics["messaging.kafka.broker.count"], "Found a duplicate in the metrics slice: messaging.kafka.broker.count") validatedMetrics["messaging.kafka.broker.count"] = true @@ -427,13 +447,12 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "messaging.kafka.broker.incoming_byte_rate": assert.False(t, validatedMetrics["messaging.kafka.broker.incoming_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.broker.incoming_byte_rate") validatedMetrics["messaging.kafka.broker.incoming_byte_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average Bytes received per second", ms.At(i).Description()) + assert.Equal(t, "Average tncoming Byte Rate in bytes/second", ms.At(i).Description()) assert.Equal(t, "1", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, 
dp.StartTimestamp()) @@ -443,13 +462,12 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.broker.outgoing_byte_rate": assert.False(t, validatedMetrics["messaging.kafka.broker.outgoing_byte_rate"], "Found a duplicate in the metrics slice: messaging.kafka.broker.outgoing_byte_rate") validatedMetrics["messaging.kafka.broker.outgoing_byte_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average Bytes sent per second", ms.At(i).Description()) + assert.Equal(t, "Average outgoing Byte Rate in bytes/second.", ms.At(i).Description()) assert.Equal(t, "1", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -459,7 +477,6 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.broker.request_latency": assert.False(t, validatedMetrics["messaging.kafka.broker.request_latency"], "Found a duplicate in the metrics slice: messaging.kafka.broker.request_latency") validatedMetrics["messaging.kafka.broker.request_latency"] = true @@ -482,7 +499,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Average request rate per second.", ms.At(i).Description()) assert.Equal(t, "{requests}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -506,7 +522,6 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.broker.requests_in_flight": assert.False(t, validatedMetrics["messaging.kafka.broker.requests_in_flight"], "Found a duplicate in the metrics slice: messaging.kafka.broker.requests_in_flight") validatedMetrics["messaging.kafka.broker.requests_in_flight"] = true @@ -530,7 +545,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, "Average response rate per second", ms.At(i).Description()) assert.Equal(t, "{response}/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) @@ -538,7 +552,6 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) - case "messaging.kafka.broker.response_size": assert.False(t, validatedMetrics["messaging.kafka.broker.response_size"], "Found a duplicate in the metrics slice: messaging.kafka.broker.response_size") validatedMetrics["messaging.kafka.broker.response_size"] = true diff --git a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml index 2eeb6c61f31d..c71f07e23987 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kafkametricsreceiver/internal/metadata/testdata/config.yaml @@ -23,6 +23,8 @@ all_set: enabled: true kafka.topic.partitions: enabled: true + messaging.kafka.broker.consumer_fetch_count: + enabled: true messaging.kafka.broker.consumer_fetch_rate: enabled: true messaging.kafka.broker.count: @@ -67,6 +69,8 @@ none_set: enabled: false 
kafka.topic.partitions: enabled: false + messaging.kafka.broker.consumer_fetch_count: + enabled: false messaging.kafka.broker.consumer_fetch_rate: enabled: false messaging.kafka.broker.count: diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 24e97ee48d23..61e68271ef6e 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -32,7 +32,7 @@ metrics: gauge: value_type: int warnings: - if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.brokers.count` + if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.broker.count` messaging.kafka.broker.count: enabled: false @@ -41,7 +41,7 @@ metrics: sum: monotonic: false value_type: int - aggregation: cumulative + aggregation_temporality: cumulative warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_rate: @@ -125,9 +125,6 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - aggregation_temporality: cumulative - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_count: enabled: false description: Count of consumer fetches @@ -138,81 +135,6 @@ metrics: attributes: [broker] warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.broker.incoming_byte_rate: - enabled: false - description: Average Bytes received per second - unit: 1 - gauge: - value_type: double - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.broker.outgoing_byte_rate: - enabled: false - description: Average Bytes sent per second - unit: 1 - gauge: - value_type: double - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.broker.request_latency: - enabled: false - description: Average request latency in ms - unit: "ms" - gauge: - value_type: double - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.broker.response_count: - enabled: false - description: Number of responses from the broker - unit: "{response}" - sum: - value_type: double - aggregation_temporality: cumulative - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.broker.response_size: - enabled: false - description: Average response size in bytes - unit: "By" - gauge: - value_type: double - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.broker.request_count: - enabled: false - description: Number of requests in the broker - unit: "{requests}" - sum: - value_type: double - - aggregation_temporality: cumulative - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. - messaging.kafka.broker.request_size: - enabled: false - description: Average request size in bytes - unit: "By" - gauge: - value_type: double - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. 
- messaging.kafka.broker.requests_in_flight: - enabled: false - description: Requests in flight - unit: "{requests}" - gauge: - value_type: int - attributes: [broker] - warnings: - if_enabled_not_set: This metric will be enabled by default in the next versions. # topics scraper From 380b0447ce931f1ae1888a551aa499c09cda0355 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Tue, 5 Sep 2023 13:22:39 -0700 Subject: [PATCH 16/36] chore: updates per request --- .../broker_scraper_test.go | 36 ++++++++++++++++--- .../kafkametricsreceiver/documentation.md | 20 +++++------ .../internal/metadata/generated_metrics.go | 32 +++++++++-------- .../metadata/generated_metrics_test.go | 26 +++++++------- receiver/kafkametricsreceiver/metadata.yaml | 20 ++++++----- 5 files changed, 84 insertions(+), 50 deletions(-) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index 040ab4c60a3d..b546d6c41dda 100644 --- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -104,10 +104,38 @@ func TestBrokerScraper_scrape(t *testing.T) { require.NoError(t, bs.start(context.Background(), componenttest.NewNopHost())) md, err := bs.scrape(context.Background()) assert.NoError(t, err) - expectedDp := int64(len(testBrokers)) - receivedMetrics := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) - receivedDp := receivedMetrics.Gauge().DataPoints().At(0).IntValue() - assert.Equal(t, expectedDp, receivedDp) + require.Equal(t, 1, md.ResourceMetrics().Len()) + require.Equal(t, 1, md.ResourceMetrics().At(0).ScopeMetrics().Len()) + ms := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() + for i := 0; i < ms.Len(); i++ { + m := ms.At(i) + switch m.Name() { + case "kafka.brokers": + assert.Equal(t, m.Sum().DataPoints().At(0).IntValue(), int64(len(testBrokers))) + case "kafka.broker.count": + assert.Equal(t, m.Sum().DataPoints().At(0).IntValue(), int64(len(testBrokers))) + case "kafka.broker.consumer_fetch_rate": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.incoming_byte_rate": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.outgoing_byte_rate": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.request_latency": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.response_rate": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.response_size": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.request_rate": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.request_size": + assert.Equal(t, m.Gauge().DataPoints().At(0).DoubleValue(), int64(len(testBrokers))) + case "kafka.broker.requests_in_flight": + assert.Equal(t, m.Sum().DataPoints().At(0).IntValue(), int64(len(testBrokers))) + case "kafka.broker.consumer_fetch_count": + assert.Equal(t, m.Sum().DataPoints().At(0).IntValue(), int64(len(testBrokers))) + } + } } func TestBrokersScraper_createBrokerScraper(t *testing.T) { diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index e23dacb86563..32d02f558606 100644 --- 
a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -14,11 +14,11 @@ metrics: ### kafka.brokers -[DEPRECATED] Number of brokers in the cluster. +[depracated] Number of brokers in the cluster. -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {brokers} | Gauge | Int | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {broker} | Sum | Int | Cumulative | false | ### kafka.consumer_group.lag @@ -186,7 +186,7 @@ Count of consumer fetches | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | -| {fetches} | Sum | Double | Cumulative | false | +| {fetches} | Sum | Int | Cumulative | false | #### Attributes @@ -214,15 +214,15 @@ Number of brokers in the cluster. | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | -| {brokers} | Sum | Int | Cumulative | false | +| {broker} | Sum | Int | Cumulative | false | ### messaging.kafka.broker.incoming_byte_rate -Average tncoming Byte Rate in bytes/second +Average incoming Byte Rate in bytes/second | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Double | +| By/s | Gauge | Double | #### Attributes @@ -246,11 +246,11 @@ Average outgoing Byte Rate in bytes/second. ### messaging.kafka.broker.request_latency -Average request latency in ms +Average request latency in seconds | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| ms | Gauge | Double | +| s | Gauge | Double | #### Attributes diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index cdec46b37f4e..b262442eb4c4 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -20,16 +20,18 @@ type metricKafkaBrokers struct { // init fills kafka.brokers metric with initial data. func (m *metricKafkaBrokers) init() { m.data.SetName("kafka.brokers") - m.data.SetDescription("[DEPRECATED] Number of brokers in the cluster.") - m.data.SetUnit("{brokers}") - m.data.SetEmptyGauge() + m.data.SetDescription("[depracated] Number of brokers in the cluster.") + m.data.SetUnit("{broker}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricKafkaBrokers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) @@ -37,14 +39,14 @@ func (m *metricKafkaBrokers) recordDataPoint(start pcommon.Timestamp, ts pcommon // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricKafkaBrokers) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
func (m *metricKafkaBrokers) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() @@ -605,14 +607,14 @@ func (m *metricMessagingKafkaBrokerConsumerFetchCount) init() { m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMessagingKafkaBrokerConsumerFetchCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (m *metricMessagingKafkaBrokerConsumerFetchCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetDoubleValue(val) + dp.SetIntValue(val) dp.Attributes().PutInt("broker", brokerAttributeValue) } @@ -702,7 +704,7 @@ type metricMessagingKafkaBrokerCount struct { func (m *metricMessagingKafkaBrokerCount) init() { m.data.SetName("messaging.kafka.broker.count") m.data.SetDescription("Number of brokers in the cluster.") - m.data.SetUnit("{brokers}") + m.data.SetUnit("{broker}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) @@ -752,8 +754,8 @@ type metricMessagingKafkaBrokerIncomingByteRate struct { // init fills messaging.kafka.broker.incoming_byte_rate metric with initial data. func (m *metricMessagingKafkaBrokerIncomingByteRate) init() { m.data.SetName("messaging.kafka.broker.incoming_byte_rate") - m.data.SetDescription("Average tncoming Byte Rate in bytes/second") - m.data.SetUnit("1") + m.data.SetDescription("Average incoming Byte Rate in bytes/second") + m.data.SetUnit("By/s") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } @@ -854,8 +856,8 @@ type metricMessagingKafkaBrokerRequestLatency struct { // init fills messaging.kafka.broker.request_latency metric with initial data. func (m *metricMessagingKafkaBrokerRequestLatency) init() { m.data.SetName("messaging.kafka.broker.request_latency") - m.data.SetDescription("Average request latency in ms") - m.data.SetUnit("ms") + m.data.SetDescription("Average request latency in seconds") + m.data.SetUnit("s") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } @@ -1411,7 +1413,7 @@ func (mb *MetricsBuilder) RecordKafkaTopicPartitionsDataPoint(ts pcommon.Timesta } // RecordMessagingKafkaBrokerConsumerFetchCountDataPoint adds a data point to messaging.kafka.broker.consumer_fetch_count metric. 
-func (mb *MetricsBuilder) RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts pcommon.Timestamp, val float64, brokerAttributeValue int64) { +func (mb *MetricsBuilder) RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts pcommon.Timestamp, val int64, brokerAttributeValue int64) { mb.metricMessagingKafkaBrokerConsumerFetchCount.recordDataPoint(mb.startTime, ts, val, brokerAttributeValue) } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index dcb02051c03f..a017059fb83a 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -204,11 +204,13 @@ func TestMetricsBuilder(t *testing.T) { case "kafka.brokers": assert.False(t, validatedMetrics["kafka.brokers"], "Found a duplicate in the metrics slice: kafka.brokers") validatedMetrics["kafka.brokers"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "[DEPRECATED] Number of brokers in the cluster.", ms.At(i).Description()) - assert.Equal(t, "{brokers}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "[depracated] Number of brokers in the cluster.", ms.At(i).Description()) + assert.Equal(t, "{broker}", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) @@ -413,8 +415,8 @@ func TestMetricsBuilder(t *testing.T) { dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) attrVal, ok := dp.Attributes().Get("broker") assert.True(t, ok) assert.EqualValues(t, 6, attrVal.Int()) @@ -439,7 +441,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of brokers in the cluster.", ms.At(i).Description()) - assert.Equal(t, "{brokers}", ms.At(i).Unit()) + assert.Equal(t, "{broker}", ms.At(i).Unit()) assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) @@ -452,8 +454,8 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["messaging.kafka.broker.incoming_byte_rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average tncoming Byte Rate in bytes/second", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "Average incoming Byte Rate in bytes/second", ms.At(i).Description()) + assert.Equal(t, "By/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) 
assert.Equal(t, ts, dp.Timestamp()) @@ -482,8 +484,8 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["messaging.kafka.broker.request_latency"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Average request latency in ms", ms.At(i).Description()) - assert.Equal(t, "ms", ms.At(i).Unit()) + assert.Equal(t, "Average request latency in seconds", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 61e68271ef6e..812738a99b4c 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -27,17 +27,19 @@ metrics: # brokers scraper kafka.brokers: enabled: true - description: "[DEPRECATED] Number of brokers in the cluster." - unit: "{brokers}" - gauge: + description: '[depracated] Number of brokers in the cluster.' + unit: "{broker}" + sum: + monotonic: false value_type: int + aggregation_temporality: cumulative warnings: if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.broker.count` messaging.kafka.broker.count: enabled: false description: Number of brokers in the cluster. - unit: "{brokers}" + unit: "{broker}" sum: monotonic: false value_type: int @@ -55,8 +57,8 @@ metrics: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.incoming_byte_rate: enabled: false - description: Average tncoming Byte Rate in bytes/second - unit: 1 + description: Average incoming Byte Rate in bytes/second + unit: By/s gauge: value_type: double attributes: [broker] @@ -73,8 +75,8 @@ metrics: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.request_latency: enabled: false - description: Average request latency in ms - unit: "ms" + description: Average request latency in seconds + unit: "s" gauge: value_type: double attributes: [broker] @@ -130,7 +132,7 @@ metrics: description: Count of consumer fetches unit: "{fetches}" sum: - value_type: double + value_type: int aggregation_temporality: cumulative attributes: [broker] warnings: From 1cf6c15cb3572d33a867ac1d64112c7fab3283ba Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 7 Sep 2023 10:40:06 -0700 Subject: [PATCH 17/36] chore: fix description issue --- receiver/kafkametricsreceiver/documentation.md | 18 +++++++++--------- .../internal/metadata/generated_metrics.go | 2 +- receiver/kafkametricsreceiver/metadata.yaml | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 32d02f558606..8a6ddbe5ec80 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -14,7 +14,7 @@ metrics: ### kafka.brokers -[depracated] Number of brokers in the cluster. +[DEPRACATED] Number of brokers in the cluster. | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | @@ -170,6 +170,14 @@ Number of partitions in topic. 
| ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | +### messaging.kafka.broker.count + +Number of brokers in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {broker} | Sum | Int | Cumulative | false | + ## Optional Metrics The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: @@ -208,14 +216,6 @@ Average consumer fetch Rate | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.broker.count - -Number of brokers in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {broker} | Sum | Int | Cumulative | false | - ### messaging.kafka.broker.incoming_byte_rate Average incoming Byte Rate in bytes/second diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index b262442eb4c4..bbd824260427 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -20,7 +20,7 @@ type metricKafkaBrokers struct { // init fills kafka.brokers metric with initial data. func (m *metricKafkaBrokers) init() { m.data.SetName("kafka.brokers") - m.data.SetDescription("[depracated] Number of brokers in the cluster.") + m.data.SetDescription("[DEPRACATED] Number of brokers in the cluster.") m.data.SetUnit("{broker}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 812738a99b4c..830992657912 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -27,7 +27,7 @@ metrics: # brokers scraper kafka.brokers: enabled: true - description: '[depracated] Number of brokers in the cluster.' + description: '[DEPRACATED] Number of brokers in the cluster.' 
unit: "{broker}" sum: monotonic: false From 65862dba24e3e5630078620ebdb43fc153731a27 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 7 Sep 2023 10:47:19 -0700 Subject: [PATCH 18/36] chore: update expected to sum --- receiver/kafkametricsreceiver/integration_test.go | 2 +- .../kafkametricsreceiver/testdata/integration/expected.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index d946e72a135e..e99031c00f6a 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -69,7 +69,7 @@ func TestIntegration(t *testing.T) { ci.MappedPortForNamedContainer(t, "kafka", kafkaPort))} rCfg.Scrapers = []string{"brokers", "consumers", "topics"} }), - // scraperinttest.WriteExpected(), // TODO remove + scraperinttest.WriteExpected(), // TODO remove scraperinttest.WithCompareOptions( // pmetrictest.IgnoreMetricValues(), pmetrictest.IgnoreStartTimestamp(), diff --git a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml index 6f02cc262f3d..e6b290f3de92 100644 --- a/receiver/kafkametricsreceiver/testdata/integration/expected.yaml +++ b/receiver/kafkametricsreceiver/testdata/integration/expected.yaml @@ -3,7 +3,7 @@ resourceMetrics: scopeMetrics: - metrics: - description: "[DEPRECATED] Number of brokers in the cluster." - gauge: + sum: aggregationTemporality: 2 dataPoints: - asInt: "1" From ad34e0b772ad6ebfb1cbceae00955598d0dec503 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 7 Sep 2023 12:21:03 -0700 Subject: [PATCH 19/36] TEST MUST REVERT --- .../kafkametricsreceiver/internal/metadata/generated_config.go | 2 +- .../internal/metadata/generated_metrics_test.go | 3 ++- receiver/kafkametricsreceiver/metadata.yaml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index d37dfcc66105..8d83061a3f49 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -91,7 +91,7 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MessagingKafkaBrokerCount: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerIncomingByteRate: MetricConfig{ Enabled: false, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index a017059fb83a..0600a95f936c 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -152,6 +152,7 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerCountDataPoint(ts, 1) @@ -206,7 +207,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["kafka.brokers"] = true assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "[depracated] Number of brokers in the cluster.", ms.At(i).Description()) + assert.Equal(t, "[DEPRACATED] Number 
of brokers in the cluster.", ms.At(i).Description()) assert.Equal(t, "{broker}", ms.At(i).Unit()) assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 830992657912..7b807c6f877a 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -37,7 +37,7 @@ metrics: if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.broker.count` messaging.kafka.broker.count: - enabled: false + enabled: true description: Number of brokers in the cluster. unit: "{broker}" sum: From a894856e558f3fe8cc6e059beee9f1e3a348ebb6 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 7 Sep 2023 12:24:39 -0700 Subject: [PATCH 20/36] test2 --- .../kafkametricsreceiver/documentation.md | 44 +++++++++---------- .../internal/metadata/generated_config.go | 4 +- .../metadata/generated_metrics_test.go | 6 +-- receiver/kafkametricsreceiver/metadata.yaml | 4 +- 4 files changed, 27 insertions(+), 31 deletions(-) diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 8a6ddbe5ec80..c1349fcc9058 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -12,14 +12,6 @@ metrics: enabled: false ``` -### kafka.brokers - -[DEPRACATED] Number of brokers in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {broker} | Sum | Int | Cumulative | false | - ### kafka.consumer_group.lag Current approximate lag of consumer group at partition of topic @@ -170,6 +162,20 @@ Number of partitions in topic. | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | +### messaging.kafka.broker.consumer_fetch_rate + +Average consumer fetch Rate + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {fetches}/s | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| broker | The ID (integer) of a broker | Any Int | + ### messaging.kafka.broker.count Number of brokers in the cluster. @@ -188,27 +194,21 @@ metrics: enabled: true ``` -### messaging.kafka.broker.consumer_fetch_count +### kafka.brokers -Count of consumer fetches +[DEPRACATED] Number of brokers in the cluster. 
| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | -| {fetches} | Sum | Int | Cumulative | false | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| broker | The ID (integer) of a broker | Any Int | +| {broker} | Sum | Int | Cumulative | false | -### messaging.kafka.broker.consumer_fetch_rate +### messaging.kafka.broker.consumer_fetch_count -Average consumer fetch Rate +Count of consumer fetches -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {fetches}/s | Gauge | Double | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {fetches} | Sum | Int | Cumulative | false | #### Attributes diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index 8d83061a3f49..035e3bf5ada9 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -52,7 +52,7 @@ type MetricsConfig struct { func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ KafkaBrokers: MetricConfig{ - Enabled: true, + Enabled: false, }, KafkaConsumerGroupLag: MetricConfig{ Enabled: true, @@ -88,7 +88,7 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MessagingKafkaBrokerConsumerFetchRate: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerCount: MetricConfig{ Enabled: true, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 0600a95f936c..acefbc93a94a 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -49,10 +49,6 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 - if test.configSet == testSetDefault || test.configSet == testSetAll { - assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.broker.count`", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } if test.configSet == testSetDefault { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ @@ -102,7 +98,6 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount := 0 allMetricsCount := 0 - defaultMetricsCount++ allMetricsCount++ mb.RecordKafkaBrokersDataPoint(ts, 1) @@ -149,6 +144,7 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts, 1, 6) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 7b807c6f877a..cbc074643c8b 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -26,7 +26,7 @@ metrics: # brokers scraper kafka.brokers: - enabled: true + enabled: false description: '[DEPRACATED] Number of brokers in the cluster.' unit: "{broker}" sum: @@ -47,7 +47,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_rate: - enabled: false + enabled: true description: Average consumer fetch Rate unit: "{fetches}/s" gauge: From 3391a70134057803b34b27addc49df38d8c8f58e Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 7 Sep 2023 12:26:32 -0700 Subject: [PATCH 21/36] test3 --- .../kafkametricsreceiver/documentation.md | 60 +++++++++---------- .../internal/metadata/generated_config.go | 6 +- .../internal/metadata/generated_metrics.go | 2 +- .../metadata/generated_metrics_test.go | 9 ++- receiver/kafkametricsreceiver/metadata.yaml | 8 +-- 5 files changed, 44 insertions(+), 41 deletions(-) diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index c1349fcc9058..477890691963 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -12,6 +12,14 @@ metrics: enabled: false ``` +### kafka.brokers + +[DEPRACATED] Number of brokers in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {broker} | Sum | Int | Cumulative | false | + ### kafka.consumer_group.lag Current approximate lag of consumer group at partition of topic @@ -162,28 +170,6 @@ Number of partitions in topic. | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | -### messaging.kafka.broker.consumer_fetch_rate - -Average consumer fetch Rate - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {fetches}/s | Gauge | Double | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| broker | The ID (integer) of a broker | Any Int | - -### messaging.kafka.broker.count - -Number of brokers in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {broker} | Sum | Int | Cumulative | false | - ## Optional Metrics The following metrics are not emitted by default. 
Each of them can be enabled by applying the following configuration: @@ -194,14 +180,6 @@ metrics: enabled: true ``` -### kafka.brokers - -[DEPRACATED] Number of brokers in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {broker} | Sum | Int | Cumulative | false | - ### messaging.kafka.broker.consumer_fetch_count Count of consumer fetches @@ -216,6 +194,28 @@ Count of consumer fetches | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | +### messaging.kafka.broker.consumer_fetch_rate + +Average consumer fetch Rate + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {fetches}/s | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| broker | The ID (integer) of a broker | Any Int | + +### messaging.kafka.broker.count + +Number of brokers in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {broker} | Sum | Int | Cumulative | true | + ### messaging.kafka.broker.incoming_byte_rate Average incoming Byte Rate in bytes/second diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index 035e3bf5ada9..d37dfcc66105 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -52,7 +52,7 @@ type MetricsConfig struct { func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ KafkaBrokers: MetricConfig{ - Enabled: false, + Enabled: true, }, KafkaConsumerGroupLag: MetricConfig{ Enabled: true, @@ -88,10 +88,10 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MessagingKafkaBrokerConsumerFetchRate: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerCount: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerIncomingByteRate: MetricConfig{ Enabled: false, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index bbd824260427..49ea1808bfec 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -706,7 +706,7 @@ func (m *metricMessagingKafkaBrokerCount) init() { m.data.SetDescription("Number of brokers in the cluster.") m.data.SetUnit("{broker}") m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index acefbc93a94a..655568a6de8f 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -49,6 +49,10 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 + if test.configSet == testSetDefault || test.configSet == testSetAll { + assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated 
and will be removed. Use `messaging.kafka.broker.count`", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } if test.configSet == testSetDefault { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ @@ -98,6 +102,7 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount := 0 allMetricsCount := 0 + defaultMetricsCount++ allMetricsCount++ mb.RecordKafkaBrokersDataPoint(ts, 1) @@ -144,11 +149,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerCountDataPoint(ts, 1) @@ -439,7 +442,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of brokers in the cluster.", ms.At(i).Description()) assert.Equal(t, "{broker}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index cbc074643c8b..a3102e3f4f4a 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -26,7 +26,7 @@ metrics: # brokers scraper kafka.brokers: - enabled: false + enabled: true description: '[DEPRACATED] Number of brokers in the cluster.' unit: "{broker}" sum: @@ -37,17 +37,17 @@ metrics: if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.broker.count` messaging.kafka.broker.count: - enabled: true + enabled: false description: Number of brokers in the cluster. unit: "{broker}" sum: - monotonic: false + monotonic: true value_type: int aggregation_temporality: cumulative warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_rate: - enabled: true + enabled: false description: Average consumer fetch Rate unit: "{fetches}/s" gauge: From b5b8bc1f1c4099e796fbd761f12ccc5c3d2f62a1 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 7 Sep 2023 13:29:05 -0700 Subject: [PATCH 22/36] chore: run write --- .../kafkametricsreceiver/documentation.md | 28 +++++++++---------- .../internal/metadata/generated_config.go | 2 +- .../metadata/generated_metrics_test.go | 1 + receiver/kafkametricsreceiver/metadata.yaml | 2 +- 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 477890691963..a72eaf514bee 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -170,6 +170,20 @@ Number of partitions in topic. | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | +### messaging.kafka.broker.outgoing_byte_rate + +Average outgoing Byte Rate in bytes/second. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| broker | The ID (integer) of a broker | Any Int | + ## Optional Metrics The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: @@ -230,20 +244,6 @@ Average incoming Byte Rate in bytes/second | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.broker.outgoing_byte_rate - -Average outgoing Byte Rate in bytes/second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Double | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.request_latency Average request latency in seconds diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index d37dfcc66105..b148e3cd5954 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -97,7 +97,7 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MessagingKafkaBrokerOutgoingByteRate: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerRequestLatency: MetricConfig{ Enabled: false, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 655568a6de8f..0762bd6d0a36 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -158,6 +158,7 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMessagingKafkaBrokerIncomingByteRateDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(ts, 1, 6) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index a3102e3f4f4a..108364b000e2 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -65,7 +65,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.outgoing_byte_rate: - enabled: false + enabled: true description: Average outgoing Byte Rate in bytes/second. 
unit: 1 gauge: From ec75617745da3977c92af9466b64c8ac9781ffb5 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Wed, 20 Sep 2023 18:39:06 -0700 Subject: [PATCH 23/36] chore update --- config.yaml | 63 +++++++++++++++++++ .../kafkametricsreceiver/documentation.md | 40 ++++++------ .../kafkametricsreceiver/integration_test.go | 28 ++++++++- .../internal/metadata/generated_config.go | 6 +- .../metadata/generated_metrics_test.go | 3 +- receiver/kafkametricsreceiver/metadata.yaml | 6 +- 6 files changed, 118 insertions(+), 28 deletions(-) create mode 100644 config.yaml diff --git a/config.yaml b/config.yaml new file mode 100644 index 000000000000..c7f2d37e1f96 --- /dev/null +++ b/config.yaml @@ -0,0 +1,63 @@ +receivers: + kafkametrics: + brokers: + - pkc-p11xm.us-east-1.aws.confluent.cloud:9092 + protocol_version: 2.0.0 + scrapers: + - brokers + - topics + - consumers + auth: + sasl: + username: WQCM423SHEOR4JTI + password: +ElGWWiCEhpaffi6qbpHcUIsCa7fVPdnnIXDVv4P9Cn9nq+4NXzgI1HRkJUvQUTQ + mechanism: PLAIN + tls: + insecure_skip_verify: false + collection_interval: 30s + otlp: + protocols: + grpc: + endpoint: localhost:4317 + http: + endpoint: localhost:4318 + + + prometheus: + config: + scrape_configs: + - job_name: "confluent" + scrape_interval: 60s # Do not go any lower than this or you'll hit rate limits + static_configs: + - targets: ["api.telemetry.confluent.cloud"] + scheme: https + basic_auth: + username: UUC44UI5IHPL2DBS + password: GGxJTDCfx34Lc2+TXoYSghzPRQo0caZuHa7hVMdNJo90405NgTmYk+afN3G+cWZe + metrics_path: /v2/metrics/cloud/export + params: + "resource.kafka.id": + - lkc-j8d3pm +exporters: + otlp: + endpoint: https://otlp.nr-data.net:4317 + headers: + api-key: 990e51ef617558ed94e98be5a7c6e31c6f64NRAL +processors: + batch: + memory_limiter: + limit_mib: 400 + spike_limit_mib: 100 + check_interval: 5s +service: + telemetry: + logs: + pipelines: + metrics: + receivers: [prometheus] + processors: [batch] + exporters: [otlp] + metrics/kafka: + receivers: [kafkametrics] + processors: [batch] + exporters: [otlp] \ No newline at end of file diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index a72eaf514bee..172808a4face 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -170,13 +170,27 @@ Number of partitions in topic. | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | -### messaging.kafka.broker.outgoing_byte_rate +### messaging.kafka.broker.consumer_fetch_rate -Average outgoing Byte Rate in bytes/second. 
+Average consumer fetch Rate | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Double | +| {fetches}/s | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| broker | The ID (integer) of a broker | Any Int | + +### messaging.kafka.broker.incoming_byte_rate + +Average incoming Byte Rate in bytes/second + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By/s | Gauge | Double | #### Attributes @@ -208,20 +222,6 @@ Count of consumer fetches | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.broker.consumer_fetch_rate - -Average consumer fetch Rate - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {fetches}/s | Gauge | Double | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| broker | The ID (integer) of a broker | Any Int | - ### messaging.kafka.broker.count Number of brokers in the cluster. @@ -230,13 +230,13 @@ Number of brokers in the cluster. | ---- | ----------- | ---------- | ----------------------- | --------- | | {broker} | Sum | Int | Cumulative | true | -### messaging.kafka.broker.incoming_byte_rate +### messaging.kafka.broker.outgoing_byte_rate -Average incoming Byte Rate in bytes/second +Average outgoing Byte Rate in bytes/second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| By/s | Gauge | Double | +| 1 | Gauge | Double | #### Attributes diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index e99031c00f6a..d202ff837cb2 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -67,8 +67,34 @@ func TestIntegration(t *testing.T) { rCfg.Brokers = []string{fmt.Sprintf("%s:%s", ci.HostForNamedContainer(t, "kafka"), ci.MappedPortForNamedContainer(t, "kafka", kafkaPort))} - rCfg.Scrapers = []string{"brokers", "consumers", "topics"} + rCfg.Scrapers = []string{ + "brokers", + "consumers", + "topics", + "messaging.kafka.broker.count", + "messaging.kafka.broker.consumer_fetch_rate", + "messaging.kafka.broker.incoming_byte_rate", + "messaging.kafka.broker.outgoing_byte_rate", + "messaging.kafka.broker.request_latency", + "messaging.kafka.broker.response_rate", + "messaging.kafka.broker.response_size", + "messaging.kafka.broker.request_rate", + "messaging.kafka.broker.request_size", + "messaging.kafka.broker.requests_in_flight", + "messaging.kafka.broker.consumer_fetch_count", + "kafka.topic.partitions", + "kafka.partition.current_offset", + "kafka.partition.oldest_offset", + "kafka.partition.replicas", + "kafka.partition.replicas_in_sync", + "kafka.consumer_group.members", + "kafka.consumer_group.offset", + "kafka.consumer_group.offset_sum", + "kafka.consumer_group.lag", + "kafka.consumer_group.lag_sum", + } }), + scraperinttest.WriteExpected(), // TODO remove scraperinttest.WithCompareOptions( // pmetrictest.IgnoreMetricValues(), diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index b148e3cd5954..ef19b55f5e5d 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -88,16 +88,16 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MessagingKafkaBrokerConsumerFetchRate: MetricConfig{ - 
Enabled: false, + Enabled: true, }, MessagingKafkaBrokerCount: MetricConfig{ Enabled: false, }, MessagingKafkaBrokerIncomingByteRate: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerOutgoingByteRate: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerRequestLatency: MetricConfig{ Enabled: false, diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 0762bd6d0a36..0e0d47208acd 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -149,16 +149,17 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts, 1, 6) allMetricsCount++ mb.RecordMessagingKafkaBrokerCountDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerIncomingByteRateDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(ts, 1, 6) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 108364b000e2..0fc6a2f329bc 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -47,7 +47,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_rate: - enabled: false + enabled: true description: Average consumer fetch Rate unit: "{fetches}/s" gauge: @@ -56,7 +56,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.incoming_byte_rate: - enabled: false + enabled: true description: Average incoming Byte Rate in bytes/second unit: By/s gauge: @@ -65,7 +65,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.outgoing_byte_rate: - enabled: true + enabled: false description: Average outgoing Byte Rate in bytes/second. 
unit: 1 gauge: From dafb75d6522b1ce60251540ca241e0734d301000 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Wed, 20 Sep 2023 18:44:56 -0700 Subject: [PATCH 24/36] update write expected --- receiver/kafkametricsreceiver/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index d202ff837cb2..07f64f5233b0 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -95,7 +95,7 @@ func TestIntegration(t *testing.T) { } }), - scraperinttest.WriteExpected(), // TODO remove + // scraperinttest.WriteExpected(), // TODO remove scraperinttest.WithCompareOptions( // pmetrictest.IgnoreMetricValues(), pmetrictest.IgnoreStartTimestamp(), From e8a0cb548171a6b8b85cf9b5737f394ab3496daf Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Wed, 20 Sep 2023 19:21:42 -0700 Subject: [PATCH 25/36] fix --- config.yaml | 63 ------------------- .../kafkametricsreceiver/integration_test.go | 11 ---- 2 files changed, 74 deletions(-) delete mode 100644 config.yaml diff --git a/config.yaml b/config.yaml deleted file mode 100644 index c7f2d37e1f96..000000000000 --- a/config.yaml +++ /dev/null @@ -1,63 +0,0 @@ -receivers: - kafkametrics: - brokers: - - pkc-p11xm.us-east-1.aws.confluent.cloud:9092 - protocol_version: 2.0.0 - scrapers: - - brokers - - topics - - consumers - auth: - sasl: - username: WQCM423SHEOR4JTI - password: +ElGWWiCEhpaffi6qbpHcUIsCa7fVPdnnIXDVv4P9Cn9nq+4NXzgI1HRkJUvQUTQ - mechanism: PLAIN - tls: - insecure_skip_verify: false - collection_interval: 30s - otlp: - protocols: - grpc: - endpoint: localhost:4317 - http: - endpoint: localhost:4318 - - - prometheus: - config: - scrape_configs: - - job_name: "confluent" - scrape_interval: 60s # Do not go any lower than this or you'll hit rate limits - static_configs: - - targets: ["api.telemetry.confluent.cloud"] - scheme: https - basic_auth: - username: UUC44UI5IHPL2DBS - password: GGxJTDCfx34Lc2+TXoYSghzPRQo0caZuHa7hVMdNJo90405NgTmYk+afN3G+cWZe - metrics_path: /v2/metrics/cloud/export - params: - "resource.kafka.id": - - lkc-j8d3pm -exporters: - otlp: - endpoint: https://otlp.nr-data.net:4317 - headers: - api-key: 990e51ef617558ed94e98be5a7c6e31c6f64NRAL -processors: - batch: - memory_limiter: - limit_mib: 400 - spike_limit_mib: 100 - check_interval: 5s -service: - telemetry: - logs: - pipelines: - metrics: - receivers: [prometheus] - processors: [batch] - exporters: [otlp] - metrics/kafka: - receivers: [kafkametrics] - processors: [batch] - exporters: [otlp] \ No newline at end of file diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index 07f64f5233b0..e30e367c60b8 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -71,17 +71,6 @@ func TestIntegration(t *testing.T) { "brokers", "consumers", "topics", - "messaging.kafka.broker.count", - "messaging.kafka.broker.consumer_fetch_rate", - "messaging.kafka.broker.incoming_byte_rate", - "messaging.kafka.broker.outgoing_byte_rate", - "messaging.kafka.broker.request_latency", - "messaging.kafka.broker.response_rate", - "messaging.kafka.broker.response_size", - "messaging.kafka.broker.request_rate", - "messaging.kafka.broker.request_size", - "messaging.kafka.broker.requests_in_flight", - 
"messaging.kafka.broker.consumer_fetch_count", "kafka.topic.partitions", "kafka.partition.current_offset", "kafka.partition.oldest_offset", From 051fa9046ba62b5a3f65f98a652cca2220d17551 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Wed, 20 Sep 2023 19:32:21 -0700 Subject: [PATCH 26/36] activate metrics --- .../kafkametricsreceiver/integration_test.go | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index e30e367c60b8..7af164eb04c3 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -71,16 +71,19 @@ func TestIntegration(t *testing.T) { "brokers", "consumers", "topics", - "kafka.topic.partitions", - "kafka.partition.current_offset", - "kafka.partition.oldest_offset", - "kafka.partition.replicas", - "kafka.partition.replicas_in_sync", - "kafka.consumer_group.members", - "kafka.consumer_group.offset", - "kafka.consumer_group.offset_sum", - "kafka.consumer_group.lag", - "kafka.consumer_group.lag_sum", + } + rCfg.Metrics = map[string]bool{ + "messaging.kafka.broker.count": true, + "messaging.kafka.broker.consumer_fetch_rate": true, + "messaging.kafka.broker.incoming_byte_rate": true, + "messaging.kafka.broker.outgoing_byte_rate": true, + "messaging.kafka.broker.request_latency": true, + "messaging.kafka.broker.response_rate": true, + "messaging.kafka.broker.response_size": true, + "messaging.kafka.broker.request_rate": true, + "messaging.kafka.broker.request_size": true, + "messaging.kafka.broker.requests_in_flight": true, + "messaging.kafka.broker.consumer_fetch_count": true, } }), From 1a4993b4c815db228bbda18f18153056780a5397 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:12:04 -0700 Subject: [PATCH 27/36] update to string instead of map --- .../kafkametricsreceiver/integration_test.go | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index 7af164eb04c3..e28a29978def 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -72,18 +72,29 @@ func TestIntegration(t *testing.T) { "consumers", "topics", } - rCfg.Metrics = map[string]bool{ - "messaging.kafka.broker.count": true, - "messaging.kafka.broker.consumer_fetch_rate": true, - "messaging.kafka.broker.incoming_byte_rate": true, - "messaging.kafka.broker.outgoing_byte_rate": true, - "messaging.kafka.broker.request_latency": true, - "messaging.kafka.broker.response_rate": true, - "messaging.kafka.broker.response_size": true, - "messaging.kafka.broker.request_rate": true, - "messaging.kafka.broker.request_size": true, - "messaging.kafka.broker.requests_in_flight": true, - "messaging.kafka.broker.consumer_fetch_count": true, + rCfg.Metrics = []string{ + "kafka.brokers", + "kafka.consumer_group.lag", + "kafka.consumer_group.lag_sum", + "kafka.consumer_group.members", + "kafka.consumer_group.offset", + "kafka.consumer_group.offset_sum", + "kafka.partition.current_offset", + "kafka.partition.oldest_offset", + "kafka.partition.replicas", + "kafka.partition.replicas_in_sync", + "kafka.topic.partitions", + "messaging.kafka.broker.consumer_fetch_count", + "messaging.kafka.broker.consumer_fetch_rate", + "messaging.kafka.broker.count", + 
"messaging.kafka.broker.incoming_byte_rate", + "messaging.kafka.broker.outgoing_byte_rate", + "messaging.kafka.broker.request_latency", + "messaging.kafka.broker.request_rate", + "messaging.kafka.broker.request_size", + "messaging.kafka.broker.requests_in_flight", + "messaging.kafka.broker.response_rate", + "messaging.kafka.broker.response_size", } }), From 5085e989a7c49cb907789ab956e58285772bc687 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:14:40 -0700 Subject: [PATCH 28/36] setting by field --- .../kafkametricsreceiver/integration_test.go | 26 ++----------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index e28a29978def..61673b6df4d7 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -72,30 +72,8 @@ func TestIntegration(t *testing.T) { "consumers", "topics", } - rCfg.Metrics = []string{ - "kafka.brokers", - "kafka.consumer_group.lag", - "kafka.consumer_group.lag_sum", - "kafka.consumer_group.members", - "kafka.consumer_group.offset", - "kafka.consumer_group.offset_sum", - "kafka.partition.current_offset", - "kafka.partition.oldest_offset", - "kafka.partition.replicas", - "kafka.partition.replicas_in_sync", - "kafka.topic.partitions", - "messaging.kafka.broker.consumer_fetch_count", - "messaging.kafka.broker.consumer_fetch_rate", - "messaging.kafka.broker.count", - "messaging.kafka.broker.incoming_byte_rate", - "messaging.kafka.broker.outgoing_byte_rate", - "messaging.kafka.broker.request_latency", - "messaging.kafka.broker.request_rate", - "messaging.kafka.broker.request_size", - "messaging.kafka.broker.requests_in_flight", - "messaging.kafka.broker.response_rate", - "messaging.kafka.broker.response_size", - } + rCfg.Metrics.MessagingKafkaBrokerConsumerFetchRate.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerConsumerFetchRate.enabledSetByUser = true }), // scraperinttest.WriteExpected(), // TODO remove From 1fb7d9ba8b257f6235812d21524c4a0f919e7baa Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:17:08 -0700 Subject: [PATCH 29/36] chore: remove enabled setbyuser --- receiver/kafkametricsreceiver/integration_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index 61673b6df4d7..ac8464f04cfd 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -73,7 +73,6 @@ func TestIntegration(t *testing.T) { "topics", } rCfg.Metrics.MessagingKafkaBrokerConsumerFetchRate.Enabled = true - rCfg.Metrics.MessagingKafkaBrokerConsumerFetchRate.enabledSetByUser = true }), // scraperinttest.WriteExpected(), // TODO remove From afea5c1ac4212e18ca61717c0dc2352cba9541c2 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:22:27 -0700 Subject: [PATCH 30/36] adding all metrics enabled --- .../kafkametricsreceiver/integration_test.go | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index ac8464f04cfd..0696b0bd7d6c 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ 
b/receiver/kafkametricsreceiver/integration_test.go @@ -72,10 +72,31 @@ func TestIntegration(t *testing.T) { "consumers", "topics", } + rCfg.Metrics.KafkaBrokers.Enabled = true + rCfg.Metrics.KafkaConsumerGroupLag.Enabled = true + rCfg.Metrics.KafkaConsumerGroupLagSum.Enabled = true + rCfg.Metrics.KafkaConsumerGroupMembers.Enabled = true + rCfg.Metrics.KafkaConsumerGroupOffset.Enabled = true + rCfg.Metrics.KafkaConsumerGroupOffsetSum.Enabled = true + rCfg.Metrics.KafkaPartitionCurrentOffset.Enabled = true + rCfg.Metrics.KafkaPartitionOldestOffset.Enabled = true + rCfg.Metrics.KafkaPartitionReplicas.Enabled = true + rCfg.Metrics.KafkaPartitionReplicasInSync.Enabled = true + rCfg.Metrics.KafkaTopicPartitions.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerConsumerFetchCount.Enabled = true rCfg.Metrics.MessagingKafkaBrokerConsumerFetchRate.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerCount.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerIncomingByteRate.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerOutgoingByteRate.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerRequestLatency.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerRequestRate.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerRequestSize.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerRequestsInFlight.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerResponseRate.Enabled = true + rCfg.Metrics.MessagingKafkaBrokerResponseSize.Enabled = true }), - // scraperinttest.WriteExpected(), // TODO remove + // scraperinttest.WriteExpected(), // TODO remove scraperinttest.WithCompareOptions( // pmetrictest.IgnoreMetricValues(), pmetrictest.IgnoreStartTimestamp(), From 1009d64229fd8f5942fa4517d6536f0d35cae3e4 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 11:42:53 -0700 Subject: [PATCH 31/36] chore - add sink --- receiver/kafkametricsreceiver/integration_test.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index 0696b0bd7d6c..219861b44562 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -14,6 +14,7 @@ import ( "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumertest" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/scraperinttest" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" @@ -27,6 +28,9 @@ const ( ) func TestIntegration(t *testing.T) { + + sink := new(consumertest.MetricsSink) + scraperinttest.NewIntegrationTest( NewFactory(), scraperinttest.WithNetworkRequest( @@ -72,7 +76,7 @@ func TestIntegration(t *testing.T) { "consumers", "topics", } - rCfg.Metrics.KafkaBrokers.Enabled = true + rCfg.Metrics.KafkaBrokers.Enabled = false rCfg.Metrics.KafkaConsumerGroupLag.Enabled = true rCfg.Metrics.KafkaConsumerGroupLagSum.Enabled = true rCfg.Metrics.KafkaConsumerGroupMembers.Enabled = true @@ -94,6 +98,8 @@ func TestIntegration(t *testing.T) { rCfg.Metrics.MessagingKafkaBrokerRequestsInFlight.Enabled = true rCfg.Metrics.MessagingKafkaBrokerResponseRate.Enabled = true rCfg.Metrics.MessagingKafkaBrokerResponseSize.Enabled = true + + rCfg.MetricsSink = sink }), // scraperinttest.WriteExpected(), // TODO remove @@ -103,4 +109,9 @@ func TestIntegration(t *testing.T) 
{ pmetrictest.IgnoreTimestamp(), ), ).Run(t) + + collectedMetrics := sink.AllMetrics() + + err := scraperinttest.WriteExpected(collectedMetrics, "./testdata/integration/expected.yaml") + require.noError(t, err) } From c5c8c3b11de79b5b4744ee6edc32b7bd6a80a172 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 12:19:18 -0700 Subject: [PATCH 32/36] fix write expected --- receiver/kafkametricsreceiver/integration_test.go | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index 219861b44562..77e62c422192 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -14,7 +14,6 @@ import ( "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/consumertest" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/scraperinttest" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" @@ -28,9 +27,6 @@ const ( ) func TestIntegration(t *testing.T) { - - sink := new(consumertest.MetricsSink) - scraperinttest.NewIntegrationTest( NewFactory(), scraperinttest.WithNetworkRequest( @@ -98,8 +94,6 @@ func TestIntegration(t *testing.T) { rCfg.Metrics.MessagingKafkaBrokerRequestsInFlight.Enabled = true rCfg.Metrics.MessagingKafkaBrokerResponseRate.Enabled = true rCfg.Metrics.MessagingKafkaBrokerResponseSize.Enabled = true - - rCfg.MetricsSink = sink }), // scraperinttest.WriteExpected(), // TODO remove @@ -108,10 +102,6 @@ func TestIntegration(t *testing.T) { pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp(), ), + scraperinttest.WriteExpected(), ).Run(t) - - collectedMetrics := sink.AllMetrics() - - err := scraperinttest.WriteExpected(collectedMetrics, "./testdata/integration/expected.yaml") - require.noError(t, err) } From 4b80265dd456972a1a2f0c420f4a491813f574fa Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 12:45:17 -0700 Subject: [PATCH 33/36] chore enable all metrics to try and run write() --- config.yaml | 65 +++++++++++++++++ .../kafkametricsreceiver/documentation.md | 72 +++++++++---------- .../internal/metadata/generated_config.go | 20 +++--- .../metadata/generated_metrics_test.go | 14 ++-- receiver/kafkametricsreceiver/metadata.yaml | 20 +++--- 5 files changed, 130 insertions(+), 61 deletions(-) create mode 100644 config.yaml diff --git a/config.yaml b/config.yaml new file mode 100644 index 000000000000..c550660a5bd6 --- /dev/null +++ b/config.yaml @@ -0,0 +1,65 @@ +receivers: + kafkametrics: + brokers: + - pkc-p11xm.us-east-1.aws.confluent.cloud:9092 + protocol_version: 2.0.0 + scrapers: + - brokers + - topics + - consumers + auth: + sasl: + username: WQCM423SHEOR4JTI + password: +ElGWWiCEhpaffi6qbpHcUIsCa7fVPdnnIXDVv4P9Cn9nq+4NXzgI1HRkJUvQUTQ + mechanism: PLAIN + tls: + insecure_skip_verify: false + collection_interval: 30s + otlp: + protocols: + grpc: + endpoint: localhost:4317 + http: + endpoint: localhost:4318 + + + prometheus: + config: + scrape_configs: + - job_name: "confluent" + scrape_interval: 60s # Do not go any lower than this or you'll hit rate limits + static_configs: + - targets: ["api.telemetry.confluent.cloud"] + scheme: https + basic_auth: + username: UUC44UI5IHPL2DBS + password: 
GGxJTDCfx34Lc2+TXoYSghzPRQo0caZuHa7hVMdNJo90405NgTmYk+afN3G+cWZe + metrics_path: /v2/metrics/cloud/export + params: + "resource.kafka.id": + - lkc-j8d3pm +exporters: + otlp: + endpoint: https://otlp.nr-data.net:4317 + headers: + api-key: 990e51ef617558ed94e98be5a7c6e31c6f64NRAL + logging: + verbosity: normal +processors: + batch: + memory_limiter: + limit_mib: 400 + spike_limit_mib: 100 + check_interval: 5s +service: + telemetry: + logs: + pipelines: + metrics: + receivers: [prometheus] + processors: [batch] + exporters: [otlp, logging] + metrics/kafka: + receivers: [kafkametrics] + processors: [batch] + exporters: [otlp] diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 172808a4face..53015d6d42ac 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -12,14 +12,6 @@ metrics: enabled: false ``` -### kafka.brokers - -[DEPRACATED] Number of brokers in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {broker} | Sum | Int | Cumulative | false | - ### kafka.consumer_group.lag Current approximate lag of consumer group at partition of topic @@ -170,13 +162,13 @@ Number of partitions in topic. | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | -### messaging.kafka.broker.consumer_fetch_rate +### messaging.kafka.broker.consumer_fetch_count -Average consumer fetch Rate +Count of consumer fetches -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {fetches}/s | Gauge | Double | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {fetches} | Sum | Int | Cumulative | false | #### Attributes @@ -184,13 +176,13 @@ Average consumer fetch Rate | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.broker.incoming_byte_rate +### messaging.kafka.broker.consumer_fetch_rate -Average incoming Byte Rate in bytes/second +Average consumer fetch Rate | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| By/s | Gauge | Double | +| {fetches}/s | Gauge | Double | #### Attributes @@ -198,23 +190,21 @@ Average incoming Byte Rate in bytes/second | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -## Optional Metrics +### messaging.kafka.broker.count -The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: +Number of brokers in the cluster. 
-```yaml -metrics: - : - enabled: true -``` +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {broker} | Sum | Int | Cumulative | true | -### messaging.kafka.broker.consumer_fetch_count +### messaging.kafka.broker.incoming_byte_rate -Count of consumer fetches +Average incoming Byte Rate in bytes/second -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {fetches} | Sum | Int | Cumulative | false | +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By/s | Gauge | Double | #### Attributes @@ -222,14 +212,6 @@ Count of consumer fetches | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | -### messaging.kafka.broker.count - -Number of brokers in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {broker} | Sum | Int | Cumulative | true | - ### messaging.kafka.broker.outgoing_byte_rate Average outgoing Byte Rate in bytes/second. @@ -327,3 +309,21 @@ Average response size in bytes | Name | Description | Values | | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | + +## Optional Metrics + +The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: + +```yaml +metrics: + : + enabled: true +``` + +### kafka.brokers + +[DEPRACATED] Number of brokers in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {broker} | Sum | Int | Cumulative | false | diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index ef19b55f5e5d..e626fea665f7 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -52,7 +52,7 @@ type MetricsConfig struct { func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ KafkaBrokers: MetricConfig{ - Enabled: true, + Enabled: false, }, KafkaConsumerGroupLag: MetricConfig{ Enabled: true, @@ -85,37 +85,37 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: true, }, MessagingKafkaBrokerConsumerFetchCount: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerConsumerFetchRate: MetricConfig{ Enabled: true, }, MessagingKafkaBrokerCount: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerIncomingByteRate: MetricConfig{ Enabled: true, }, MessagingKafkaBrokerOutgoingByteRate: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerRequestLatency: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerRequestRate: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerRequestSize: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerRequestsInFlight: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerResponseRate: MetricConfig{ - Enabled: false, + Enabled: true, }, MessagingKafkaBrokerResponseSize: MetricConfig{ - Enabled: false, + Enabled: true, }, } } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go 
b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 0e0d47208acd..6974a170a555 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -49,10 +49,6 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 - if test.configSet == testSetDefault || test.configSet == testSetAll { - assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. Use `messaging.kafka.broker.count`", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } if test.configSet == testSetDefault { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ @@ -102,7 +98,6 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount := 0 allMetricsCount := 0 - defaultMetricsCount++ allMetricsCount++ mb.RecordKafkaBrokersDataPoint(ts, 1) @@ -146,6 +141,7 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordKafkaTopicPartitionsDataPoint(ts, 1, "topic-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts, 1, 6) @@ -153,6 +149,7 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerCountDataPoint(ts, 1) @@ -160,24 +157,31 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMessagingKafkaBrokerIncomingByteRateDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestLatencyDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestRateDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestSizeDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestsInFlightDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerResponseRateDataPoint(ts, 1, 6) + defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerResponseSizeDataPoint(ts, 1, 6) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index 0fc6a2f329bc..aaca34da5096 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -26,7 +26,7 @@ metrics: # brokers scraper kafka.brokers: - enabled: true + enabled: false description: '[DEPRACATED] Number of brokers in the cluster.' unit: "{broker}" sum: @@ -37,7 +37,7 @@ metrics: if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.broker.count` messaging.kafka.broker.count: - enabled: false + enabled: true description: Number of brokers in the cluster. unit: "{broker}" sum: @@ -65,7 +65,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.outgoing_byte_rate: - enabled: false + enabled: true description: Average outgoing Byte Rate in bytes/second. 
unit: 1 gauge: @@ -74,7 +74,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.request_latency: - enabled: false + enabled: true description: Average request latency in seconds unit: "s" gauge: @@ -83,7 +83,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.response_rate: - enabled: false + enabled: true description: Average response rate per second unit: "{response}/s" gauge: @@ -92,7 +92,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.response_size: - enabled: false + enabled: true description: Average response size in bytes unit: "By" gauge: @@ -101,7 +101,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.request_rate: - enabled: false + enabled: true description: Average request rate per second. unit: "{requests}/s" gauge: @@ -110,7 +110,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.request_size: - enabled: false + enabled: true description: Average request size in bytes unit: "By" gauge: @@ -119,7 +119,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.requests_in_flight: - enabled: false + enabled: true description: Requests in flight unit: "{requests}" gauge: @@ -128,7 +128,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_count: - enabled: false + enabled: true description: Count of consumer fetches unit: "{fetches}" sum: From 3fc9412a84f184c592799f5f03bd7abad3bb5b6d Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 13:11:03 -0700 Subject: [PATCH 34/36] disable again --- .../kafkametricsreceiver/documentation.md | 36 +++++++++---------- .../internal/metadata/generated_config.go | 24 ++++++------- .../metadata/generated_metrics_test.go | 16 +++------ receiver/kafkametricsreceiver/metadata.yaml | 24 ++++++------- 4 files changed, 47 insertions(+), 53 deletions(-) diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 53015d6d42ac..477890691963 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -12,6 +12,14 @@ metrics: enabled: false ``` +### kafka.brokers + +[DEPRACATED] Number of brokers in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {broker} | Sum | Int | Cumulative | false | + ### kafka.consumer_group.lag Current approximate lag of consumer group at partition of topic @@ -162,6 +170,16 @@ Number of partitions in topic. | ---- | ----------- | ------ | | topic | The ID (integer) of a topic | Any Str | +## Optional Metrics + +The following metrics are not emitted by default. 
Each of them can be enabled by applying the following configuration: + +```yaml +metrics: + : + enabled: true +``` + ### messaging.kafka.broker.consumer_fetch_count Count of consumer fetches @@ -309,21 +327,3 @@ Average response size in bytes | Name | Description | Values | | ---- | ----------- | ------ | | broker | The ID (integer) of a broker | Any Int | - -## Optional Metrics - -The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: - -```yaml -metrics: - : - enabled: true -``` - -### kafka.brokers - -[DEPRACATED] Number of brokers in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {broker} | Sum | Int | Cumulative | false | diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go index e626fea665f7..d37dfcc66105 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_config.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_config.go @@ -52,7 +52,7 @@ type MetricsConfig struct { func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ KafkaBrokers: MetricConfig{ - Enabled: false, + Enabled: true, }, KafkaConsumerGroupLag: MetricConfig{ Enabled: true, @@ -85,37 +85,37 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: true, }, MessagingKafkaBrokerConsumerFetchCount: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerConsumerFetchRate: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerCount: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerIncomingByteRate: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerOutgoingByteRate: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerRequestLatency: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerRequestRate: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerRequestSize: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerRequestsInFlight: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerResponseRate: MetricConfig{ - Enabled: true, + Enabled: false, }, MessagingKafkaBrokerResponseSize: MetricConfig{ - Enabled: true, + Enabled: false, }, } } diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 6974a170a555..655568a6de8f 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -49,6 +49,10 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 + if test.configSet == testSetDefault || test.configSet == testSetAll { + assert.Equal(t, "[WARNING] `kafka.brokers` should not be enabled: The metric is deprecated and will be removed. 
Use `messaging.kafka.broker.count`", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } if test.configSet == testSetDefault { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `messaging.kafka.broker.consumer_fetch_count`: This metric will be enabled by default in the next versions.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ @@ -98,6 +102,7 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount := 0 allMetricsCount := 0 + defaultMetricsCount++ allMetricsCount++ mb.RecordKafkaBrokersDataPoint(ts, 1) @@ -141,47 +146,36 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordKafkaTopicPartitionsDataPoint(ts, 1, "topic-val") - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchCountDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerConsumerFetchRateDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerCountDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerIncomingByteRateDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerOutgoingByteRateDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestLatencyDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestRateDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestSizeDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerRequestsInFlightDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerResponseRateDataPoint(ts, 1, 6) - defaultMetricsCount++ allMetricsCount++ mb.RecordMessagingKafkaBrokerResponseSizeDataPoint(ts, 1, 6) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index aaca34da5096..a3102e3f4f4a 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -26,7 +26,7 @@ metrics: # brokers scraper kafka.brokers: - enabled: false + enabled: true description: '[DEPRACATED] Number of brokers in the cluster.' unit: "{broker}" sum: @@ -37,7 +37,7 @@ metrics: if_enabled: The metric is deprecated and will be removed. Use `messaging.kafka.broker.count` messaging.kafka.broker.count: - enabled: true + enabled: false description: Number of brokers in the cluster. unit: "{broker}" sum: @@ -47,7 +47,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_rate: - enabled: true + enabled: false description: Average consumer fetch Rate unit: "{fetches}/s" gauge: @@ -56,7 +56,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.incoming_byte_rate: - enabled: true + enabled: false description: Average incoming Byte Rate in bytes/second unit: By/s gauge: @@ -65,7 +65,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.outgoing_byte_rate: - enabled: true + enabled: false description: Average outgoing Byte Rate in bytes/second. unit: 1 gauge: @@ -74,7 +74,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. 
messaging.kafka.broker.request_latency: - enabled: true + enabled: false description: Average request latency in seconds unit: "s" gauge: @@ -83,7 +83,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.response_rate: - enabled: true + enabled: false description: Average response rate per second unit: "{response}/s" gauge: @@ -92,7 +92,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.response_size: - enabled: true + enabled: false description: Average response size in bytes unit: "By" gauge: @@ -101,7 +101,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.request_rate: - enabled: true + enabled: false description: Average request rate per second. unit: "{requests}/s" gauge: @@ -110,7 +110,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.request_size: - enabled: true + enabled: false description: Average request size in bytes unit: "By" gauge: @@ -119,7 +119,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.requests_in_flight: - enabled: true + enabled: false description: Requests in flight unit: "{requests}" gauge: @@ -128,7 +128,7 @@ metrics: warnings: if_enabled_not_set: This metric will be enabled by default in the next versions. messaging.kafka.broker.consumer_fetch_count: - enabled: true + enabled: false description: Count of consumer fetches unit: "{fetches}" sum: From 429809617e2b0cc564a3a2a7dfc47fe88b0b0327 Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Thu, 21 Sep 2023 13:45:02 -0700 Subject: [PATCH 35/36] remove test config --- config.yaml | 65 ----------------------------------------------------- 1 file changed, 65 deletions(-) delete mode 100644 config.yaml diff --git a/config.yaml b/config.yaml deleted file mode 100644 index c550660a5bd6..000000000000 --- a/config.yaml +++ /dev/null @@ -1,65 +0,0 @@ -receivers: - kafkametrics: - brokers: - - pkc-p11xm.us-east-1.aws.confluent.cloud:9092 - protocol_version: 2.0.0 - scrapers: - - brokers - - topics - - consumers - auth: - sasl: - username: WQCM423SHEOR4JTI - password: +ElGWWiCEhpaffi6qbpHcUIsCa7fVPdnnIXDVv4P9Cn9nq+4NXzgI1HRkJUvQUTQ - mechanism: PLAIN - tls: - insecure_skip_verify: false - collection_interval: 30s - otlp: - protocols: - grpc: - endpoint: localhost:4317 - http: - endpoint: localhost:4318 - - - prometheus: - config: - scrape_configs: - - job_name: "confluent" - scrape_interval: 60s # Do not go any lower than this or you'll hit rate limits - static_configs: - - targets: ["api.telemetry.confluent.cloud"] - scheme: https - basic_auth: - username: UUC44UI5IHPL2DBS - password: GGxJTDCfx34Lc2+TXoYSghzPRQo0caZuHa7hVMdNJo90405NgTmYk+afN3G+cWZe - metrics_path: /v2/metrics/cloud/export - params: - "resource.kafka.id": - - lkc-j8d3pm -exporters: - otlp: - endpoint: https://otlp.nr-data.net:4317 - headers: - api-key: 990e51ef617558ed94e98be5a7c6e31c6f64NRAL - logging: - verbosity: normal -processors: - batch: - memory_limiter: - limit_mib: 400 - spike_limit_mib: 100 - check_interval: 5s -service: - telemetry: - logs: - pipelines: - metrics: - receivers: [prometheus] - processors: [batch] - exporters: [otlp, logging] - metrics/kafka: - receivers: [kafkametrics] - processors: [batch] - exporters: 
[otlp] From 38ed735cd2234a157479572ad4050d998c9fd27e Mon Sep 17 00:00:00 2001 From: jcountsNR <94138069+jcountsNR@users.noreply.github.com> Date: Tue, 3 Oct 2023 12:50:16 -0700 Subject: [PATCH 36/36] remove writeexpected --- receiver/kafkametricsreceiver/integration_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index 77e62c422192..eef9b9d7039e 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -102,6 +102,5 @@ func TestIntegration(t *testing.T) { pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp(), ), - scraperinttest.WriteExpected(), ).Run(t) }
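
---

Usage note (editor's illustration, not part of the patch series): after these patches, the expanded broker metrics are generated but left disabled by default, and the deprecated kafka.brokers metric remains on by default. The sketch below shows one way a collector user might enable the new metrics through the receiver's generated `metrics` configuration, following the `metrics: <metric_name>: enabled:` pattern documented in documentation.md. The broker address, protocol version, and collection interval are placeholder assumptions; the metric names are taken from the receiver's metadata.yaml in the patches above.

```yaml
receivers:
  kafkametrics:
    brokers:
      - localhost:9092        # placeholder broker address
    protocol_version: 2.0.0   # assumed; match your cluster
    scrapers:
      - brokers
    collection_interval: 30s
    metrics:
      # New broker metrics added by this PR are optional; enable them explicitly.
      messaging.kafka.broker.count:
        enabled: true
      messaging.kafka.broker.consumer_fetch_count:
        enabled: true
      messaging.kafka.broker.consumer_fetch_rate:
        enabled: true
      messaging.kafka.broker.incoming_byte_rate:
        enabled: true
      messaging.kafka.broker.outgoing_byte_rate:
        enabled: true
      messaging.kafka.broker.request_latency:
        enabled: true
      messaging.kafka.broker.request_rate:
        enabled: true
      messaging.kafka.broker.request_size:
        enabled: true
      messaging.kafka.broker.requests_in_flight:
        enabled: true
      messaging.kafka.broker.response_rate:
        enabled: true
      messaging.kafka.broker.response_size:
        enabled: true
      # kafka.brokers is deprecated in favor of messaging.kafka.broker.count.
      kafka.brokers:
        enabled: false
```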