From 0f8dfde960ec07a5d1cd3f33191e44b4d978bfb2 Mon Sep 17 00:00:00 2001 From: Povilas Versockas Date: Fri, 21 Jul 2023 14:56:52 +0300 Subject: [PATCH] [receiver/k8scluster] Add k8s.pod.status_reason metric --- ...8s-cluster-receiver-pod-status-reason.yaml | 20 ++ .../internal/pod/documentation.md | 40 +++ .../pod/internal/metadata/generated_config.go | 22 +- .../metadata/generated_config_test.go | 14 +- .../internal/metadata/generated_metrics.go | 309 +++++++++++++++++- .../metadata/generated_metrics_test.go | 80 +++++ .../internal/metadata/testdata/config.yaml | 20 ++ .../internal/pod/metadata.yaml | 32 +- .../k8sclusterreceiver/internal/pod/pods.go | 40 +++ .../internal/pod/pods_test.go | 20 ++ .../internal/pod/testdata/expected.yaml | 32 +- .../pod/testdata/expected_evicted.yaml | 119 +++++++ .../internal/testutils/objects.go | 19 ++ receiver/k8sclusterreceiver/receiver_test.go | 10 +- .../testdata/e2e/expected.yaml | 309 ++++++++++++++++++ 15 files changed, 1066 insertions(+), 20 deletions(-) create mode 100755 .chloggen/k8s-cluster-receiver-pod-status-reason.yaml create mode 100644 receiver/k8sclusterreceiver/internal/pod/testdata/expected_evicted.yaml diff --git a/.chloggen/k8s-cluster-receiver-pod-status-reason.yaml b/.chloggen/k8s-cluster-receiver-pod-status-reason.yaml new file mode 100755 index 000000000000..630cd7ec5c7a --- /dev/null +++ b/.chloggen/k8s-cluster-receiver-pod-status-reason.yaml @@ -0,0 +1,20 @@ +# Use this changelog template to create an entry for release notes. +# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: k8sclusterreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Add pod status reason metrics" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [24034] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/receiver/k8sclusterreceiver/internal/pod/documentation.md b/receiver/k8sclusterreceiver/internal/pod/documentation.md index 09afa7c3cea4..685008cc5b44 100644 --- a/receiver/k8sclusterreceiver/internal/pod/documentation.md +++ b/receiver/k8sclusterreceiver/internal/pod/documentation.md @@ -22,6 +22,46 @@ Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 | ---- | ----------- | ---------- | | 1 | Gauge | Int | +### k8s.pod.status_reason_evicted + +Whether this pod status reason is Evicted (1), or not (0). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### k8s.pod.status_reason_node_affinity + +Whether this pod status reason is NodeAffinity (1), or not (0). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### k8s.pod.status_reason_node_lost + +Whether this pod status reason is NodeLost (1), or not (0). 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### k8s.pod.status_reason_shutdown + +Whether this pod status reason is Shutdown (1), or not (0). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### k8s.pod.status_reason_unexpected_admission_error + +Whether this pod status reason is Unexpected Admission Error (1), or not (0). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + ## Resource Attributes | Name | Description | Values | Enabled | diff --git a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config.go b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config.go index 28ec4922e5be..39cdb410102e 100644 --- a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config.go +++ b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config.go @@ -25,7 +25,12 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for k8s/pod metrics. type MetricsConfig struct { - K8sPodPhase MetricConfig `mapstructure:"k8s.pod.phase"` + K8sPodPhase MetricConfig `mapstructure:"k8s.pod.phase"` + K8sPodStatusReasonEvicted MetricConfig `mapstructure:"k8s.pod.status_reason_evicted"` + K8sPodStatusReasonNodeAffinity MetricConfig `mapstructure:"k8s.pod.status_reason_node_affinity"` + K8sPodStatusReasonNodeLost MetricConfig `mapstructure:"k8s.pod.status_reason_node_lost"` + K8sPodStatusReasonShutdown MetricConfig `mapstructure:"k8s.pod.status_reason_shutdown"` + K8sPodStatusReasonUnexpectedAdmissionError MetricConfig `mapstructure:"k8s.pod.status_reason_unexpected_admission_error"` } func DefaultMetricsConfig() MetricsConfig { @@ -33,6 +38,21 @@ func DefaultMetricsConfig() MetricsConfig { K8sPodPhase: MetricConfig{ Enabled: true, }, + K8sPodStatusReasonEvicted: MetricConfig{ + Enabled: true, + }, + K8sPodStatusReasonNodeAffinity: MetricConfig{ + Enabled: true, + }, + K8sPodStatusReasonNodeLost: MetricConfig{ + Enabled: true, + }, + K8sPodStatusReasonShutdown: MetricConfig{ + Enabled: true, + }, + K8sPodStatusReasonUnexpectedAdmissionError: MetricConfig{ + Enabled: true, + }, } } diff --git a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config_test.go b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config_test.go index e8ebeec5b9f8..00763d35d34b 100644 --- a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config_test.go +++ b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_config_test.go @@ -26,7 +26,12 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - K8sPodPhase: MetricConfig{Enabled: true}, + K8sPodPhase: MetricConfig{Enabled: true}, + K8sPodStatusReasonEvicted: MetricConfig{Enabled: true}, + K8sPodStatusReasonNodeAffinity: MetricConfig{Enabled: true}, + K8sPodStatusReasonNodeLost: MetricConfig{Enabled: true}, + K8sPodStatusReasonShutdown: MetricConfig{Enabled: true}, + K8sPodStatusReasonUnexpectedAdmissionError: MetricConfig{Enabled: true}, }, ResourceAttributes: ResourceAttributesConfig{ K8sNamespaceName: ResourceAttributeConfig{Enabled: true}, @@ -41,7 +46,12 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - K8sPodPhase: MetricConfig{Enabled: false}, + K8sPodPhase: MetricConfig{Enabled: false}, + 
K8sPodStatusReasonEvicted: MetricConfig{Enabled: false}, + K8sPodStatusReasonNodeAffinity: MetricConfig{Enabled: false}, + K8sPodStatusReasonNodeLost: MetricConfig{Enabled: false}, + K8sPodStatusReasonShutdown: MetricConfig{Enabled: false}, + K8sPodStatusReasonUnexpectedAdmissionError: MetricConfig{Enabled: false}, }, ResourceAttributes: ResourceAttributesConfig{ K8sNamespaceName: ResourceAttributeConfig{Enabled: false}, diff --git a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics.go b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics.go index 28137ef4183f..fe8f5bfeceb0 100644 --- a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics.go +++ b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics.go @@ -61,16 +61,266 @@ func newMetricK8sPodPhase(cfg MetricConfig) metricK8sPodPhase { return m } +type metricK8sPodStatusReasonEvicted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.status_reason_evicted metric with initial data. +func (m *metricK8sPodStatusReasonEvicted) init() { + m.data.SetName("k8s.pod.status_reason_evicted") + m.data.SetDescription("Whether this pod status reason is Evicted (1), or not (0).") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodStatusReasonEvicted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sPodStatusReasonEvicted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodStatusReasonEvicted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodStatusReasonEvicted(cfg MetricConfig) metricK8sPodStatusReasonEvicted { + m := metricK8sPodStatusReasonEvicted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sPodStatusReasonNodeAffinity struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.status_reason_node_affinity metric with initial data. +func (m *metricK8sPodStatusReasonNodeAffinity) init() { + m.data.SetName("k8s.pod.status_reason_node_affinity") + m.data.SetDescription("Whether this pod status reason is NodeAffinity (1), or not (0).") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodStatusReasonNodeAffinity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricK8sPodStatusReasonNodeAffinity) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodStatusReasonNodeAffinity) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodStatusReasonNodeAffinity(cfg MetricConfig) metricK8sPodStatusReasonNodeAffinity { + m := metricK8sPodStatusReasonNodeAffinity{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sPodStatusReasonNodeLost struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.status_reason_node_lost metric with initial data. +func (m *metricK8sPodStatusReasonNodeLost) init() { + m.data.SetName("k8s.pod.status_reason_node_lost") + m.data.SetDescription("Whether this pod status reason is NodeLost (1), or not (0).") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodStatusReasonNodeLost) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sPodStatusReasonNodeLost) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodStatusReasonNodeLost) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodStatusReasonNodeLost(cfg MetricConfig) metricK8sPodStatusReasonNodeLost { + m := metricK8sPodStatusReasonNodeLost{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sPodStatusReasonShutdown struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.status_reason_shutdown metric with initial data. +func (m *metricK8sPodStatusReasonShutdown) init() { + m.data.SetName("k8s.pod.status_reason_shutdown") + m.data.SetDescription("Whether this pod status reason is Shutdown (1), or not (0).") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodStatusReasonShutdown) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricK8sPodStatusReasonShutdown) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodStatusReasonShutdown) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodStatusReasonShutdown(cfg MetricConfig) metricK8sPodStatusReasonShutdown { + m := metricK8sPodStatusReasonShutdown{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sPodStatusReasonUnexpectedAdmissionError struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.status_reason_unexpected_admission_error metric with initial data. +func (m *metricK8sPodStatusReasonUnexpectedAdmissionError) init() { + m.data.SetName("k8s.pod.status_reason_unexpected_admission_error") + m.data.SetDescription("Whether this pod status reason is Unexpected Admission Error (1), or not (0).") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodStatusReasonUnexpectedAdmissionError) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sPodStatusReasonUnexpectedAdmissionError) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodStatusReasonUnexpectedAdmissionError) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodStatusReasonUnexpectedAdmissionError(cfg MetricConfig) metricK8sPodStatusReasonUnexpectedAdmissionError { + m := metricK8sPodStatusReasonUnexpectedAdmissionError{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information - resourceAttributesConfig ResourceAttributesConfig - metricK8sPodPhase metricK8sPodPhase + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. 
+ resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information + resourceAttributesConfig ResourceAttributesConfig + metricK8sPodPhase metricK8sPodPhase + metricK8sPodStatusReasonEvicted metricK8sPodStatusReasonEvicted + metricK8sPodStatusReasonNodeAffinity metricK8sPodStatusReasonNodeAffinity + metricK8sPodStatusReasonNodeLost metricK8sPodStatusReasonNodeLost + metricK8sPodStatusReasonShutdown metricK8sPodStatusReasonShutdown + metricK8sPodStatusReasonUnexpectedAdmissionError metricK8sPodStatusReasonUnexpectedAdmissionError } // metricBuilderOption applies changes to default metrics builder. @@ -85,11 +335,16 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - resourceAttributesConfig: mbc.ResourceAttributes, - metricK8sPodPhase: newMetricK8sPodPhase(mbc.Metrics.K8sPodPhase), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + resourceAttributesConfig: mbc.ResourceAttributes, + metricK8sPodPhase: newMetricK8sPodPhase(mbc.Metrics.K8sPodPhase), + metricK8sPodStatusReasonEvicted: newMetricK8sPodStatusReasonEvicted(mbc.Metrics.K8sPodStatusReasonEvicted), + metricK8sPodStatusReasonNodeAffinity: newMetricK8sPodStatusReasonNodeAffinity(mbc.Metrics.K8sPodStatusReasonNodeAffinity), + metricK8sPodStatusReasonNodeLost: newMetricK8sPodStatusReasonNodeLost(mbc.Metrics.K8sPodStatusReasonNodeLost), + metricK8sPodStatusReasonShutdown: newMetricK8sPodStatusReasonShutdown(mbc.Metrics.K8sPodStatusReasonShutdown), + metricK8sPodStatusReasonUnexpectedAdmissionError: newMetricK8sPodStatusReasonUnexpectedAdmissionError(mbc.Metrics.K8sPodStatusReasonUnexpectedAdmissionError), } for _, op := range options { op(mb) @@ -189,6 +444,11 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricK8sPodPhase.emit(ils.Metrics()) + mb.metricK8sPodStatusReasonEvicted.emit(ils.Metrics()) + mb.metricK8sPodStatusReasonNodeAffinity.emit(ils.Metrics()) + mb.metricK8sPodStatusReasonNodeLost.emit(ils.Metrics()) + mb.metricK8sPodStatusReasonShutdown.emit(ils.Metrics()) + mb.metricK8sPodStatusReasonUnexpectedAdmissionError.emit(ils.Metrics()) for _, op := range rmo { op(mb.resourceAttributesConfig, rm) @@ -214,6 +474,31 @@ func (mb *MetricsBuilder) RecordK8sPodPhaseDataPoint(ts pcommon.Timestamp, val i mb.metricK8sPodPhase.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sPodStatusReasonEvictedDataPoint adds a data point to k8s.pod.status_reason_evicted metric. +func (mb *MetricsBuilder) RecordK8sPodStatusReasonEvictedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricK8sPodStatusReasonEvicted.recordDataPoint(mb.startTime, ts, val) +} + +// RecordK8sPodStatusReasonNodeAffinityDataPoint adds a data point to k8s.pod.status_reason_node_affinity metric. 
+func (mb *MetricsBuilder) RecordK8sPodStatusReasonNodeAffinityDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricK8sPodStatusReasonNodeAffinity.recordDataPoint(mb.startTime, ts, val) +} + +// RecordK8sPodStatusReasonNodeLostDataPoint adds a data point to k8s.pod.status_reason_node_lost metric. +func (mb *MetricsBuilder) RecordK8sPodStatusReasonNodeLostDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricK8sPodStatusReasonNodeLost.recordDataPoint(mb.startTime, ts, val) +} + +// RecordK8sPodStatusReasonShutdownDataPoint adds a data point to k8s.pod.status_reason_shutdown metric. +func (mb *MetricsBuilder) RecordK8sPodStatusReasonShutdownDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricK8sPodStatusReasonShutdown.recordDataPoint(mb.startTime, ts, val) +} + +// RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint adds a data point to k8s.pod.status_reason_unexpected_admission_error metric. +func (mb *MetricsBuilder) RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricK8sPodStatusReasonUnexpectedAdmissionError.recordDataPoint(mb.startTime, ts, val) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { diff --git a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics_test.go b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics_test.go index 82e0d0cf2b7a..899e4b2eadc7 100644 --- a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics_test.go +++ b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/generated_metrics_test.go @@ -58,6 +58,26 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sPodPhaseDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sPodStatusReasonEvictedDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sPodStatusReasonNodeAffinityDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sPodStatusReasonNodeLostDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sPodStatusReasonShutdownDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts, 1) + metrics := mb.Emit(WithK8sNamespaceName("k8s.namespace.name-val"), WithK8sNodeName("k8s.node.name-val"), WithK8sPodName("k8s.pod.name-val"), WithK8sPodUID("k8s.pod.uid-val"), WithOpencensusResourcetype("opencensus.resourcetype-val")) if test.configSet == testSetNone { @@ -130,6 +150,66 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.pod.status_reason_evicted": + assert.False(t, validatedMetrics["k8s.pod.status_reason_evicted"], "Found a duplicate in the metrics slice: k8s.pod.status_reason_evicted") + validatedMetrics["k8s.pod.status_reason_evicted"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Whether this pod status reason is Evicted (1), or not (0).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, 
dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.pod.status_reason_node_affinity": + assert.False(t, validatedMetrics["k8s.pod.status_reason_node_affinity"], "Found a duplicate in the metrics slice: k8s.pod.status_reason_node_affinity") + validatedMetrics["k8s.pod.status_reason_node_affinity"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Whether this pod status reason is NodeAffinity (1), or not (0).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.pod.status_reason_node_lost": + assert.False(t, validatedMetrics["k8s.pod.status_reason_node_lost"], "Found a duplicate in the metrics slice: k8s.pod.status_reason_node_lost") + validatedMetrics["k8s.pod.status_reason_node_lost"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Whether this pod status reason is NodeLost (1), or not (0).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.pod.status_reason_shutdown": + assert.False(t, validatedMetrics["k8s.pod.status_reason_shutdown"], "Found a duplicate in the metrics slice: k8s.pod.status_reason_shutdown") + validatedMetrics["k8s.pod.status_reason_shutdown"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Whether this pod status reason is Shutdown (1), or not (0).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.pod.status_reason_unexpected_admission_error": + assert.False(t, validatedMetrics["k8s.pod.status_reason_unexpected_admission_error"], "Found a duplicate in the metrics slice: k8s.pod.status_reason_unexpected_admission_error") + validatedMetrics["k8s.pod.status_reason_unexpected_admission_error"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Whether this pod status reason is Unexpected Admission Error (1), or not (0).", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) } } }) diff --git a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/testdata/config.yaml b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/testdata/config.yaml index fe14a2b630a6..1195b548057a 100644 --- 
a/receiver/k8sclusterreceiver/internal/pod/internal/metadata/testdata/config.yaml +++ b/receiver/k8sclusterreceiver/internal/pod/internal/metadata/testdata/config.yaml @@ -3,6 +3,16 @@ all_set: metrics: k8s.pod.phase: enabled: true + k8s.pod.status_reason_evicted: + enabled: true + k8s.pod.status_reason_node_affinity: + enabled: true + k8s.pod.status_reason_node_lost: + enabled: true + k8s.pod.status_reason_shutdown: + enabled: true + k8s.pod.status_reason_unexpected_admission_error: + enabled: true resource_attributes: k8s.namespace.name: enabled: true @@ -18,6 +28,16 @@ none_set: metrics: k8s.pod.phase: enabled: false + k8s.pod.status_reason_evicted: + enabled: false + k8s.pod.status_reason_node_affinity: + enabled: false + k8s.pod.status_reason_node_lost: + enabled: false + k8s.pod.status_reason_shutdown: + enabled: false + k8s.pod.status_reason_unexpected_admission_error: + enabled: false resource_attributes: k8s.namespace.name: enabled: false diff --git a/receiver/k8sclusterreceiver/internal/pod/metadata.yaml b/receiver/k8sclusterreceiver/internal/pod/metadata.yaml index a4c3c0802c13..648df15cac6f 100644 --- a/receiver/k8sclusterreceiver/internal/pod/metadata.yaml +++ b/receiver/k8sclusterreceiver/internal/pod/metadata.yaml @@ -36,4 +36,34 @@ metrics: description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) unit: 1 gauge: - value_type: int \ No newline at end of file + value_type: int + k8s.pod.status_reason_evicted: + enabled: true + description: Whether this pod status reason is Evicted (1), or not (0). + unit: 1 + gauge: + value_type: int + k8s.pod.status_reason_node_affinity: + enabled: true + description: Whether this pod status reason is NodeAffinity (1), or not (0). + unit: 1 + gauge: + value_type: int + k8s.pod.status_reason_node_lost: + enabled: true + description: Whether this pod status reason is NodeLost (1), or not (0). + unit: 1 + gauge: + value_type: int + k8s.pod.status_reason_shutdown: + enabled: true + description: Whether this pod status reason is Shutdown (1), or not (0). + unit: 1 + gauge: + value_type: int + k8s.pod.status_reason_unexpected_admission_error: + enabled: true + description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). 
+    unit: 1
+    gauge:
+      value_type: int
diff --git a/receiver/k8sclusterreceiver/internal/pod/pods.go b/receiver/k8sclusterreceiver/internal/pod/pods.go
index 7cc186e02f29..5821fd7b8ed0 100644
--- a/receiver/k8sclusterreceiver/internal/pod/pods.go
+++ b/receiver/k8sclusterreceiver/internal/pod/pods.go
@@ -75,6 +75,46 @@ func GetMetrics(set receiver.CreateSettings, pod *corev1.Pod) pmetric.Metrics {
 	mbphase := imetadataphase.NewMetricsBuilder(imetadataphase.DefaultMetricsBuilderConfig(), set)
 	ts := pcommon.NewTimestampFromTime(time.Now())
 	mbphase.RecordK8sPodPhaseDataPoint(ts, int64(phaseToInt(pod.Status.Phase)))
+
+	switch pod.Status.Reason {
+	case "Evicted":
+		mbphase.RecordK8sPodStatusReasonEvictedDataPoint(ts, 1)
+		mbphase.RecordK8sPodStatusReasonNodeAffinityDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeLostDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonShutdownDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts, 0)
+	case "NodeAffinity":
+		mbphase.RecordK8sPodStatusReasonEvictedDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeAffinityDataPoint(ts, 1)
+		mbphase.RecordK8sPodStatusReasonNodeLostDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonShutdownDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts, 0)
+	case "NodeLost":
+		mbphase.RecordK8sPodStatusReasonEvictedDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeAffinityDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeLostDataPoint(ts, 1)
+		mbphase.RecordK8sPodStatusReasonShutdownDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts, 0)
+	case "Shutdown":
+		mbphase.RecordK8sPodStatusReasonEvictedDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeAffinityDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeLostDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonShutdownDataPoint(ts, 1)
+		mbphase.RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts, 0)
+	case "UnexpectedAdmissionError":
+		mbphase.RecordK8sPodStatusReasonEvictedDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeAffinityDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeLostDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonShutdownDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts, 1)
+	default:
+		mbphase.RecordK8sPodStatusReasonEvictedDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeAffinityDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonNodeLostDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonShutdownDataPoint(ts, 0)
+		mbphase.RecordK8sPodStatusReasonUnexpectedAdmissionErrorDataPoint(ts, 0)
+	}
+
 	metrics := mbphase.Emit(imetadataphase.WithK8sNamespaceName(pod.Namespace), imetadataphase.WithK8sNodeName(pod.Spec.NodeName), imetadataphase.WithK8sPodName(pod.Name), imetadataphase.WithK8sPodUID(string(pod.UID)), imetadataphase.WithOpencensusResourcetype("k8s"))
 
 	for _, c := range pod.Spec.Containers {
diff --git a/receiver/k8sclusterreceiver/internal/pod/pods_test.go b/receiver/k8sclusterreceiver/internal/pod/pods_test.go
index 76afb7a4a727..ef2ee52aee28 100644
--- a/receiver/k8sclusterreceiver/internal/pod/pods_test.go
+++ b/receiver/k8sclusterreceiver/internal/pod/pods_test.go
@@ -54,6 +54,26 @@ func TestPodAndContainerMetricsReportCPUMetrics(t *testing.T) {
 	)
 }
 
+func TestPodStatusReasonAndContainerMetricsReportCPUMetrics(t *testing.T) {
+	pod := testutils.NewPodWithContainer(
+		"1",
+		testutils.NewPodSpecWithContainer("container-name"),
+
testutils.NewEvictedTerminatedPodStatusWithContainer("container-name", containerIDWithPreifx("container-id")), + ) + + m := GetMetrics(receivertest.NewNopCreateSettings(), pod) + expected, err := golden.ReadMetrics(filepath.Join("testdata", "expected_evicted.yaml")) + require.NoError(t, err) + require.NoError(t, pmetrictest.CompareMetrics(expected, m, + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreScopeMetricsOrder(), + ), + ) +} + var containerIDWithPreifx = func(containerID string) string { return "docker://" + containerID } diff --git a/receiver/k8sclusterreceiver/internal/pod/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/pod/testdata/expected.yaml index 867335f49063..38ccda685053 100644 --- a/receiver/k8sclusterreceiver/internal/pod/testdata/expected.yaml +++ b/receiver/k8sclusterreceiver/internal/pod/testdata/expected.yaml @@ -25,6 +25,36 @@ resourceMetrics: - asInt: "3" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" scope: name: otelcol/k8sclusterreceiver version: latest @@ -86,4 +116,4 @@ resourceMetrics: unit: "{cpu}" scope: name: otelcol/k8sclusterreceiver - version: latest \ No newline at end of file + version: latest diff --git a/receiver/k8sclusterreceiver/internal/pod/testdata/expected_evicted.yaml b/receiver/k8sclusterreceiver/internal/pod/testdata/expected_evicted.yaml new file mode 100644 index 000000000000..8e4118784791 --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/pod/testdata/expected_evicted.yaml @@ -0,0 +1,119 @@ +resourceMetrics: + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: test-namespace + - key: k8s.node.name + value: + stringValue: test-node + - key: k8s.pod.name + value: + stringValue: test-pod-1 + - key: k8s.pod.uid + value: + stringValue: test-pod-1-uid + - key: opencensus.resourcetype + value: + stringValue: k8s + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) + gauge: + dataPoints: + - asInt: "4" + name: k8s.pod.phase + unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "1" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). 
+ gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: + name: otelcol/k8sclusterreceiver + version: latest + - resource: + attributes: + - key: container.id + value: + stringValue: container-id + - key: container.image.name + value: + stringValue: container-image-name + - key: container.image.tag + value: + stringValue: latest + - key: k8s.container.name + value: + stringValue: container-name + - key: k8s.namespace.name + value: + stringValue: test-namespace + - key: k8s.node.name + value: + stringValue: test-node + - key: k8s.pod.name + value: + stringValue: test-pod-1 + - key: k8s.pod.uid + value: + stringValue: test-pod-1-uid + - key: opencensus.resourcetype + value: + stringValue: container + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. + gauge: + dataPoints: + - asInt: "3" + name: k8s.container.restarts + unit: "1" + - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) + gauge: + dataPoints: + - asInt: "1" + name: k8s.container.ready + unit: "1" + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 10 + name: k8s.container.cpu_request + unit: "{cpu}" + - description: Maximum resource limit set for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 20 + name: k8s.container.cpu_limit + unit: "{cpu}" + scope: + name: otelcol/k8sclusterreceiver + version: latest diff --git a/receiver/k8sclusterreceiver/internal/testutils/objects.go b/receiver/k8sclusterreceiver/internal/testutils/objects.go index 17c33651a48f..d22dfae06ddd 100644 --- a/receiver/k8sclusterreceiver/internal/testutils/objects.go +++ b/receiver/k8sclusterreceiver/internal/testutils/objects.go @@ -233,6 +233,25 @@ func NewPodStatusWithContainer(containerName, containerID string) *corev1.PodSta } } +func NewEvictedTerminatedPodStatusWithContainer(containerName, containerID string) *corev1.PodStatus { + return &corev1.PodStatus{ + Phase: corev1.PodFailed, + Reason: "Evicted", + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: containerName, + Ready: true, + RestartCount: 3, + Image: "container-image-name", + ContainerID: containerID, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{}, + }, + }, + }, + } +} + func WithOwnerReferences(or []v1.OwnerReference, obj interface{}) interface{} { switch o := obj.(type) { case *corev1.Pod: diff --git a/receiver/k8sclusterreceiver/receiver_test.go b/receiver/k8sclusterreceiver/receiver_test.go index 4419f49c6e3e..dcc496278442 100644 --- a/receiver/k8sclusterreceiver/receiver_test.go +++ b/receiver/k8sclusterreceiver/receiver_test.go @@ -43,6 +43,7 @@ func TestReceiver(t *testing.T) { // Setup k8s resources. numPods := 2 + numPodMetrics := 6 numNodes := 1 numQuotas := 2 numClusterQuotaMetrics := numQuotas * 4 @@ -55,10 +56,11 @@ func TestReceiver(t *testing.T) { // Expects metric data from nodes and pods where each metric data // struct corresponds to one resource. - expectedNumMetrics := numPods + numNodes + numClusterQuotaMetrics + expectedNumMetrics := numPods*numPodMetrics + numNodes + numClusterQuotaMetrics var initialDataPointCount int require.Eventually(t, func() bool { initialDataPointCount = sink.DataPointCount() + return initialDataPointCount == expectedNumMetrics }, 10*time.Second, 100*time.Millisecond, "metrics not collected") @@ -67,10 +69,11 @@ func TestReceiver(t *testing.T) { deletePods(t, client, numPodsToDelete) // Expects metric data from a node, since other resources were deleted. 
- expectedNumMetrics = (numPods - numPodsToDelete) + numNodes + numClusterQuotaMetrics + expectedNumMetrics = (numPods-numPodsToDelete)*numPodMetrics + numNodes + numClusterQuotaMetrics var metricsCountDelta int require.Eventually(t, func() bool { metricsCountDelta = sink.DataPointCount() - initialDataPointCount + return metricsCountDelta == expectedNumMetrics }, 10*time.Second, 100*time.Millisecond, "updated metrics not collected") @@ -113,8 +116,9 @@ func TestReceiverWithManyResources(t *testing.T) { r := setupReceiver(client, osQuotaClient, sink, 10*time.Second, tt) numPods := 1000 + numPodMetrics := 6 numQuotas := 2 - numExpectedMetrics := numPods + numQuotas*4 + numExpectedMetrics := numPods*numPodMetrics + numQuotas*4 createPods(t, client, numPods) createClusterQuota(t, osQuotaClient, 2) diff --git a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml index 63ec1c246b39..6cf2f2f56c5f 100644 --- a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml +++ b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml @@ -470,6 +470,36 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" scope: name: otelcol/k8sclusterreceiver version: latest @@ -500,6 +530,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -530,6 +591,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). 
+ gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -560,6 +652,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -590,6 +713,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -620,6 +774,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). 
+ gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -650,6 +835,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -680,6 +896,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -710,6 +957,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). 
+ gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest @@ -740,6 +1018,37 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.pod.phase unit: "1" + - description: Whether this pod status reason is Evicted (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_evicted + unit: "1" + - description: Whether this pod status reason is NodeAffinity (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_affinity + unit: "1" + - description: Whether this pod status reason is NodeLost (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_node_lost + unit: "1" + - description: Whether this pod status reason is Shutdown (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_shutdown + unit: "1" + - description: Whether this pod status reason is Unexpected Admission Error (1), or not (0). + gauge: + dataPoints: + - asInt: "0" + name: k8s.pod.status_reason_unexpected_admission_error + unit: "1" + scope: name: otelcol/k8sclusterreceiver version: latest
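
Usage note: all five status_reason gauges default to enabled (see DefaultMetricsConfig in generated_config.go above), and GetMetrics records a 0/1 data point for every one of them on each pod, so each pod now emits six pod-level data points per collection. The sketch below shows one way to keep k8s.pod.phase while turning the per-reason gauges off; the metric keys are taken verbatim from internal/pod/internal/metadata/testdata/config.yaml in this patch, but where this block is exposed in the end-user k8s_cluster receiver configuration depends on how MetricsBuilderConfig is wired into the receiver, so the placement shown is an assumption, not a documented option.

    # assumed placement: wherever the receiver surfaces the generated metrics config
    metrics:
      k8s.pod.phase:
        enabled: true
      k8s.pod.status_reason_evicted:
        enabled: false
      k8s.pod.status_reason_node_affinity:
        enabled: false
      k8s.pod.status_reason_node_lost:
        enabled: false
      k8s.pod.status_reason_shutdown:
        enabled: false
      k8s.pod.status_reason_unexpected_admission_error:
        enabled: false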