From ec5e49753c1d916e1178817976535d46362a8cde Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Tue, 12 Nov 2024 16:53:14 +0530 Subject: [PATCH 1/9] Added support for nginx vts. Recording timing metrics --- receiver/nginxreceiver/config.go | 2 + receiver/nginxreceiver/documentation.md | 23 +++ .../nginxreceiver/generated_package_test.go | 3 +- receiver/nginxreceiver/go.mod | 6 + receiver/nginxreceiver/go.sum | 9 + .../internal/metadata/generated_config.go | 16 +- .../metadata/generated_config_test.go | 20 +- .../internal/metadata/generated_metrics.go | 151 +++++++++++++-- .../metadata/generated_metrics_test.go | 38 ++++ .../internal/metadata/testdata/config.yaml | 8 + receiver/nginxreceiver/metadata.yaml | 30 ++- receiver/nginxreceiver/nginx.go | 133 +++++++++++++ receiver/nginxreceiver/scraper.go | 40 +++- receiver/nginxreceiver/vts_stats.go | 174 ++++++++++++++++++ 14 files changed, 618 insertions(+), 35 deletions(-) create mode 100644 receiver/nginxreceiver/nginx.go create mode 100644 receiver/nginxreceiver/vts_stats.go diff --git a/receiver/nginxreceiver/config.go b/receiver/nginxreceiver/config.go index 4a81800a208f..abf7f7a35647 100644 --- a/receiver/nginxreceiver/config.go +++ b/receiver/nginxreceiver/config.go @@ -14,4 +14,6 @@ type Config struct { scraperhelper.ControllerConfig `mapstructure:",squash"` confighttp.ClientConfig `mapstructure:",squash"` MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"` + + VTSEndpoint string `mapstructure:"vts_endpoint"` } diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index 3dd32a196cde..32ce1a676262 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -42,6 +42,14 @@ The total number of handled connections. 
Generally, the parameter value is the s | ---- | ----------- | ---------- | ----------------------- | --------- | | connections | Sum | Int | Cumulative | true | +### nginx.load_timestamp + +Time of the last reload of configuration (time since Epoch). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + ### nginx.requests Total number of requests made to the server since it started @@ -49,3 +57,18 @@ Total number of requests made to the server since it started | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | | requests | Sum | Int | Cumulative | true | + +### nginx.upstream.peers.response_time + +The average time to receive the last byte of data from this server. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | diff --git a/receiver/nginxreceiver/generated_package_test.go b/receiver/nginxreceiver/generated_package_test.go index 4f3af726f855..a48783927193 100644 --- a/receiver/nginxreceiver/generated_package_test.go +++ b/receiver/nginxreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package nginxreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/nginxreceiver/go.mod b/receiver/nginxreceiver/go.mod index 43bb6702f8c6..bd70ddd1ed18 100644 --- a/receiver/nginxreceiver/go.mod +++ b/receiver/nginxreceiver/go.mod @@ -25,6 +25,12 @@ require ( go.uber.org/zap v1.27.0 ) +require ( + github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect +) + require ( dario.cat/mergo 
v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect diff --git a/receiver/nginxreceiver/go.sum b/receiver/nginxreceiver/go.sum index 718ece24b715..6ce7b9ede932 100644 --- a/receiver/nginxreceiver/go.sum +++ b/receiver/nginxreceiver/go.sum @@ -56,6 +56,10 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= +github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= @@ -74,6 +78,10 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16 
h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -239,6 +247,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= diff --git a/receiver/nginxreceiver/internal/metadata/generated_config.go b/receiver/nginxreceiver/internal/metadata/generated_config.go index e7a5fdfedeb2..ca163e97a465 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config.go @@ -27,10 +27,12 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for nginx metrics. 
type MetricsConfig struct { - NginxConnectionsAccepted MetricConfig `mapstructure:"nginx.connections_accepted"` - NginxConnectionsCurrent MetricConfig `mapstructure:"nginx.connections_current"` - NginxConnectionsHandled MetricConfig `mapstructure:"nginx.connections_handled"` - NginxRequests MetricConfig `mapstructure:"nginx.requests"` + NginxConnectionsAccepted MetricConfig `mapstructure:"nginx.connections_accepted"` + NginxConnectionsCurrent MetricConfig `mapstructure:"nginx.connections_current"` + NginxConnectionsHandled MetricConfig `mapstructure:"nginx.connections_handled"` + NginxLoadTimestamp MetricConfig `mapstructure:"nginx.load_timestamp"` + NginxRequests MetricConfig `mapstructure:"nginx.requests"` + NginxUpstreamPeersResponseTime MetricConfig `mapstructure:"nginx.upstream.peers.response_time"` } func DefaultMetricsConfig() MetricsConfig { @@ -44,9 +46,15 @@ func DefaultMetricsConfig() MetricsConfig { NginxConnectionsHandled: MetricConfig{ Enabled: true, }, + NginxLoadTimestamp: MetricConfig{ + Enabled: true, + }, NginxRequests: MetricConfig{ Enabled: true, }, + NginxUpstreamPeersResponseTime: MetricConfig{ + Enabled: true, + }, } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index 6613f525b5bf..4d112b51505a 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -25,10 +25,12 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - NginxConnectionsAccepted: MetricConfig{Enabled: true}, - NginxConnectionsCurrent: MetricConfig{Enabled: true}, - NginxConnectionsHandled: MetricConfig{Enabled: true}, - NginxRequests: MetricConfig{Enabled: true}, + NginxConnectionsAccepted: MetricConfig{Enabled: true}, + NginxConnectionsCurrent: MetricConfig{Enabled: true}, + NginxConnectionsHandled: 
MetricConfig{Enabled: true}, + NginxLoadTimestamp: MetricConfig{Enabled: true}, + NginxRequests: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, }, }, }, @@ -36,10 +38,12 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - NginxConnectionsAccepted: MetricConfig{Enabled: false}, - NginxConnectionsCurrent: MetricConfig{Enabled: false}, - NginxConnectionsHandled: MetricConfig{Enabled: false}, - NginxRequests: MetricConfig{Enabled: false}, + NginxConnectionsAccepted: MetricConfig{Enabled: false}, + NginxConnectionsCurrent: MetricConfig{Enabled: false}, + NginxConnectionsHandled: MetricConfig{Enabled: false}, + NginxLoadTimestamp: MetricConfig{Enabled: false}, + NginxRequests: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, }, }, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index af41bd69db7f..b5ca616f8e1d 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -200,6 +200,55 @@ func newMetricNginxConnectionsHandled(cfg MetricConfig) metricNginxConnectionsHa return m } +type metricNginxLoadTimestamp struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.load_timestamp metric with initial data. 
+func (m *metricNginxLoadTimestamp) init() { + m.data.SetName("nginx.load_timestamp") + m.data.SetDescription("Time of the last reload of configuration (time since Epoch).") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() +} + +func (m *metricNginxLoadTimestamp) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxLoadTimestamp) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxLoadTimestamp) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxLoadTimestamp(cfg MetricConfig) metricNginxLoadTimestamp { + m := metricNginxLoadTimestamp{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxRequests struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -251,18 +300,72 @@ func newMetricNginxRequests(cfg MetricConfig) metricNginxRequests { return m } +type metricNginxUpstreamPeersResponseTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.response_time metric with initial data. 
+func (m *metricNginxUpstreamPeersResponseTime) init() { + m.data.SetName("nginx.upstream.peers.response_time") + m.data.SetDescription("The average time to receive the last byte of data from this server.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersResponseTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersResponseTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponseTime(cfg MetricConfig) metricNginxUpstreamPeersResponseTime { + m := metricNginxUpstreamPeersResponseTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. 
type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. - metricNginxConnectionsAccepted metricNginxConnectionsAccepted - metricNginxConnectionsCurrent metricNginxConnectionsCurrent - metricNginxConnectionsHandled metricNginxConnectionsHandled - metricNginxRequests metricNginxRequests + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. + metricNginxConnectionsAccepted metricNginxConnectionsAccepted + metricNginxConnectionsCurrent metricNginxConnectionsCurrent + metricNginxConnectionsHandled metricNginxConnectionsHandled + metricNginxLoadTimestamp metricNginxLoadTimestamp + metricNginxRequests metricNginxRequests + metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime } // MetricBuilderOption applies changes to default metrics builder. 
@@ -285,14 +388,16 @@ func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(mbc.Metrics.NginxConnectionsAccepted), - metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(mbc.Metrics.NginxConnectionsCurrent), - metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(mbc.Metrics.NginxConnectionsHandled), - metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(mbc.Metrics.NginxConnectionsAccepted), + metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(mbc.Metrics.NginxConnectionsCurrent), + metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(mbc.Metrics.NginxConnectionsHandled), + metricNginxLoadTimestamp: newMetricNginxLoadTimestamp(mbc.Metrics.NginxLoadTimestamp), + metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), + metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), } for _, op := range options { @@ -361,7 +466,9 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxConnectionsAccepted.emit(ils.Metrics()) mb.metricNginxConnectionsCurrent.emit(ils.Metrics()) mb.metricNginxConnectionsHandled.emit(ils.Metrics()) + mb.metricNginxLoadTimestamp.emit(ils.Metrics()) mb.metricNginxRequests.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponseTime.emit(ils.Metrics()) for _, op := range options { 
op.apply(rm) @@ -398,11 +505,21 @@ func (mb *MetricsBuilder) RecordNginxConnectionsHandledDataPoint(ts pcommon.Time mb.metricNginxConnectionsHandled.recordDataPoint(mb.startTime, ts, val) } +// RecordNginxLoadTimestampDataPoint adds a data point to nginx.load_timestamp metric. +func (mb *MetricsBuilder) RecordNginxLoadTimestampDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxLoadTimestamp.recordDataPoint(mb.startTime, ts, val) +} + // RecordNginxRequestsDataPoint adds a data point to nginx.requests metric. func (mb *MetricsBuilder) RecordNginxRequestsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricNginxRequests.recordDataPoint(mb.startTime, ts, val) } +// RecordNginxUpstreamPeersResponseTimeDataPoint adds a data point to nginx.upstream.peers.response_time metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponseTimeDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersResponseTime.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index 1c4ffdbf469b..47b4ad5cc70e 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -71,10 +71,18 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxConnectionsHandledDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxLoadTimestampDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxRequestsDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponseTimeDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) @@ -142,6 +150,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.load_timestamp": + assert.False(t, validatedMetrics["nginx.load_timestamp"], "Found a duplicate in the metrics slice: nginx.load_timestamp") + validatedMetrics["nginx.load_timestamp"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time of the last reload of configuration (time since Epoch).", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "nginx.requests": assert.False(t, validatedMetrics["nginx.requests"], "Found a duplicate in the metrics slice: nginx.requests") 
validatedMetrics["nginx.requests"] = true @@ -156,6 +176,24 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.upstream.peers.response_time": + assert.False(t, validatedMetrics["nginx.upstream.peers.response_time"], "Found a duplicate in the metrics slice: nginx.upstream.peers.response_time") + validatedMetrics["nginx.upstream.peers.response_time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The average time to receive the last byte of data from this server.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) } } }) diff --git a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml index 05f6368506a2..7b16a0fec7da 100644 --- a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml @@ -7,8 +7,12 @@ all_set: enabled: true nginx.connections_handled: enabled: true + nginx.load_timestamp: + enabled: true nginx.requests: enabled: true + nginx.upstream.peers.response_time: + enabled: true none_set: metrics: nginx.connections_accepted: @@ -17,5 +21,9 @@ none_set: enabled: false nginx.connections_handled: enabled: false + 
nginx.load_timestamp: + enabled: false nginx.requests: enabled: false + nginx.upstream.peers.response_time: + enabled: false diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 90ef854d1fef..97e9eea30bf6 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -17,7 +17,13 @@ attributes: - reading - writing - waiting - + upstream_block_name: + description: The name of the upstream block + type: string + upstream_peer_address: + description: The address of the upstream server + type: string + metrics: nginx.requests: enabled: true @@ -55,3 +61,25 @@ metrics: monotonic: false aggregation_temporality: cumulative attributes: [state] + + ### Timing metrics + nginx.load_timestamp: + enabled: true + description: Time of the last reload of configuration (time since Epoch). + gauge: + value_type: int + unit: ms + + nginx.upstream.peers.response_time: + attributes: + - upstream_block_name + - upstream_peer_address + enabled: true + description: The average time to receive the last byte of data from this server. + gauge: + value_type: int + unit: ms + +# `nginx.load_timestamp` - Time when NGINX was loaded +# - `nginx.timestamp` - Current timestamp +# - `nginx.upstream.peers.response_time` - Response time in milliseconds for upstream servers diff --git a/receiver/nginxreceiver/nginx.go b/receiver/nginxreceiver/nginx.go new file mode 100644 index 000000000000..b929c072c99e --- /dev/null +++ b/receiver/nginxreceiver/nginx.go @@ -0,0 +1,133 @@ +package nginxreceiver + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" +) + +const templateMetrics string = `Active connections: %d +server accepts handled requests +%d %d %d +Reading: %d Writing: %d Waiting: %d +` + +// NginxClient allows you to fetch NGINX metrics from the stub_status page. +type NginxClient struct { + apiEndpoint string + vtsEndpoint string + httpClient *http.Client +} + +// StubStats represents NGINX stub_status metrics. 
+type StubStats struct { + Connections StubConnections + Requests int64 +} + +// StubConnections represents connections related metrics. +type StubConnections struct { + Active int64 + Accepted int64 + Handled int64 + Reading int64 + Writing int64 + Waiting int64 +} + +// NewNginxClient creates an NginxClient. +func NewNginxClient(httpClient *http.Client, apiEndpoint string, vtsEndpoint string) (*NginxClient, error) { + client := &NginxClient{ + apiEndpoint: apiEndpoint, + vtsEndpoint: vtsEndpoint, + httpClient: httpClient, + } + + _, err := client.GetStubStats() + return client, err +} + +// GetStubStats fetches the stub_status metrics. +func (client *NginxClient) GetStubStats() (*StubStats, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, client.apiEndpoint, nil) + if err != nil { + return nil, fmt.Errorf("failed to create a get request: %w", err) + } + resp, err := client.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to get %v: %w", client.apiEndpoint, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("expected %v response, got %v", http.StatusOK, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read the response body: %w", err) + } + + r := bytes.NewReader(body) + stats, err := parseStubStats(r) + if err != nil { + return nil, fmt.Errorf("failed to parse response body %q: %w", string(body), err) + } + + return stats, nil +} + +func (client *NginxClient) GetVtsStats() (*NginxVtsStatus, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, client.vtsEndpoint, nil) + if err != nil { + return nil, fmt.Errorf("failed to create a get request: %w", err) + } + + resp, err := client.httpClient.Do(req) + if err != nil { + return nil, 
fmt.Errorf("failed to get %v: %w", client.apiEndpoint, err) + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + + return nil, fmt.Errorf("expected %v response, got %v", http.StatusOK, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read the response body: %w", err) + } + + r := bytes.NewReader(body) + stats, err := ParseVtsStats(r) + if err != nil { + return nil, fmt.Errorf("failed to parse response body %q: %w", string(body), err) + } + + return stats, nil +} + +func parseStubStats(r io.Reader) (*StubStats, error) { + var s StubStats + if _, err := fmt.Fscanf(r, templateMetrics, + &s.Connections.Active, + &s.Connections.Accepted, + &s.Connections.Handled, + &s.Requests, + &s.Connections.Reading, + &s.Connections.Writing, + &s.Connections.Waiting); err != nil { + return nil, fmt.Errorf("failed to scan template metrics: %w", err) + } + return &s, nil +} diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 7f202ab40c2b..85bef18fda9f 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -8,19 +8,19 @@ import ( "net/http" "time" - "github.com/nginxinc/nginx-prometheus-exporter/client" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" + "github.com/k0kubun/pp" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver/internal/metadata" ) type nginxScraper struct { httpClient *http.Client - client *client.NginxClient + client *NginxClient settings component.TelemetrySettings cfg *Config @@ -53,7 +53,8 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { // Init client in scrape method in case there are transient errors in the constructor. 
if r.client == nil { var err error - r.client, err = client.NewNginxClient(r.httpClient, r.cfg.ClientConfig.Endpoint) + r.client, err = NewNginxClient(r.httpClient, r.cfg.ClientConfig.Endpoint, r.cfg.VTSEndpoint) + if err != nil { r.client = nil return pmetric.Metrics{}, err @@ -66,7 +67,19 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { return pmetric.Metrics{}, err } + vtsStats, err := r.client.GetVtsStats() + + if err != nil { + r.settings.Logger.Error("Failed to fetch nginx stats", zap.Error(err)) + return pmetric.Metrics{}, err + } + + // pp.Println(vtsStats) + now := pcommon.NewTimestampFromTime(time.Now()) + + r.recordVtsStats(now, vtsStats) + r.mb.RecordNginxRequestsDataPoint(now, stats.Requests) r.mb.RecordNginxConnectionsAcceptedDataPoint(now, stats.Connections.Accepted) r.mb.RecordNginxConnectionsHandledDataPoint(now, stats.Connections.Handled) @@ -74,5 +87,26 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Reading, metadata.AttributeStateReading) r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Writing, metadata.AttributeStateWriting) r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Waiting, metadata.AttributeStateWaiting) + return r.mb.Emit(), nil } + +func (r *nginxScraper) recordVtsStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + r.recordTimingStats(now, vtsStats) +} + +func (r *nginxScraper) recordTimingStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + + // r.mb.RecordNginxLoadTimestampDataPoint(now, vtsStats.LoadMsec) + + for upstreamZones, v := range vtsStats.UpstreamZones { + for _, val := range v { + pp.Println(val.Server) + pp.Println(val.ResponseMsec) + + r.mb.RecordNginxUpstreamPeersResponseTimeDataPoint( + now, val.ResponseMsec, upstreamZones, val.Server, + ) + } + } +} diff --git a/receiver/nginxreceiver/vts_stats.go b/receiver/nginxreceiver/vts_stats.go new file 
mode 100644 index 000000000000..0b7dd88d16bf --- /dev/null +++ b/receiver/nginxreceiver/vts_stats.go @@ -0,0 +1,174 @@ +package nginxreceiver + +import ( + "encoding/json" + "fmt" + "io" +) + +type NginxVtsStatus struct { + HostName string `json:"hostName"` + ModuleVersion string `json:"moduleVersion"` + NginxVersion string `json:"nginxVersion"` + LoadMsec int64 `json:"loadMsec"` + NowMsec int64 `json:"nowMsec"` + Connections Connections `json:"connections"` + SharedZones SharedZones `json:"sharedZones"` + ServerZones ServerZones `json:"serverZones"` + FilterZones FilterZones `json:"filterZones"` + UpstreamZones UpstreamZones `json:"upstreamZones"` + CacheZones CacheZones `json:"cacheZones"` +} + +type Connections struct { + Active int64 `json:"active"` + Reading int64 `json:"reading"` + Writing int64 `json:"writing"` + Waiting int64 `json:"waiting"` + Accepted int64 `json:"accepted"` + Handled int64 `json:"handled"` + Requests int64 `json:"requests"` +} + +type SharedZones struct { + Name string `json:"name"` + MaxSize int64 `json:"maxSize"` + UsedSize int64 `json:"usedSize"` + UsedNode int64 `json:"usedNode"` +} + +type Responses struct { + Status1xx int64 `json:"1xx"` + Status2xx int64 `json:"2xx"` + Status3xx int64 `json:"3xx"` + Status4xx int64 `json:"4xx"` + Status5xx int64 `json:"5xx"` + Miss int64 `json:"miss"` + Bypass int64 `json:"bypass"` + Expired int64 `json:"expired"` + Stale int64 `json:"stale"` + Updating int64 `json:"updating"` + Revalidated int64 `json:"revalidated"` + Hit int64 `json:"hit"` + Scarce int64 `json:"scarce"` +} + +type RequestMetrics struct { + Times []int64 `json:"times"` + Msecs []int64 `json:"msecs"` +} + +type RequestBuckets struct { + Msecs []int64 `json:"msecs"` + Counters []int64 `json:"counters"` +} + +type ZoneStats struct { + RequestCounter int64 `json:"requestCounter"` + InBytes int64 `json:"inBytes"` + OutBytes int64 `json:"outBytes"` + Responses Responses `json:"responses"` + RequestMsecCounter int64 
`json:"requestMsecCounter"` + RequestMsec int64 `json:"requestMsec"` + RequestMsecs RequestMetrics `json:"requestMsecs"` + RequestBuckets RequestBuckets `json:"requestBuckets"` +} + +type ServerZones map[string]ZoneStats + +type FilterZones map[string]map[string]ZoneStats + +type UpstreamServer struct { + ZoneStats + Server string `json:"server"` + ResponseMsecCounter int64 `json:"responseMsecCounter"` + ResponseMsec int64 `json:"responseMsec"` + ResponseMsecs RequestMetrics `json:"responseMsecs"` + ResponseBuckets RequestBuckets `json:"responseBuckets"` + Weight int `json:"weight"` + MaxFails int `json:"maxFails"` + FailTimeout int `json:"failTimeout"` + Backup bool `json:"backup"` + Down bool `json:"down"` +} + +type UpstreamZones map[string][]UpstreamServer + +type CacheZoneStats struct { + MaxSize int64 `json:"maxSize"` + UsedSize int64 `json:"usedSize"` + InBytes int64 `json:"inBytes"` + OutBytes int64 `json:"outBytes"` + Responses Responses `json:"responses"` +} + +type CacheZones map[string]CacheZoneStats + +func ParseVtsStats(r io.Reader) (*NginxVtsStatus, error) { + decoder := json.NewDecoder(r) + + // Create a map to store raw JSON first + var rawData map[string]interface{} + if err := decoder.Decode(&rawData); err != nil { + return nil, fmt.Errorf("failed to decode JSON: %w", err) + } + + // Marshal back to JSON bytes to ensure proper handling of numeric types + jsonBytes, err := json.Marshal(rawData) + if err != nil { + return nil, fmt.Errorf("failed to marshal intermediate JSON: %w", err) + } + + // Unmarshal into our structured type + var stats NginxVtsStatus + if err := json.Unmarshal(jsonBytes, &stats); err != nil { + return nil, fmt.Errorf("failed to unmarshal into NginxStatus: %w", err) + } + + // Validate required fields + if err := validateStats(&stats); err != nil { + return nil, fmt.Errorf("stats validation failed: %w", err) + } + + return &stats, nil +} + +func validateStats(stats *NginxVtsStatus) error { + if stats == nil { + return 
fmt.Errorf("stats cannot be nil") + } + + // Validate required string fields + if stats.HostName == "" { + return fmt.Errorf("hostName is required") + } + if stats.ModuleVersion == "" { + return fmt.Errorf("moduleVersion is required") + } + if stats.NginxVersion == "" { + return fmt.Errorf("nginxVersion is required") + } + + // Validate time fields + if stats.LoadMsec <= 0 { + return fmt.Errorf("loadMsec must be positive") + } + if stats.NowMsec <= 0 { + return fmt.Errorf("nowMsec must be positive") + } + + // Validate connections + if stats.Connections.Handled < 0 || + stats.Connections.Accepted < 0 || + stats.Connections.Active < 0 || + stats.Connections.Requests < 0 { + return fmt.Errorf("connection counts cannot be negative") + } + + // Basic validation of zones + if len(stats.ServerZones) == 0 { + return fmt.Errorf("serverZones cannot be empty") + } + + return nil +} From ca036337be829d161229229c3f3017c286fbfbf5 Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Thu, 14 Nov 2024 11:34:40 +0530 Subject: [PATCH 2/9] added connections stats --- receiver/nginxreceiver/documentation.md | 26 ++- .../internal/metadata/generated_config.go | 12 ++ .../metadata/generated_config_test.go | 6 + .../internal/metadata/generated_metrics.go | 171 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 48 +++++ .../internal/metadata/testdata/config.yaml | 12 ++ receiver/nginxreceiver/metadata.yaml | 26 ++- receiver/nginxreceiver/scraper.go | 7 + 8 files changed, 303 insertions(+), 5 deletions(-) diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index 32ce1a676262..3b23078add63 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -50,6 +50,30 @@ Time of the last reload of configuration (time since Epoch). 
| ---- | ----------- | ---------- | | ms | Gauge | Int | +### nginx.net.reading + +Current number of connections where NGINX is reading the request header + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| connections | Gauge | Int | + +### nginx.net.waiting + +Current number of connections where NGINX is waiting the response back to the client + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| connections | Gauge | Int | + +### nginx.net.writing + +Current number of connections where NGINX is writing the response back to the client + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| connections | Gauge | Int | + ### nginx.requests Total number of requests made to the server since it started @@ -71,4 +95,4 @@ The average time to receive the last byte of data from this server. | Name | Description | Values | | ---- | ----------- | ------ | | upstream_block_name | The name of the upstream block | Any Str | -| upstream_peer_address | The address of the upstream server | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | diff --git a/receiver/nginxreceiver/internal/metadata/generated_config.go b/receiver/nginxreceiver/internal/metadata/generated_config.go index ca163e97a465..5211a9e27d11 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config.go @@ -31,6 +31,9 @@ type MetricsConfig struct { NginxConnectionsCurrent MetricConfig `mapstructure:"nginx.connections_current"` NginxConnectionsHandled MetricConfig `mapstructure:"nginx.connections_handled"` NginxLoadTimestamp MetricConfig `mapstructure:"nginx.load_timestamp"` + NginxNetReading MetricConfig `mapstructure:"nginx.net.reading"` + NginxNetWaiting MetricConfig `mapstructure:"nginx.net.waiting"` + NginxNetWriting MetricConfig `mapstructure:"nginx.net.writing"` NginxRequests MetricConfig `mapstructure:"nginx.requests"` 
NginxUpstreamPeersResponseTime MetricConfig `mapstructure:"nginx.upstream.peers.response_time"` } @@ -49,6 +52,15 @@ func DefaultMetricsConfig() MetricsConfig { NginxLoadTimestamp: MetricConfig{ Enabled: true, }, + NginxNetReading: MetricConfig{ + Enabled: true, + }, + NginxNetWaiting: MetricConfig{ + Enabled: true, + }, + NginxNetWriting: MetricConfig{ + Enabled: true, + }, NginxRequests: MetricConfig{ Enabled: true, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index 4d112b51505a..958a7855aa1e 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -29,6 +29,9 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxConnectionsCurrent: MetricConfig{Enabled: true}, NginxConnectionsHandled: MetricConfig{Enabled: true}, NginxLoadTimestamp: MetricConfig{Enabled: true}, + NginxNetReading: MetricConfig{Enabled: true}, + NginxNetWaiting: MetricConfig{Enabled: true}, + NginxNetWriting: MetricConfig{Enabled: true}, NginxRequests: MetricConfig{Enabled: true}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, }, @@ -42,6 +45,9 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxConnectionsCurrent: MetricConfig{Enabled: false}, NginxConnectionsHandled: MetricConfig{Enabled: false}, NginxLoadTimestamp: MetricConfig{Enabled: false}, + NginxNetReading: MetricConfig{Enabled: false}, + NginxNetWaiting: MetricConfig{Enabled: false}, + NginxNetWriting: MetricConfig{Enabled: false}, NginxRequests: MetricConfig{Enabled: false}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index b5ca616f8e1d..dfe8ebae076b 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ 
b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -249,6 +249,153 @@ func newMetricNginxLoadTimestamp(cfg MetricConfig) metricNginxLoadTimestamp { return m } +type metricNginxNetReading struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.net.reading metric with initial data. +func (m *metricNginxNetReading) init() { + m.data.SetName("nginx.net.reading") + m.data.SetDescription("Current number of connections where NGINX is reading the request header") + m.data.SetUnit("connections") + m.data.SetEmptyGauge() +} + +func (m *metricNginxNetReading) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxNetReading) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxNetReading) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxNetReading(cfg MetricConfig) metricNginxNetReading { + m := metricNginxNetReading{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxNetWaiting struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills nginx.net.waiting metric with initial data. +func (m *metricNginxNetWaiting) init() { + m.data.SetName("nginx.net.waiting") + m.data.SetDescription("Current number of connections where NGINX is waiting the response back to the client") + m.data.SetUnit("connections") + m.data.SetEmptyGauge() +} + +func (m *metricNginxNetWaiting) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxNetWaiting) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxNetWaiting) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxNetWaiting(cfg MetricConfig) metricNginxNetWaiting { + m := metricNginxNetWaiting{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxNetWriting struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.net.writing metric with initial data. 
+func (m *metricNginxNetWriting) init() { + m.data.SetName("nginx.net.writing") + m.data.SetDescription("Current number of connections where NGINX is writing the response back to the client") + m.data.SetUnit("connections") + m.data.SetEmptyGauge() +} + +func (m *metricNginxNetWriting) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxNetWriting) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxNetWriting) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxNetWriting(cfg MetricConfig) metricNginxNetWriting { + m := metricNginxNetWriting{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxRequests struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -364,6 +511,9 @@ type MetricsBuilder struct { metricNginxConnectionsCurrent metricNginxConnectionsCurrent metricNginxConnectionsHandled metricNginxConnectionsHandled metricNginxLoadTimestamp metricNginxLoadTimestamp + metricNginxNetReading metricNginxNetReading + metricNginxNetWaiting metricNginxNetWaiting + metricNginxNetWriting metricNginxNetWriting metricNginxRequests metricNginxRequests metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime } @@ -396,6 +546,9 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(mbc.Metrics.NginxConnectionsCurrent), metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(mbc.Metrics.NginxConnectionsHandled), metricNginxLoadTimestamp: newMetricNginxLoadTimestamp(mbc.Metrics.NginxLoadTimestamp), + metricNginxNetReading: newMetricNginxNetReading(mbc.Metrics.NginxNetReading), + metricNginxNetWaiting: newMetricNginxNetWaiting(mbc.Metrics.NginxNetWaiting), + metricNginxNetWriting: newMetricNginxNetWriting(mbc.Metrics.NginxNetWriting), metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), } @@ -467,6 +620,9 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxConnectionsCurrent.emit(ils.Metrics()) mb.metricNginxConnectionsHandled.emit(ils.Metrics()) mb.metricNginxLoadTimestamp.emit(ils.Metrics()) + mb.metricNginxNetReading.emit(ils.Metrics()) + mb.metricNginxNetWaiting.emit(ils.Metrics()) + mb.metricNginxNetWriting.emit(ils.Metrics()) mb.metricNginxRequests.emit(ils.Metrics()) mb.metricNginxUpstreamPeersResponseTime.emit(ils.Metrics()) @@ -510,6 +666,21 @@ func (mb *MetricsBuilder) RecordNginxLoadTimestampDataPoint(ts pcommon.Timestamp mb.metricNginxLoadTimestamp.recordDataPoint(mb.startTime, ts, val) } +// 
RecordNginxNetReadingDataPoint adds a data point to nginx.net.reading metric. +func (mb *MetricsBuilder) RecordNginxNetReadingDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxNetReading.recordDataPoint(mb.startTime, ts, val) +} + +// RecordNginxNetWaitingDataPoint adds a data point to nginx.net.waiting metric. +func (mb *MetricsBuilder) RecordNginxNetWaitingDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxNetWaiting.recordDataPoint(mb.startTime, ts, val) +} + +// RecordNginxNetWritingDataPoint adds a data point to nginx.net.writing metric. +func (mb *MetricsBuilder) RecordNginxNetWritingDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxNetWriting.recordDataPoint(mb.startTime, ts, val) +} + // RecordNginxRequestsDataPoint adds a data point to nginx.requests metric. func (mb *MetricsBuilder) RecordNginxRequestsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricNginxRequests.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index 47b4ad5cc70e..cccc03f4d6ad 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -75,6 +75,18 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxLoadTimestampDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxNetReadingDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxNetWaitingDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxNetWritingDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxRequestsDataPoint(ts, 1) @@ -162,6 +174,42 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.net.reading": 
+ assert.False(t, validatedMetrics["nginx.net.reading"], "Found a duplicate in the metrics slice: nginx.net.reading") + validatedMetrics["nginx.net.reading"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Current number of connections where NGINX is reading the request header", ms.At(i).Description()) + assert.Equal(t, "connections", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.net.waiting": + assert.False(t, validatedMetrics["nginx.net.waiting"], "Found a duplicate in the metrics slice: nginx.net.waiting") + validatedMetrics["nginx.net.waiting"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Current number of connections where NGINX is waiting the response back to the client", ms.At(i).Description()) + assert.Equal(t, "connections", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.net.writing": + assert.False(t, validatedMetrics["nginx.net.writing"], "Found a duplicate in the metrics slice: nginx.net.writing") + validatedMetrics["nginx.net.writing"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Current number of connections where NGINX is writing the response back to the client", ms.At(i).Description()) + assert.Equal(t, "connections", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, 
dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "nginx.requests": assert.False(t, validatedMetrics["nginx.requests"], "Found a duplicate in the metrics slice: nginx.requests") validatedMetrics["nginx.requests"] = true diff --git a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml index 7b16a0fec7da..1bcd74b00604 100644 --- a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml @@ -9,6 +9,12 @@ all_set: enabled: true nginx.load_timestamp: enabled: true + nginx.net.reading: + enabled: true + nginx.net.waiting: + enabled: true + nginx.net.writing: + enabled: true nginx.requests: enabled: true nginx.upstream.peers.response_time: @@ -23,6 +29,12 @@ none_set: enabled: false nginx.load_timestamp: enabled: false + nginx.net.reading: + enabled: false + nginx.net.waiting: + enabled: false + nginx.net.writing: + enabled: false nginx.requests: enabled: false nginx.upstream.peers.response_time: diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 97e9eea30bf6..22c7689f50fc 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -21,7 +21,7 @@ attributes: description: The name of the upstream block type: string upstream_peer_address: - description: The address of the upstream server + description: The address of the upstream server type: string metrics: @@ -80,6 +80,24 @@ metrics: value_type: int unit: ms -# `nginx.load_timestamp` - Time when NGINX was loaded -# - `nginx.timestamp` - Current timestamp -# - `nginx.upstream.peers.response_time` - Response time in milliseconds for upstream servers + nginx.net.reading: + enabled: true + description: Current number of connections where NGINX is reading the request header + 
gauge: + value_type: int + unit: connections + + nginx.net.writing: + enabled: true + description: Current number of connections where NGINX is writing the response back to the client + gauge: + value_type: int + unit: connections + + nginx.net.waiting: + enabled: true + description: Current number of connections where NGINX is waiting the response back to the client + gauge: + value_type: int + unit: connections + diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 85bef18fda9f..af5e3fe78873 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -93,6 +93,13 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { func (r *nginxScraper) recordVtsStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { r.recordTimingStats(now, vtsStats) + r.recordVtsConnectionStats(now, vtsStats) +} + +func (r *nginxScraper) recordVtsConnectionStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + r.mb.RecordNginxNetReadingDataPoint(now, vtsStats.Connections.Reading) + r.mb.RecordNginxNetWritingDataPoint(now, vtsStats.Connections.Writing) + r.mb.RecordNginxNetWaitingDataPoint(now, vtsStats.Connections.Waiting) } func (r *nginxScraper) recordTimingStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { From d3fabb17b5fdaec832cc36663a5eeaec645dacb3 Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Thu, 14 Nov 2024 12:37:03 +0530 Subject: [PATCH 3/9] added serverzone response metrics --- receiver/nginxreceiver/documentation.md | 70 +++++ .../internal/metadata/generated_config.go | 20 ++ .../metadata/generated_config_test.go | 10 + .../internal/metadata/generated_metrics.go | 295 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 95 ++++++ .../internal/metadata/testdata/config.yaml | 20 ++ receiver/nginxreceiver/metadata.yaml | 62 +++- receiver/nginxreceiver/scraper.go | 25 ++ 8 files changed, 596 insertions(+), 1 deletion(-) diff --git a/receiver/nginxreceiver/documentation.md 
b/receiver/nginxreceiver/documentation.md index 3b23078add63..5c86f6025bb4 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -82,6 +82,76 @@ Total number of requests made to the server since it started | ---- | ----------- | ---------- | ----------------------- | --------- | | requests | Sum | Int | Cumulative | true | +### nginx.server_zone.responses.1xx + +The number of responses with 1xx status code. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| response | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.2xx + +The number of responses with 2xx status code. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| response | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.3xx + +The number of responses with 3xx status code. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| response | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.4xx + +The number of responses with 4xx status code. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| response | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.5xx + +The number of responses with 5xx status code. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| response | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + ### nginx.upstream.peers.response_time The average time to receive the last byte of data from this server. diff --git a/receiver/nginxreceiver/internal/metadata/generated_config.go b/receiver/nginxreceiver/internal/metadata/generated_config.go index 5211a9e27d11..0127e6b7f2ed 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config.go @@ -35,6 +35,11 @@ type MetricsConfig struct { NginxNetWaiting MetricConfig `mapstructure:"nginx.net.waiting"` NginxNetWriting MetricConfig `mapstructure:"nginx.net.writing"` NginxRequests MetricConfig `mapstructure:"nginx.requests"` + NginxServerZoneResponses1xx MetricConfig `mapstructure:"nginx.server_zone.responses.1xx"` + NginxServerZoneResponses2xx MetricConfig `mapstructure:"nginx.server_zone.responses.2xx"` + NginxServerZoneResponses3xx MetricConfig `mapstructure:"nginx.server_zone.responses.3xx"` + NginxServerZoneResponses4xx MetricConfig `mapstructure:"nginx.server_zone.responses.4xx"` + NginxServerZoneResponses5xx MetricConfig `mapstructure:"nginx.server_zone.responses.5xx"` NginxUpstreamPeersResponseTime MetricConfig `mapstructure:"nginx.upstream.peers.response_time"` } @@ -64,6 +69,21 @@ func DefaultMetricsConfig() MetricsConfig { NginxRequests: MetricConfig{ Enabled: true, }, + NginxServerZoneResponses1xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses2xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses3xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses4xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses5xx: MetricConfig{ + Enabled: true, + }, NginxUpstreamPeersResponseTime: MetricConfig{ Enabled: true, }, diff --git 
a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index 958a7855aa1e..cbe40fc339c9 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -33,6 +33,11 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxNetWaiting: MetricConfig{Enabled: true}, NginxNetWriting: MetricConfig{Enabled: true}, NginxRequests: MetricConfig{Enabled: true}, + NginxServerZoneResponses1xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses2xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses3xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses4xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses5xx: MetricConfig{Enabled: true}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, }, }, @@ -49,6 +54,11 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxNetWaiting: MetricConfig{Enabled: false}, NginxNetWriting: MetricConfig{Enabled: false}, NginxRequests: MetricConfig{Enabled: false}, + NginxServerZoneResponses1xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses2xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses3xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses4xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses5xx: MetricConfig{Enabled: false}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, }, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index dfe8ebae076b..1401c3dfb8e9 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -447,6 +447,261 @@ func newMetricNginxRequests(cfg MetricConfig) metricNginxRequests { return m } +type metricNginxServerZoneResponses1xx struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.1xx metric with initial data. +func (m *metricNginxServerZoneResponses1xx) init() { + m.data.SetName("nginx.server_zone.responses.1xx") + m.data.SetDescription("The number of responses with 1xx status code.") + m.data.SetUnit("response") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses1xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses1xx) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses1xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses1xx(cfg MetricConfig) metricNginxServerZoneResponses1xx { + m := metricNginxServerZoneResponses1xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses2xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills nginx.server_zone.responses.2xx metric with initial data. +func (m *metricNginxServerZoneResponses2xx) init() { + m.data.SetName("nginx.server_zone.responses.2xx") + m.data.SetDescription("The number of responses with 2xx status code.") + m.data.SetUnit("response") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses2xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses2xx) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses2xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses2xx(cfg MetricConfig) metricNginxServerZoneResponses2xx { + m := metricNginxServerZoneResponses2xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses3xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.3xx metric with initial data. 
+func (m *metricNginxServerZoneResponses3xx) init() { + m.data.SetName("nginx.server_zone.responses.3xx") + m.data.SetDescription("The number of responses with 3xx status code.") + m.data.SetUnit("response") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses3xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses3xx) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses3xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses3xx(cfg MetricConfig) metricNginxServerZoneResponses3xx { + m := metricNginxServerZoneResponses3xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses4xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.4xx metric with initial data. 
+func (m *metricNginxServerZoneResponses4xx) init() { + m.data.SetName("nginx.server_zone.responses.4xx") + m.data.SetDescription("The number of responses with 4xx status code.") + m.data.SetUnit("response") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses4xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses4xx) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses4xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses4xx(cfg MetricConfig) metricNginxServerZoneResponses4xx { + m := metricNginxServerZoneResponses4xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses5xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.5xx metric with initial data. 
+func (m *metricNginxServerZoneResponses5xx) init() { + m.data.SetName("nginx.server_zone.responses.5xx") + m.data.SetDescription("The number of responses with 5xx status code.") + m.data.SetUnit("response") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses5xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses5xx) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses5xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses5xx(cfg MetricConfig) metricNginxServerZoneResponses5xx { + m := metricNginxServerZoneResponses5xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxUpstreamPeersResponseTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -515,6 +770,11 @@ type MetricsBuilder struct { metricNginxNetWaiting metricNginxNetWaiting metricNginxNetWriting metricNginxNetWriting metricNginxRequests metricNginxRequests + metricNginxServerZoneResponses1xx metricNginxServerZoneResponses1xx + metricNginxServerZoneResponses2xx metricNginxServerZoneResponses2xx + metricNginxServerZoneResponses3xx metricNginxServerZoneResponses3xx + metricNginxServerZoneResponses4xx metricNginxServerZoneResponses4xx + metricNginxServerZoneResponses5xx metricNginxServerZoneResponses5xx metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime } @@ -550,6 +810,11 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricNginxNetWaiting: newMetricNginxNetWaiting(mbc.Metrics.NginxNetWaiting), metricNginxNetWriting: newMetricNginxNetWriting(mbc.Metrics.NginxNetWriting), metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), + metricNginxServerZoneResponses1xx: newMetricNginxServerZoneResponses1xx(mbc.Metrics.NginxServerZoneResponses1xx), + metricNginxServerZoneResponses2xx: newMetricNginxServerZoneResponses2xx(mbc.Metrics.NginxServerZoneResponses2xx), + metricNginxServerZoneResponses3xx: newMetricNginxServerZoneResponses3xx(mbc.Metrics.NginxServerZoneResponses3xx), + metricNginxServerZoneResponses4xx: newMetricNginxServerZoneResponses4xx(mbc.Metrics.NginxServerZoneResponses4xx), + metricNginxServerZoneResponses5xx: newMetricNginxServerZoneResponses5xx(mbc.Metrics.NginxServerZoneResponses5xx), metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), } @@ -624,6 +889,11 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxNetWaiting.emit(ils.Metrics()) mb.metricNginxNetWriting.emit(ils.Metrics()) mb.metricNginxRequests.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses1xx.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses2xx.emit(ils.Metrics()) + 
mb.metricNginxServerZoneResponses3xx.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses4xx.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses5xx.emit(ils.Metrics()) mb.metricNginxUpstreamPeersResponseTime.emit(ils.Metrics()) for _, op := range options { @@ -686,6 +956,31 @@ func (mb *MetricsBuilder) RecordNginxRequestsDataPoint(ts pcommon.Timestamp, val mb.metricNginxRequests.recordDataPoint(mb.startTime, ts, val) } +// RecordNginxServerZoneResponses1xxDataPoint adds a data point to nginx.server_zone.responses.1xx metric. +func (mb *MetricsBuilder) RecordNginxServerZoneResponses1xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneResponses1xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + +// RecordNginxServerZoneResponses2xxDataPoint adds a data point to nginx.server_zone.responses.2xx metric. +func (mb *MetricsBuilder) RecordNginxServerZoneResponses2xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneResponses2xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + +// RecordNginxServerZoneResponses3xxDataPoint adds a data point to nginx.server_zone.responses.3xx metric. +func (mb *MetricsBuilder) RecordNginxServerZoneResponses3xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneResponses3xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + +// RecordNginxServerZoneResponses4xxDataPoint adds a data point to nginx.server_zone.responses.4xx metric. +func (mb *MetricsBuilder) RecordNginxServerZoneResponses4xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneResponses4xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + +// RecordNginxServerZoneResponses5xxDataPoint adds a data point to nginx.server_zone.responses.5xx metric. 
+func (mb *MetricsBuilder) RecordNginxServerZoneResponses5xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneResponses5xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + // RecordNginxUpstreamPeersResponseTimeDataPoint adds a data point to nginx.upstream.peers.response_time metric. func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponseTimeDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { mb.metricNginxUpstreamPeersResponseTime.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index cccc03f4d6ad..0665e86f9e67 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -91,6 +91,26 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxRequestsDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses1xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses2xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses3xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses4xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses5xxDataPoint(ts, 1, "serverzone_name-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxUpstreamPeersResponseTimeDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") @@ -224,6 +244,81 @@ func TestMetricsBuilder(t *testing.T) { 
assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.server_zone.responses.1xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.1xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.1xx") + validatedMetrics["nginx.server_zone.responses.1xx"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of responses with 1xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.2xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.2xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.2xx") + validatedMetrics["nginx.server_zone.responses.2xx"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of responses with 2xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.3xx": + assert.False(t, 
validatedMetrics["nginx.server_zone.responses.3xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.3xx") + validatedMetrics["nginx.server_zone.responses.3xx"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of responses with 3xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.4xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.4xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.4xx") + validatedMetrics["nginx.server_zone.responses.4xx"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of responses with 4xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.5xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.5xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.5xx") + validatedMetrics["nginx.server_zone.responses.5xx"] = true + assert.Equal(t, 
pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of responses with 5xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) case "nginx.upstream.peers.response_time": assert.False(t, validatedMetrics["nginx.upstream.peers.response_time"], "Found a duplicate in the metrics slice: nginx.upstream.peers.response_time") validatedMetrics["nginx.upstream.peers.response_time"] = true diff --git a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml index 1bcd74b00604..373ef391add2 100644 --- a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml @@ -17,6 +17,16 @@ all_set: enabled: true nginx.requests: enabled: true + nginx.server_zone.responses.1xx: + enabled: true + nginx.server_zone.responses.2xx: + enabled: true + nginx.server_zone.responses.3xx: + enabled: true + nginx.server_zone.responses.4xx: + enabled: true + nginx.server_zone.responses.5xx: + enabled: true nginx.upstream.peers.response_time: enabled: true none_set: @@ -37,5 +47,15 @@ none_set: enabled: false nginx.requests: enabled: false + nginx.server_zone.responses.1xx: + enabled: false + nginx.server_zone.responses.2xx: + enabled: false + nginx.server_zone.responses.3xx: + enabled: false + nginx.server_zone.responses.4xx: + enabled: false + nginx.server_zone.responses.5xx: + enabled: false nginx.upstream.peers.response_time: enabled: false diff --git 
a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 22c7689f50fc..a721d2da7be6 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -23,7 +23,10 @@ attributes: upstream_peer_address: description: The address f the upstream server type: string - + serverzone_name: + description: The name of serverzone + type: string + metrics: nginx.requests: enabled: true @@ -101,3 +104,60 @@ metrics: value_type: int unit: connections + +### Server Zone Response Codes +# - `nginx.server_zone.responses.1xx` - Number of responses with 1xx status codes +# - `nginx.server_zone.responses.2xx` - Number of responses with 2xx status codes +# - `nginx.server_zone.responses.3xx` - Number of responses with 3xx status codes +# - `nginx.server_zone.responses.4xx` - Number of responses with 4xx status codes +# - `nginx.server_zone.responses.5xx` - Number of responses with 5xx status codes + + nginx.server_zone.responses.1xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 1xx status code. + gauge: + value_type: int + unit: + response + + nginx.server_zone.responses.2xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 2xx status code. + gauge: + value_type: int + unit: + response + + nginx.server_zone.responses.3xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 3xx status code. + gauge: + value_type: int + unit: + response + + nginx.server_zone.responses.4xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 4xx status code. + gauge: + value_type: int + unit: + response + + nginx.server_zone.responses.5xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 5xx status code. 
+ gauge: + value_type: int + unit: + response \ No newline at end of file diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index af5e3fe78873..fb395612aa9e 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -94,6 +94,31 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { func (r *nginxScraper) recordVtsStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { r.recordTimingStats(now, vtsStats) r.recordVtsConnectionStats(now, vtsStats) + r.recordVtsServerZoneResponseStats(now, vtsStats) +} + +func (r *nginxScraper) recordVtsServerZoneResponseStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + for serverZoneName, serverZone := range vtsStats.ServerZones { + r.mb.RecordNginxServerZoneResponses1xxDataPoint( + now, serverZone.Responses.Status1xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses2xxDataPoint( + now, serverZone.Responses.Status2xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses3xxDataPoint( + now, serverZone.Responses.Status3xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses4xxDataPoint( + now, serverZone.Responses.Status4xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses5xxDataPoint( + now, serverZone.Responses.Status5xx, serverZoneName, + ) + } } func (r *nginxScraper) recordVtsConnectionStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { From c4821a60d32658cd0f6058781ccc52296fee0ed2 Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Thu, 14 Nov 2024 14:10:04 +0530 Subject: [PATCH 4/9] added serverzone traffic stats --- receiver/nginxreceiver/documentation.md | 28 ++++ .../internal/metadata/generated_config.go | 8 ++ .../metadata/generated_config_test.go | 4 + .../internal/metadata/generated_metrics.go | 122 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 42 ++++++ .../internal/metadata/testdata/config.yaml | 8 ++ receiver/nginxreceiver/metadata.yaml | 37 ++++-- 
receiver/nginxreceiver/scraper.go | 8 ++ 8 files changed, 248 insertions(+), 9 deletions(-) diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index 5c86f6025bb4..6a793f7ad824 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -82,6 +82,20 @@ Total number of requests made to the server since it started | ---- | ----------- | ---------- | ----------------------- | --------- | | requests | Sum | Int | Cumulative | true | +### nginx.server_zone.received + +Bytes received by server zones + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + ### nginx.server_zone.responses.1xx The number of responses with 1xx status code. @@ -152,6 +166,20 @@ The number of responses with 5xx status code. | ---- | ----------- | ------ | | serverzone_name | The name of serverzone | Any Str | +### nginx.server_zone.sent + +Bytes sent by server zones + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + ### nginx.upstream.peers.response_time The average time to receive the last byte of data from this server. 
diff --git a/receiver/nginxreceiver/internal/metadata/generated_config.go b/receiver/nginxreceiver/internal/metadata/generated_config.go index 0127e6b7f2ed..790e3adb75f9 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config.go @@ -35,11 +35,13 @@ type MetricsConfig struct { NginxNetWaiting MetricConfig `mapstructure:"nginx.net.waiting"` NginxNetWriting MetricConfig `mapstructure:"nginx.net.writing"` NginxRequests MetricConfig `mapstructure:"nginx.requests"` + NginxServerZoneReceived MetricConfig `mapstructure:"nginx.server_zone.received"` NginxServerZoneResponses1xx MetricConfig `mapstructure:"nginx.server_zone.responses.1xx"` NginxServerZoneResponses2xx MetricConfig `mapstructure:"nginx.server_zone.responses.2xx"` NginxServerZoneResponses3xx MetricConfig `mapstructure:"nginx.server_zone.responses.3xx"` NginxServerZoneResponses4xx MetricConfig `mapstructure:"nginx.server_zone.responses.4xx"` NginxServerZoneResponses5xx MetricConfig `mapstructure:"nginx.server_zone.responses.5xx"` + NginxServerZoneSent MetricConfig `mapstructure:"nginx.server_zone.sent"` NginxUpstreamPeersResponseTime MetricConfig `mapstructure:"nginx.upstream.peers.response_time"` } @@ -69,6 +71,9 @@ func DefaultMetricsConfig() MetricsConfig { NginxRequests: MetricConfig{ Enabled: true, }, + NginxServerZoneReceived: MetricConfig{ + Enabled: true, + }, NginxServerZoneResponses1xx: MetricConfig{ Enabled: true, }, @@ -84,6 +89,9 @@ func DefaultMetricsConfig() MetricsConfig { NginxServerZoneResponses5xx: MetricConfig{ Enabled: true, }, + NginxServerZoneSent: MetricConfig{ + Enabled: true, + }, NginxUpstreamPeersResponseTime: MetricConfig{ Enabled: true, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index cbe40fc339c9..147ca8aef725 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ 
b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -33,11 +33,13 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxNetWaiting: MetricConfig{Enabled: true}, NginxNetWriting: MetricConfig{Enabled: true}, NginxRequests: MetricConfig{Enabled: true}, + NginxServerZoneReceived: MetricConfig{Enabled: true}, NginxServerZoneResponses1xx: MetricConfig{Enabled: true}, NginxServerZoneResponses2xx: MetricConfig{Enabled: true}, NginxServerZoneResponses3xx: MetricConfig{Enabled: true}, NginxServerZoneResponses4xx: MetricConfig{Enabled: true}, NginxServerZoneResponses5xx: MetricConfig{Enabled: true}, + NginxServerZoneSent: MetricConfig{Enabled: true}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, }, }, @@ -54,11 +56,13 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxNetWaiting: MetricConfig{Enabled: false}, NginxNetWriting: MetricConfig{Enabled: false}, NginxRequests: MetricConfig{Enabled: false}, + NginxServerZoneReceived: MetricConfig{Enabled: false}, NginxServerZoneResponses1xx: MetricConfig{Enabled: false}, NginxServerZoneResponses2xx: MetricConfig{Enabled: false}, NginxServerZoneResponses3xx: MetricConfig{Enabled: false}, NginxServerZoneResponses4xx: MetricConfig{Enabled: false}, NginxServerZoneResponses5xx: MetricConfig{Enabled: false}, + NginxServerZoneSent: MetricConfig{Enabled: false}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, }, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index 1401c3dfb8e9..6b23f771d045 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -447,6 +447,59 @@ func newMetricNginxRequests(cfg MetricConfig) metricNginxRequests { return m } +type metricNginxServerZoneReceived struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.received metric with initial data. +func (m *metricNginxServerZoneReceived) init() { + m.data.SetName("nginx.server_zone.received") + m.data.SetDescription("Bytes received by server zones") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneReceived) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneReceived) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneReceived) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneReceived(cfg MetricConfig) metricNginxServerZoneReceived { + m := metricNginxServerZoneReceived{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxServerZoneResponses1xx struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -702,6 +755,59 @@ func newMetricNginxServerZoneResponses5xx(cfg MetricConfig) metricNginxServerZon return m } +type metricNginxServerZoneSent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.sent metric with initial data. +func (m *metricNginxServerZoneSent) init() { + m.data.SetName("nginx.server_zone.sent") + m.data.SetDescription("Bytes sent by server zones") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneSent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneSent) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricNginxServerZoneSent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneSent(cfg MetricConfig) metricNginxServerZoneSent { + m := metricNginxServerZoneSent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxUpstreamPeersResponseTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -770,11 +876,13 @@ type MetricsBuilder struct { metricNginxNetWaiting metricNginxNetWaiting metricNginxNetWriting metricNginxNetWriting metricNginxRequests metricNginxRequests + metricNginxServerZoneReceived metricNginxServerZoneReceived metricNginxServerZoneResponses1xx metricNginxServerZoneResponses1xx metricNginxServerZoneResponses2xx metricNginxServerZoneResponses2xx metricNginxServerZoneResponses3xx metricNginxServerZoneResponses3xx metricNginxServerZoneResponses4xx metricNginxServerZoneResponses4xx metricNginxServerZoneResponses5xx metricNginxServerZoneResponses5xx + metricNginxServerZoneSent metricNginxServerZoneSent metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime } @@ -810,11 +918,13 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricNginxNetWaiting: newMetricNginxNetWaiting(mbc.Metrics.NginxNetWaiting), metricNginxNetWriting: newMetricNginxNetWriting(mbc.Metrics.NginxNetWriting), metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), + metricNginxServerZoneReceived: newMetricNginxServerZoneReceived(mbc.Metrics.NginxServerZoneReceived), metricNginxServerZoneResponses1xx: newMetricNginxServerZoneResponses1xx(mbc.Metrics.NginxServerZoneResponses1xx), metricNginxServerZoneResponses2xx: newMetricNginxServerZoneResponses2xx(mbc.Metrics.NginxServerZoneResponses2xx), metricNginxServerZoneResponses3xx: 
newMetricNginxServerZoneResponses3xx(mbc.Metrics.NginxServerZoneResponses3xx), metricNginxServerZoneResponses4xx: newMetricNginxServerZoneResponses4xx(mbc.Metrics.NginxServerZoneResponses4xx), metricNginxServerZoneResponses5xx: newMetricNginxServerZoneResponses5xx(mbc.Metrics.NginxServerZoneResponses5xx), + metricNginxServerZoneSent: newMetricNginxServerZoneSent(mbc.Metrics.NginxServerZoneSent), metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), } @@ -889,11 +999,13 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxNetWaiting.emit(ils.Metrics()) mb.metricNginxNetWriting.emit(ils.Metrics()) mb.metricNginxRequests.emit(ils.Metrics()) + mb.metricNginxServerZoneReceived.emit(ils.Metrics()) mb.metricNginxServerZoneResponses1xx.emit(ils.Metrics()) mb.metricNginxServerZoneResponses2xx.emit(ils.Metrics()) mb.metricNginxServerZoneResponses3xx.emit(ils.Metrics()) mb.metricNginxServerZoneResponses4xx.emit(ils.Metrics()) mb.metricNginxServerZoneResponses5xx.emit(ils.Metrics()) + mb.metricNginxServerZoneSent.emit(ils.Metrics()) mb.metricNginxUpstreamPeersResponseTime.emit(ils.Metrics()) for _, op := range options { @@ -956,6 +1068,11 @@ func (mb *MetricsBuilder) RecordNginxRequestsDataPoint(ts pcommon.Timestamp, val mb.metricNginxRequests.recordDataPoint(mb.startTime, ts, val) } +// RecordNginxServerZoneReceivedDataPoint adds a data point to nginx.server_zone.received metric. +func (mb *MetricsBuilder) RecordNginxServerZoneReceivedDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneReceived.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + // RecordNginxServerZoneResponses1xxDataPoint adds a data point to nginx.server_zone.responses.1xx metric. 
func (mb *MetricsBuilder) RecordNginxServerZoneResponses1xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { mb.metricNginxServerZoneResponses1xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) @@ -981,6 +1098,11 @@ func (mb *MetricsBuilder) RecordNginxServerZoneResponses5xxDataPoint(ts pcommon. mb.metricNginxServerZoneResponses5xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) } +// RecordNginxServerZoneSentDataPoint adds a data point to nginx.server_zone.sent metric. +func (mb *MetricsBuilder) RecordNginxServerZoneSentDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneSent.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + // RecordNginxUpstreamPeersResponseTimeDataPoint adds a data point to nginx.upstream.peers.response_time metric. func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponseTimeDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { mb.metricNginxUpstreamPeersResponseTime.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index 0665e86f9e67..29a4022243e8 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -91,6 +91,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxRequestsDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneReceivedDataPoint(ts, 1, "serverzone_name-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxServerZoneResponses1xxDataPoint(ts, 1, "serverzone_name-val") @@ -111,6 +115,10 @@ func TestMetricsBuilder(t *testing.T) { 
allMetricsCount++ mb.RecordNginxServerZoneResponses5xxDataPoint(ts, 1, "serverzone_name-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneSentDataPoint(ts, 1, "serverzone_name-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxUpstreamPeersResponseTimeDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") @@ -244,6 +252,23 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.server_zone.received": + assert.False(t, validatedMetrics["nginx.server_zone.received"], "Found a duplicate in the metrics slice: nginx.server_zone.received") + validatedMetrics["nginx.server_zone.received"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes received by server zones", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) case "nginx.server_zone.responses.1xx": assert.False(t, validatedMetrics["nginx.server_zone.responses.1xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.1xx") validatedMetrics["nginx.server_zone.responses.1xx"] = true @@ -319,6 +344,23 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("serverzone_name") assert.True(t, ok) assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) 
+ case "nginx.server_zone.sent": + assert.False(t, validatedMetrics["nginx.server_zone.sent"], "Found a duplicate in the metrics slice: nginx.server_zone.sent") + validatedMetrics["nginx.server_zone.sent"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes sent by server zones", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) case "nginx.upstream.peers.response_time": assert.False(t, validatedMetrics["nginx.upstream.peers.response_time"], "Found a duplicate in the metrics slice: nginx.upstream.peers.response_time") validatedMetrics["nginx.upstream.peers.response_time"] = true diff --git a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml index 373ef391add2..39b9e346e7ac 100644 --- a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml @@ -17,6 +17,8 @@ all_set: enabled: true nginx.requests: enabled: true + nginx.server_zone.received: + enabled: true nginx.server_zone.responses.1xx: enabled: true nginx.server_zone.responses.2xx: @@ -27,6 +29,8 @@ all_set: enabled: true nginx.server_zone.responses.5xx: enabled: true + nginx.server_zone.sent: + enabled: true nginx.upstream.peers.response_time: enabled: true none_set: @@ -47,6 +51,8 @@ none_set: enabled: false nginx.requests: enabled: 
false + nginx.server_zone.received: + enabled: false nginx.server_zone.responses.1xx: enabled: false nginx.server_zone.responses.2xx: @@ -57,5 +63,7 @@ none_set: enabled: false nginx.server_zone.responses.5xx: enabled: false + nginx.server_zone.sent: + enabled: false nginx.upstream.peers.response_time: enabled: false diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index a721d2da7be6..001176f906b0 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -104,14 +104,6 @@ metrics: value_type: int unit: connections - -### Server Zone Response Codes -# - `nginx.server_zone.responses.1xx` - Number of responses with 1xx status codes -# - `nginx.server_zone.responses.2xx` - Number of responses with 2xx status codes -# - `nginx.server_zone.responses.3xx` - Number of responses with 3xx status codes -# - `nginx.server_zone.responses.4xx` - Number of responses with 4xx status codes -# - `nginx.server_zone.responses.5xx` - Number of responses with 5xx status codes - nginx.server_zone.responses.1xx: enabled: true attributes: @@ -160,4 +152,31 @@ metrics: gauge: value_type: int unit: - response \ No newline at end of file + response + +### Server Zone Traffic +# - `nginx.server_zone.received` - Bytes received by server zones +# - `nginx.server_zone.sent` - Bytes sent by server zones + + nginx.server_zone.received: + enabled: true + attributes: + - serverzone_name + description: Bytes received by server zones + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By + + nginx.server_zone.sent: + enabled: true + attributes: + - serverzone_name + description: Bytes sent by server zones + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By + \ No newline at end of file diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index fb395612aa9e..3bb3c9994f76 100644 --- a/receiver/nginxreceiver/scraper.go +++ 
b/receiver/nginxreceiver/scraper.go @@ -95,6 +95,14 @@ func (r *nginxScraper) recordVtsStats(now pcommon.Timestamp, vtsStats *NginxVtsS r.recordTimingStats(now, vtsStats) r.recordVtsConnectionStats(now, vtsStats) r.recordVtsServerZoneResponseStats(now, vtsStats) + r.recordVtsServerZoneTrafficStats(now, vtsStats) +} + +func (r *nginxScraper) recordVtsServerZoneTrafficStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + for serverZoneName, serverZone := range vtsStats.ServerZones { + r.mb.RecordNginxServerZoneSentDataPoint(now, serverZone.OutBytes, serverZoneName) + r.mb.RecordNginxServerZoneReceivedDataPoint(now, serverZone.InBytes, serverZoneName) + } } func (r *nginxScraper) recordVtsServerZoneResponseStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { From 8b6bcdd95fb17cf01bdfe952253ac9e27c236c2b Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Thu, 14 Nov 2024 15:17:06 +0530 Subject: [PATCH 5/9] upstream traffic stats --- receiver/nginxreceiver/documentation.md | 45 +++++ .../internal/metadata/generated_config.go | 12 ++ .../metadata/generated_config_test.go | 6 + .../internal/metadata/generated_metrics.go | 186 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 72 +++++++ .../internal/metadata/testdata/config.yaml | 12 ++ receiver/nginxreceiver/metadata.yaml | 46 ++++- receiver/nginxreceiver/scraper.go | 23 +++ 8 files changed, 397 insertions(+), 5 deletions(-) diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index 6a793f7ad824..d70a21f09a08 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -180,6 +180,36 @@ Bytes sent by server zones | ---- | ----------- | ------ | | serverzone_name | The name of serverzone | Any Str | +### nginx.upstream.peers.received + +Bytes received from upstream servers + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 
By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.requests + +Number of requests made to upstream servers + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| requests | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + ### nginx.upstream.peers.response_time The average time to receive the last byte of data from this server. @@ -194,3 +224,18 @@ The average time to receive the last byte of data from this server. | ---- | ----------- | ------ | | upstream_block_name | The name of the upstream block | Any Str | | upstream_peer_address | The address f the upstream server | Any Str | + +### nginx.upstream.peers.sent + +Bytes sent from upstream servers + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | diff --git a/receiver/nginxreceiver/internal/metadata/generated_config.go b/receiver/nginxreceiver/internal/metadata/generated_config.go index 790e3adb75f9..a297a4130c21 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config.go @@ -42,7 +42,10 @@ type MetricsConfig struct {
NginxServerZoneResponses4xx MetricConfig `mapstructure:"nginx.server_zone.responses.4xx"` NginxServerZoneResponses5xx MetricConfig `mapstructure:"nginx.server_zone.responses.5xx"` NginxServerZoneSent MetricConfig `mapstructure:"nginx.server_zone.sent"` + NginxUpstreamPeersReceived MetricConfig `mapstructure:"nginx.upstream.peers.received"` + NginxUpstreamPeersRequests MetricConfig `mapstructure:"nginx.upstream.peers.requests"` NginxUpstreamPeersResponseTime MetricConfig `mapstructure:"nginx.upstream.peers.response_time"` + NginxUpstreamPeersSent MetricConfig `mapstructure:"nginx.upstream.peers.sent"` } func DefaultMetricsConfig() MetricsConfig { @@ -92,9 +95,18 @@ func DefaultMetricsConfig() MetricsConfig { NginxServerZoneSent: MetricConfig{ Enabled: true, }, + NginxUpstreamPeersReceived: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersRequests: MetricConfig{ + Enabled: true, + }, NginxUpstreamPeersResponseTime: MetricConfig{ Enabled: true, }, + NginxUpstreamPeersSent: MetricConfig{ + Enabled: true, + }, } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index 147ca8aef725..8d3c8f36faab 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -40,7 +40,10 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxServerZoneResponses4xx: MetricConfig{Enabled: true}, NginxServerZoneResponses5xx: MetricConfig{Enabled: true}, NginxServerZoneSent: MetricConfig{Enabled: true}, + NginxUpstreamPeersReceived: MetricConfig{Enabled: true}, + NginxUpstreamPeersRequests: MetricConfig{Enabled: true}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, + NginxUpstreamPeersSent: MetricConfig{Enabled: true}, }, }, }, @@ -63,7 +66,10 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxServerZoneResponses4xx: MetricConfig{Enabled: false}, NginxServerZoneResponses5xx: 
MetricConfig{Enabled: false}, NginxServerZoneSent: MetricConfig{Enabled: false}, + NginxUpstreamPeersReceived: MetricConfig{Enabled: false}, + NginxUpstreamPeersRequests: MetricConfig{Enabled: false}, NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, + NginxUpstreamPeersSent: MetricConfig{Enabled: false}, }, }, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index 6b23f771d045..15f7a3167f00 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -808,6 +808,114 @@ func newMetricNginxServerZoneSent(cfg MetricConfig) metricNginxServerZoneSent { return m } +type metricNginxUpstreamPeersReceived struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.received metric with initial data. 
+func (m *metricNginxUpstreamPeersReceived) init() { + m.data.SetName("nginx.upstream.peers.received") + m.data.SetDescription("Bytes received from upstream servers") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersReceived) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersReceived) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersReceived) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersReceived(cfg MetricConfig) metricNginxUpstreamPeersReceived { + m := metricNginxUpstreamPeersReceived{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersRequests struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills nginx.upstream.peers.requests metric with initial data. +func (m *metricNginxUpstreamPeersRequests) init() { + m.data.SetName("nginx.upstream.peers.requests") + m.data.SetDescription("Number of requests made to upstream servers") + m.data.SetUnit("requests") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersRequests) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersRequests) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersRequests(cfg MetricConfig) metricNginxUpstreamPeersRequests { + m := metricNginxUpstreamPeersRequests{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxUpstreamPeersResponseTime struct { data pmetric.Metric // data buffer for generated metric. 
config MetricConfig // metric config provided by user. @@ -860,6 +968,60 @@ func newMetricNginxUpstreamPeersResponseTime(cfg MetricConfig) metricNginxUpstre return m } +type metricNginxUpstreamPeersSent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.sent metric with initial data. +func (m *metricNginxUpstreamPeersSent) init() { + m.data.SetName("nginx.upstream.peers.sent") + m.data.SetDescription("Bytes sent from upstream servers") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersSent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersSent) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricNginxUpstreamPeersSent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersSent(cfg MetricConfig) metricNginxUpstreamPeersSent { + m := metricNginxUpstreamPeersSent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { @@ -883,7 +1045,10 @@ type MetricsBuilder struct { metricNginxServerZoneResponses4xx metricNginxServerZoneResponses4xx metricNginxServerZoneResponses5xx metricNginxServerZoneResponses5xx metricNginxServerZoneSent metricNginxServerZoneSent + metricNginxUpstreamPeersReceived metricNginxUpstreamPeersReceived + metricNginxUpstreamPeersRequests metricNginxUpstreamPeersRequests metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime + metricNginxUpstreamPeersSent metricNginxUpstreamPeersSent } // MetricBuilderOption applies changes to default metrics builder. 
@@ -925,7 +1090,10 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricNginxServerZoneResponses4xx: newMetricNginxServerZoneResponses4xx(mbc.Metrics.NginxServerZoneResponses4xx), metricNginxServerZoneResponses5xx: newMetricNginxServerZoneResponses5xx(mbc.Metrics.NginxServerZoneResponses5xx), metricNginxServerZoneSent: newMetricNginxServerZoneSent(mbc.Metrics.NginxServerZoneSent), + metricNginxUpstreamPeersReceived: newMetricNginxUpstreamPeersReceived(mbc.Metrics.NginxUpstreamPeersReceived), + metricNginxUpstreamPeersRequests: newMetricNginxUpstreamPeersRequests(mbc.Metrics.NginxUpstreamPeersRequests), metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), + metricNginxUpstreamPeersSent: newMetricNginxUpstreamPeersSent(mbc.Metrics.NginxUpstreamPeersSent), } for _, op := range options { @@ -1006,7 +1174,10 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxServerZoneResponses4xx.emit(ils.Metrics()) mb.metricNginxServerZoneResponses5xx.emit(ils.Metrics()) mb.metricNginxServerZoneSent.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersReceived.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersRequests.emit(ils.Metrics()) mb.metricNginxUpstreamPeersResponseTime.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersSent.emit(ils.Metrics()) for _, op := range options { op.apply(rm) @@ -1103,11 +1274,26 @@ func (mb *MetricsBuilder) RecordNginxServerZoneSentDataPoint(ts pcommon.Timestam mb.metricNginxServerZoneSent.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) } +// RecordNginxUpstreamPeersReceivedDataPoint adds a data point to nginx.upstream.peers.received metric. 
+func (mb *MetricsBuilder) RecordNginxUpstreamPeersReceivedDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersReceived.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + +// RecordNginxUpstreamPeersRequestsDataPoint adds a data point to nginx.upstream.peers.requests metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersRequestsDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersRequests.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + // RecordNginxUpstreamPeersResponseTimeDataPoint adds a data point to nginx.upstream.peers.response_time metric. func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponseTimeDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { mb.metricNginxUpstreamPeersResponseTime.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) } +// RecordNginxUpstreamPeersSentDataPoint adds a data point to nginx.upstream.peers.sent metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersSentDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersSent.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index 29a4022243e8..1a7160c510bb 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -119,10 +119,22 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxServerZoneSentDataPoint(ts, 1, "serverzone_name-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersReceivedDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersRequestsDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxUpstreamPeersResponseTimeDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersSentDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) @@ -361,6 +373,46 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("serverzone_name") assert.True(t, ok) assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.upstream.peers.received": + assert.False(t, validatedMetrics["nginx.upstream.peers.received"], "Found a duplicate in the metrics slice: nginx.upstream.peers.received") + validatedMetrics["nginx.upstream.peers.received"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes received from upstream servers", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, 
pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.requests": + assert.False(t, validatedMetrics["nginx.upstream.peers.requests"], "Found a duplicate in the metrics slice: nginx.upstream.peers.requests") + validatedMetrics["nginx.upstream.peers.requests"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of requests made to upstream servers", ms.At(i).Description()) + assert.Equal(t, "requests", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) case "nginx.upstream.peers.response_time": assert.False(t, validatedMetrics["nginx.upstream.peers.response_time"], "Found a duplicate in the metrics slice: nginx.upstream.peers.response_time") 
validatedMetrics["nginx.upstream.peers.response_time"] = true @@ -379,6 +431,26 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok = dp.Attributes().Get("upstream_peer_address") assert.True(t, ok) assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.sent": + assert.False(t, validatedMetrics["nginx.upstream.peers.sent"], "Found a duplicate in the metrics slice: nginx.upstream.peers.sent") + validatedMetrics["nginx.upstream.peers.sent"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes sent from upstream servers", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) } } }) diff --git a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml index 39b9e346e7ac..6df5912e1437 100644 --- a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml @@ -31,8 +31,14 @@ all_set: enabled: true nginx.server_zone.sent: enabled: true + nginx.upstream.peers.received: + enabled: true + nginx.upstream.peers.requests: + enabled: true nginx.upstream.peers.response_time: enabled: true + nginx.upstream.peers.sent: + 
enabled: true none_set: metrics: nginx.connections_accepted: @@ -65,5 +71,11 @@ none_set: enabled: false nginx.server_zone.sent: enabled: false + nginx.upstream.peers.received: + enabled: false + nginx.upstream.peers.requests: + enabled: false nginx.upstream.peers.response_time: enabled: false + nginx.upstream.peers.sent: + enabled: false diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 001176f906b0..0a184b8a681d 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -154,10 +154,6 @@ metrics: unit: response -### Server Zone Traffic -# - `nginx.server_zone.received` - Bytes received by server zones -# - `nginx.server_zone.sent` - Bytes sent by server zones - nginx.server_zone.received: enabled: true attributes: @@ -179,4 +175,44 @@ metrics: monotonic: true aggregation_temporality: cumulative unit: By - \ No newline at end of file + + ### Request and Traffic +# - `nginx.upstream.peers.requests` - Number of requests made to upstream servers +# - `nginx.upstream.peers.received` - Bytes received from upstream servers +# - `nginx.upstream.peers.sent` - Bytes sent to upstream servers + + nginx.upstream.peers.requests: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of requests made to upstream servers + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: requests + + nginx.upstream.peers.received: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Bytes received from upstream servers + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By + + nginx.upstream.peers.sent: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Bytes sent from upstream servers + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By \ No newline at end of file diff 
--git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 3bb3c9994f76..14e8c127fbe9 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -96,6 +96,29 @@ func (r *nginxScraper) recordVtsStats(now pcommon.Timestamp, vtsStats *NginxVtsS r.recordVtsConnectionStats(now, vtsStats) r.recordVtsServerZoneResponseStats(now, vtsStats) r.recordVtsServerZoneTrafficStats(now, vtsStats) + r.recordVtsUpstreamRequestTrafficStats(now, vtsStats) +} + +func (r *nginxScraper) recordVtsUpstreamRequestTrafficStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + for upstreamZoneName, upstreamZoneServers := range vtsStats.UpstreamZones { + for _, upstreamZoneServer := range upstreamZoneServers { + // pp.Println(upstreamZoneName) + // pp.Println("Upstream Zone Server Address: ", upstreamZoneServer.Server) + // pp.Println("Reqs made to this server: ", upstreamZoneServer.RequestCounter) + // pp.Println("Bytes received by this server: ", upstreamZoneServer.InBytes) + // pp.Println("Bytes sent by this server: ", upstreamZoneServer.OutBytes) + + r.mb.RecordNginxUpstreamPeersRequestsDataPoint( + now, upstreamZoneServer.RequestCounter, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersReceivedDataPoint( + now, upstreamZoneServer.InBytes, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersSentDataPoint( + now, upstreamZoneServer.OutBytes, upstreamZoneName, upstreamZoneServer.Server, + ) + } + } } func (r *nginxScraper) recordVtsServerZoneTrafficStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { From 00acd3b16da6a48808795468c3a3ed84cb3bc76f Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Thu, 14 Nov 2024 17:41:50 +0530 Subject: [PATCH 6/9] server config stats --- receiver/nginxreceiver/documentation.md | 120 ++++ receiver/nginxreceiver/go.mod | 1 - receiver/nginxreceiver/go.sum | 2 - .../internal/metadata/generated_config.go | 70 ++- 
.../metadata/generated_config_test.go | 92 +-- .../internal/metadata/generated_metrics.go | 584 ++++++++++++++++-- .../metadata/generated_metrics_test.go | 186 ++++++ .../internal/metadata/testdata/config.yaml | 32 + receiver/nginxreceiver/metadata.yaml | 104 +++- receiver/nginxreceiver/scraper.go | 46 +- receiver/nginxreceiver/vts_stats.go | 2 +- 11 files changed, 1120 insertions(+), 119 deletions(-) diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index d70a21f09a08..8ca02722b47b 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -180,6 +180,36 @@ Bytes sent by server zones | ---- | ----------- | ------ | | serverzone_name | The name of serverzone | Any Str | +### nginx.upstream.peers.backup + +Whether upstream server is a backup server + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {state} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | + +### nginx.upstream.peers.health_checks.last_passed + +Boolean indicating if the last health check request was successful and passed tests. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {status} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | + ### nginx.upstream.peers.received Bytes received from upstream servers @@ -225,6 +255,81 @@ The average time to receive the last byte of data from this server. 
| upstream_block_name | The name of the upstream block | Any Str | | upstream_peer_address | The address f the upstream server | Any Str | +### nginx.upstream.peers.responses.1xx + +Number of responses from upstream with 1xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | + +### nginx.upstream.peers.responses.2xx + +Number of responses from upstream with 2xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | + +### nginx.upstream.peers.responses.3xx + +Number of responses from upstream with 3xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | + +### nginx.upstream.peers.responses.4xx + +Number of responses from upstream with 4xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int 
| Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | + +### nginx.upstream.peers.responses.5xx + +Number of responses from upstream with 5xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | + ### nginx.upstream.peers.sent Bytes sent from upstream servers @@ -239,3 +344,18 @@ Bytes sent from upstream servers | ---- | ----------- | ------ | | upstream_block_name | The name of the upstream block | Any Str | | upstream_peer_address | The address f the upstream server | Any Str | + +### nginx.upstream.peers.weight + +Weight of upstream server + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| weight | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address f the upstream server | Any Str | diff --git a/receiver/nginxreceiver/go.mod b/receiver/nginxreceiver/go.mod index bd70ddd1ed18..9cc55c4fe047 100644 --- a/receiver/nginxreceiver/go.mod +++ b/receiver/nginxreceiver/go.mod @@ -4,7 +4,6 @@ go 1.22.0 require ( github.com/google/go-cmp v0.6.0 - github.com/nginxinc/nginx-prometheus-exporter v0.11.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 diff --git a/receiver/nginxreceiver/go.sum b/receiver/nginxreceiver/go.sum index 6ce7b9ede932..6df42f1dd3c5 100644 --- a/receiver/nginxreceiver/go.sum +++ b/receiver/nginxreceiver/go.sum @@ -105,8 +105,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/nginxinc/nginx-prometheus-exporter v0.11.0 h1:21xjnqNgxtni2jDgAQ90bl15uDnrTreO9sIlu1YsX/U= -github.com/nginxinc/nginx-prometheus-exporter v0.11.0/go.mod h1:GdyHnWAb8q8OW1Pssrrqbcqra0SH0Vn6UXICMmyWkw8= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= diff --git a/receiver/nginxreceiver/internal/metadata/generated_config.go b/receiver/nginxreceiver/internal/metadata/generated_config.go index a297a4130c21..d9df76607844 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config.go @@ -27,25 +27,33 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for nginx metrics. 
type MetricsConfig struct { - NginxConnectionsAccepted MetricConfig `mapstructure:"nginx.connections_accepted"` - NginxConnectionsCurrent MetricConfig `mapstructure:"nginx.connections_current"` - NginxConnectionsHandled MetricConfig `mapstructure:"nginx.connections_handled"` - NginxLoadTimestamp MetricConfig `mapstructure:"nginx.load_timestamp"` - NginxNetReading MetricConfig `mapstructure:"nginx.net.reading"` - NginxNetWaiting MetricConfig `mapstructure:"nginx.net.waiting"` - NginxNetWriting MetricConfig `mapstructure:"nginx.net.writing"` - NginxRequests MetricConfig `mapstructure:"nginx.requests"` - NginxServerZoneReceived MetricConfig `mapstructure:"nginx.server_zone.received"` - NginxServerZoneResponses1xx MetricConfig `mapstructure:"nginx.server_zone.responses.1xx"` - NginxServerZoneResponses2xx MetricConfig `mapstructure:"nginx.server_zone.responses.2xx"` - NginxServerZoneResponses3xx MetricConfig `mapstructure:"nginx.server_zone.responses.3xx"` - NginxServerZoneResponses4xx MetricConfig `mapstructure:"nginx.server_zone.responses.4xx"` - NginxServerZoneResponses5xx MetricConfig `mapstructure:"nginx.server_zone.responses.5xx"` - NginxServerZoneSent MetricConfig `mapstructure:"nginx.server_zone.sent"` - NginxUpstreamPeersReceived MetricConfig `mapstructure:"nginx.upstream.peers.received"` - NginxUpstreamPeersRequests MetricConfig `mapstructure:"nginx.upstream.peers.requests"` - NginxUpstreamPeersResponseTime MetricConfig `mapstructure:"nginx.upstream.peers.response_time"` - NginxUpstreamPeersSent MetricConfig `mapstructure:"nginx.upstream.peers.sent"` + NginxConnectionsAccepted MetricConfig `mapstructure:"nginx.connections_accepted"` + NginxConnectionsCurrent MetricConfig `mapstructure:"nginx.connections_current"` + NginxConnectionsHandled MetricConfig `mapstructure:"nginx.connections_handled"` + NginxLoadTimestamp MetricConfig `mapstructure:"nginx.load_timestamp"` + NginxNetReading MetricConfig `mapstructure:"nginx.net.reading"` + NginxNetWaiting MetricConfig 
`mapstructure:"nginx.net.waiting"` + NginxNetWriting MetricConfig `mapstructure:"nginx.net.writing"` + NginxRequests MetricConfig `mapstructure:"nginx.requests"` + NginxServerZoneReceived MetricConfig `mapstructure:"nginx.server_zone.received"` + NginxServerZoneResponses1xx MetricConfig `mapstructure:"nginx.server_zone.responses.1xx"` + NginxServerZoneResponses2xx MetricConfig `mapstructure:"nginx.server_zone.responses.2xx"` + NginxServerZoneResponses3xx MetricConfig `mapstructure:"nginx.server_zone.responses.3xx"` + NginxServerZoneResponses4xx MetricConfig `mapstructure:"nginx.server_zone.responses.4xx"` + NginxServerZoneResponses5xx MetricConfig `mapstructure:"nginx.server_zone.responses.5xx"` + NginxServerZoneSent MetricConfig `mapstructure:"nginx.server_zone.sent"` + NginxUpstreamPeersBackup MetricConfig `mapstructure:"nginx.upstream.peers.backup"` + NginxUpstreamPeersHealthChecksLastPassed MetricConfig `mapstructure:"nginx.upstream.peers.health_checks.last_passed"` + NginxUpstreamPeersReceived MetricConfig `mapstructure:"nginx.upstream.peers.received"` + NginxUpstreamPeersRequests MetricConfig `mapstructure:"nginx.upstream.peers.requests"` + NginxUpstreamPeersResponseTime MetricConfig `mapstructure:"nginx.upstream.peers.response_time"` + NginxUpstreamPeersResponses1xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.1xx"` + NginxUpstreamPeersResponses2xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.2xx"` + NginxUpstreamPeersResponses3xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.3xx"` + NginxUpstreamPeersResponses4xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.4xx"` + NginxUpstreamPeersResponses5xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.5xx"` + NginxUpstreamPeersSent MetricConfig `mapstructure:"nginx.upstream.peers.sent"` + NginxUpstreamPeersWeight MetricConfig `mapstructure:"nginx.upstream.peers.weight"` } func DefaultMetricsConfig() MetricsConfig { @@ -95,6 +103,12 @@ func 
DefaultMetricsConfig() MetricsConfig { NginxServerZoneSent: MetricConfig{ Enabled: true, }, + NginxUpstreamPeersBackup: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersHealthChecksLastPassed: MetricConfig{ + Enabled: true, + }, NginxUpstreamPeersReceived: MetricConfig{ Enabled: true, }, @@ -104,9 +118,27 @@ func DefaultMetricsConfig() MetricsConfig { NginxUpstreamPeersResponseTime: MetricConfig{ Enabled: true, }, + NginxUpstreamPeersResponses1xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses2xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses3xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses4xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses5xx: MetricConfig{ + Enabled: true, + }, NginxUpstreamPeersSent: MetricConfig{ Enabled: true, }, + NginxUpstreamPeersWeight: MetricConfig{ + Enabled: true, + }, } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index 8d3c8f36faab..e442b9485b60 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -25,25 +25,33 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - NginxConnectionsAccepted: MetricConfig{Enabled: true}, - NginxConnectionsCurrent: MetricConfig{Enabled: true}, - NginxConnectionsHandled: MetricConfig{Enabled: true}, - NginxLoadTimestamp: MetricConfig{Enabled: true}, - NginxNetReading: MetricConfig{Enabled: true}, - NginxNetWaiting: MetricConfig{Enabled: true}, - NginxNetWriting: MetricConfig{Enabled: true}, - NginxRequests: MetricConfig{Enabled: true}, - NginxServerZoneReceived: MetricConfig{Enabled: true}, - NginxServerZoneResponses1xx: MetricConfig{Enabled: true}, - NginxServerZoneResponses2xx: MetricConfig{Enabled: true}, - NginxServerZoneResponses3xx: 
MetricConfig{Enabled: true}, - NginxServerZoneResponses4xx: MetricConfig{Enabled: true}, - NginxServerZoneResponses5xx: MetricConfig{Enabled: true}, - NginxServerZoneSent: MetricConfig{Enabled: true}, - NginxUpstreamPeersReceived: MetricConfig{Enabled: true}, - NginxUpstreamPeersRequests: MetricConfig{Enabled: true}, - NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, - NginxUpstreamPeersSent: MetricConfig{Enabled: true}, + NginxConnectionsAccepted: MetricConfig{Enabled: true}, + NginxConnectionsCurrent: MetricConfig{Enabled: true}, + NginxConnectionsHandled: MetricConfig{Enabled: true}, + NginxLoadTimestamp: MetricConfig{Enabled: true}, + NginxNetReading: MetricConfig{Enabled: true}, + NginxNetWaiting: MetricConfig{Enabled: true}, + NginxNetWriting: MetricConfig{Enabled: true}, + NginxRequests: MetricConfig{Enabled: true}, + NginxServerZoneReceived: MetricConfig{Enabled: true}, + NginxServerZoneResponses1xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses2xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses3xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses4xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses5xx: MetricConfig{Enabled: true}, + NginxServerZoneSent: MetricConfig{Enabled: true}, + NginxUpstreamPeersBackup: MetricConfig{Enabled: true}, + NginxUpstreamPeersHealthChecksLastPassed: MetricConfig{Enabled: true}, + NginxUpstreamPeersReceived: MetricConfig{Enabled: true}, + NginxUpstreamPeersRequests: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses1xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses2xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses3xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses4xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses5xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersSent: MetricConfig{Enabled: true}, + NginxUpstreamPeersWeight: MetricConfig{Enabled: true}, }, }, 
}, @@ -51,25 +59,33 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - NginxConnectionsAccepted: MetricConfig{Enabled: false}, - NginxConnectionsCurrent: MetricConfig{Enabled: false}, - NginxConnectionsHandled: MetricConfig{Enabled: false}, - NginxLoadTimestamp: MetricConfig{Enabled: false}, - NginxNetReading: MetricConfig{Enabled: false}, - NginxNetWaiting: MetricConfig{Enabled: false}, - NginxNetWriting: MetricConfig{Enabled: false}, - NginxRequests: MetricConfig{Enabled: false}, - NginxServerZoneReceived: MetricConfig{Enabled: false}, - NginxServerZoneResponses1xx: MetricConfig{Enabled: false}, - NginxServerZoneResponses2xx: MetricConfig{Enabled: false}, - NginxServerZoneResponses3xx: MetricConfig{Enabled: false}, - NginxServerZoneResponses4xx: MetricConfig{Enabled: false}, - NginxServerZoneResponses5xx: MetricConfig{Enabled: false}, - NginxServerZoneSent: MetricConfig{Enabled: false}, - NginxUpstreamPeersReceived: MetricConfig{Enabled: false}, - NginxUpstreamPeersRequests: MetricConfig{Enabled: false}, - NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, - NginxUpstreamPeersSent: MetricConfig{Enabled: false}, + NginxConnectionsAccepted: MetricConfig{Enabled: false}, + NginxConnectionsCurrent: MetricConfig{Enabled: false}, + NginxConnectionsHandled: MetricConfig{Enabled: false}, + NginxLoadTimestamp: MetricConfig{Enabled: false}, + NginxNetReading: MetricConfig{Enabled: false}, + NginxNetWaiting: MetricConfig{Enabled: false}, + NginxNetWriting: MetricConfig{Enabled: false}, + NginxRequests: MetricConfig{Enabled: false}, + NginxServerZoneReceived: MetricConfig{Enabled: false}, + NginxServerZoneResponses1xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses2xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses3xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses4xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses5xx: MetricConfig{Enabled: false}, + 
NginxServerZoneSent: MetricConfig{Enabled: false}, + NginxUpstreamPeersBackup: MetricConfig{Enabled: false}, + NginxUpstreamPeersHealthChecksLastPassed: MetricConfig{Enabled: false}, + NginxUpstreamPeersReceived: MetricConfig{Enabled: false}, + NginxUpstreamPeersRequests: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses1xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses2xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses3xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses4xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses5xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersSent: MetricConfig{Enabled: false}, + NginxUpstreamPeersWeight: MetricConfig{Enabled: false}, }, }, }, diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index 15f7a3167f00..b118663ad195 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -808,6 +808,110 @@ func newMetricNginxServerZoneSent(cfg MetricConfig) metricNginxServerZoneSent { return m } +type metricNginxUpstreamPeersBackup struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.backup metric with initial data. 
+func (m *metricNginxUpstreamPeersBackup) init() { + m.data.SetName("nginx.upstream.peers.backup") + m.data.SetDescription("Whether upstream server is a backup server") + m.data.SetUnit("{state}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersBackup) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersBackup) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersBackup) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersBackup(cfg MetricConfig) metricNginxUpstreamPeersBackup { + m := metricNginxUpstreamPeersBackup{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersHealthChecksLastPassed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.health_checks.last_passed metric with initial data. 
+func (m *metricNginxUpstreamPeersHealthChecksLastPassed) init() { + m.data.SetName("nginx.upstream.peers.health_checks.last_passed") + m.data.SetDescription("Boolean indicating if the last health check request was successful and passed tests.") + m.data.SetUnit("{status}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersHealthChecksLastPassed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersHealthChecksLastPassed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersHealthChecksLastPassed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersHealthChecksLastPassed(cfg MetricConfig) metricNginxUpstreamPeersHealthChecksLastPassed { + m := metricNginxUpstreamPeersHealthChecksLastPassed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxUpstreamPeersReceived struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -968,6 +1072,276 @@ func newMetricNginxUpstreamPeersResponseTime(cfg MetricConfig) metricNginxUpstre return m } +type metricNginxUpstreamPeersResponses1xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.responses.1xx metric with initial data. +func (m *metricNginxUpstreamPeersResponses1xx) init() { + m.data.SetName("nginx.upstream.peers.responses.1xx") + m.data.SetDescription("Number of responses from upstream with 1xx status codes") + m.data.SetUnit("responses") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponses1xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersResponses1xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricNginxUpstreamPeersResponses1xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponses1xx(cfg MetricConfig) metricNginxUpstreamPeersResponses1xx { + m := metricNginxUpstreamPeersResponses1xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersResponses2xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.responses.2xx metric with initial data. +func (m *metricNginxUpstreamPeersResponses2xx) init() { + m.data.SetName("nginx.upstream.peers.responses.2xx") + m.data.SetDescription("Number of responses from upstream with 2xx status codes") + m.data.SetUnit("responses") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponses2xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricNginxUpstreamPeersResponses2xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersResponses2xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponses2xx(cfg MetricConfig) metricNginxUpstreamPeersResponses2xx { + m := metricNginxUpstreamPeersResponses2xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersResponses3xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.responses.3xx metric with initial data. 
+func (m *metricNginxUpstreamPeersResponses3xx) init() { + m.data.SetName("nginx.upstream.peers.responses.3xx") + m.data.SetDescription("Number of responses from upstream with 3xx status codes") + m.data.SetUnit("responses") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponses3xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersResponses3xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersResponses3xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponses3xx(cfg MetricConfig) metricNginxUpstreamPeersResponses3xx { + m := metricNginxUpstreamPeersResponses3xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersResponses4xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.responses.4xx metric with initial data. +func (m *metricNginxUpstreamPeersResponses4xx) init() { + m.data.SetName("nginx.upstream.peers.responses.4xx") + m.data.SetDescription("Number of responses from upstream with 4xx status codes") + m.data.SetUnit("responses") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponses4xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersResponses4xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricNginxUpstreamPeersResponses4xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponses4xx(cfg MetricConfig) metricNginxUpstreamPeersResponses4xx { + m := metricNginxUpstreamPeersResponses4xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersResponses5xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.responses.5xx metric with initial data. +func (m *metricNginxUpstreamPeersResponses5xx) init() { + m.data.SetName("nginx.upstream.peers.responses.5xx") + m.data.SetDescription("Number of responses from upstream with 5xx status codes") + m.data.SetUnit("responses") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponses5xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricNginxUpstreamPeersResponses5xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersResponses5xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponses5xx(cfg MetricConfig) metricNginxUpstreamPeersResponses5xx { + m := metricNginxUpstreamPeersResponses5xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxUpstreamPeersSent struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1022,33 +1396,93 @@ func newMetricNginxUpstreamPeersSent(cfg MetricConfig) metricNginxUpstreamPeersS return m } +type metricNginxUpstreamPeersWeight struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.weight metric with initial data. 
+func (m *metricNginxUpstreamPeersWeight) init() { + m.data.SetName("nginx.upstream.peers.weight") + m.data.SetDescription("Weight of upstream server") + m.data.SetUnit("weight") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersWeight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersWeight) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersWeight) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersWeight(cfg MetricConfig) metricNginxUpstreamPeersWeight { + m := metricNginxUpstreamPeersWeight{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. 
- startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. - metricNginxConnectionsAccepted metricNginxConnectionsAccepted - metricNginxConnectionsCurrent metricNginxConnectionsCurrent - metricNginxConnectionsHandled metricNginxConnectionsHandled - metricNginxLoadTimestamp metricNginxLoadTimestamp - metricNginxNetReading metricNginxNetReading - metricNginxNetWaiting metricNginxNetWaiting - metricNginxNetWriting metricNginxNetWriting - metricNginxRequests metricNginxRequests - metricNginxServerZoneReceived metricNginxServerZoneReceived - metricNginxServerZoneResponses1xx metricNginxServerZoneResponses1xx - metricNginxServerZoneResponses2xx metricNginxServerZoneResponses2xx - metricNginxServerZoneResponses3xx metricNginxServerZoneResponses3xx - metricNginxServerZoneResponses4xx metricNginxServerZoneResponses4xx - metricNginxServerZoneResponses5xx metricNginxServerZoneResponses5xx - metricNginxServerZoneSent metricNginxServerZoneSent - metricNginxUpstreamPeersReceived metricNginxUpstreamPeersReceived - metricNginxUpstreamPeersRequests metricNginxUpstreamPeersRequests - metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime - metricNginxUpstreamPeersSent metricNginxUpstreamPeersSent + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. 
+ metricNginxConnectionsAccepted metricNginxConnectionsAccepted + metricNginxConnectionsCurrent metricNginxConnectionsCurrent + metricNginxConnectionsHandled metricNginxConnectionsHandled + metricNginxLoadTimestamp metricNginxLoadTimestamp + metricNginxNetReading metricNginxNetReading + metricNginxNetWaiting metricNginxNetWaiting + metricNginxNetWriting metricNginxNetWriting + metricNginxRequests metricNginxRequests + metricNginxServerZoneReceived metricNginxServerZoneReceived + metricNginxServerZoneResponses1xx metricNginxServerZoneResponses1xx + metricNginxServerZoneResponses2xx metricNginxServerZoneResponses2xx + metricNginxServerZoneResponses3xx metricNginxServerZoneResponses3xx + metricNginxServerZoneResponses4xx metricNginxServerZoneResponses4xx + metricNginxServerZoneResponses5xx metricNginxServerZoneResponses5xx + metricNginxServerZoneSent metricNginxServerZoneSent + metricNginxUpstreamPeersBackup metricNginxUpstreamPeersBackup + metricNginxUpstreamPeersHealthChecksLastPassed metricNginxUpstreamPeersHealthChecksLastPassed + metricNginxUpstreamPeersReceived metricNginxUpstreamPeersReceived + metricNginxUpstreamPeersRequests metricNginxUpstreamPeersRequests + metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime + metricNginxUpstreamPeersResponses1xx metricNginxUpstreamPeersResponses1xx + metricNginxUpstreamPeersResponses2xx metricNginxUpstreamPeersResponses2xx + metricNginxUpstreamPeersResponses3xx metricNginxUpstreamPeersResponses3xx + metricNginxUpstreamPeersResponses4xx metricNginxUpstreamPeersResponses4xx + metricNginxUpstreamPeersResponses5xx metricNginxUpstreamPeersResponses5xx + metricNginxUpstreamPeersSent metricNginxUpstreamPeersSent + metricNginxUpstreamPeersWeight metricNginxUpstreamPeersWeight } // MetricBuilderOption applies changes to default metrics builder. 
@@ -1071,29 +1505,37 @@ func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(mbc.Metrics.NginxConnectionsAccepted), - metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(mbc.Metrics.NginxConnectionsCurrent), - metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(mbc.Metrics.NginxConnectionsHandled), - metricNginxLoadTimestamp: newMetricNginxLoadTimestamp(mbc.Metrics.NginxLoadTimestamp), - metricNginxNetReading: newMetricNginxNetReading(mbc.Metrics.NginxNetReading), - metricNginxNetWaiting: newMetricNginxNetWaiting(mbc.Metrics.NginxNetWaiting), - metricNginxNetWriting: newMetricNginxNetWriting(mbc.Metrics.NginxNetWriting), - metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), - metricNginxServerZoneReceived: newMetricNginxServerZoneReceived(mbc.Metrics.NginxServerZoneReceived), - metricNginxServerZoneResponses1xx: newMetricNginxServerZoneResponses1xx(mbc.Metrics.NginxServerZoneResponses1xx), - metricNginxServerZoneResponses2xx: newMetricNginxServerZoneResponses2xx(mbc.Metrics.NginxServerZoneResponses2xx), - metricNginxServerZoneResponses3xx: newMetricNginxServerZoneResponses3xx(mbc.Metrics.NginxServerZoneResponses3xx), - metricNginxServerZoneResponses4xx: newMetricNginxServerZoneResponses4xx(mbc.Metrics.NginxServerZoneResponses4xx), - metricNginxServerZoneResponses5xx: newMetricNginxServerZoneResponses5xx(mbc.Metrics.NginxServerZoneResponses5xx), - metricNginxServerZoneSent: newMetricNginxServerZoneSent(mbc.Metrics.NginxServerZoneSent), - metricNginxUpstreamPeersReceived: newMetricNginxUpstreamPeersReceived(mbc.Metrics.NginxUpstreamPeersReceived), - 
metricNginxUpstreamPeersRequests: newMetricNginxUpstreamPeersRequests(mbc.Metrics.NginxUpstreamPeersRequests), - metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), - metricNginxUpstreamPeersSent: newMetricNginxUpstreamPeersSent(mbc.Metrics.NginxUpstreamPeersSent), + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(mbc.Metrics.NginxConnectionsAccepted), + metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(mbc.Metrics.NginxConnectionsCurrent), + metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(mbc.Metrics.NginxConnectionsHandled), + metricNginxLoadTimestamp: newMetricNginxLoadTimestamp(mbc.Metrics.NginxLoadTimestamp), + metricNginxNetReading: newMetricNginxNetReading(mbc.Metrics.NginxNetReading), + metricNginxNetWaiting: newMetricNginxNetWaiting(mbc.Metrics.NginxNetWaiting), + metricNginxNetWriting: newMetricNginxNetWriting(mbc.Metrics.NginxNetWriting), + metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), + metricNginxServerZoneReceived: newMetricNginxServerZoneReceived(mbc.Metrics.NginxServerZoneReceived), + metricNginxServerZoneResponses1xx: newMetricNginxServerZoneResponses1xx(mbc.Metrics.NginxServerZoneResponses1xx), + metricNginxServerZoneResponses2xx: newMetricNginxServerZoneResponses2xx(mbc.Metrics.NginxServerZoneResponses2xx), + metricNginxServerZoneResponses3xx: newMetricNginxServerZoneResponses3xx(mbc.Metrics.NginxServerZoneResponses3xx), + metricNginxServerZoneResponses4xx: newMetricNginxServerZoneResponses4xx(mbc.Metrics.NginxServerZoneResponses4xx), + metricNginxServerZoneResponses5xx: newMetricNginxServerZoneResponses5xx(mbc.Metrics.NginxServerZoneResponses5xx), + metricNginxServerZoneSent: newMetricNginxServerZoneSent(mbc.Metrics.NginxServerZoneSent), + 
metricNginxUpstreamPeersBackup: newMetricNginxUpstreamPeersBackup(mbc.Metrics.NginxUpstreamPeersBackup), + metricNginxUpstreamPeersHealthChecksLastPassed: newMetricNginxUpstreamPeersHealthChecksLastPassed(mbc.Metrics.NginxUpstreamPeersHealthChecksLastPassed), + metricNginxUpstreamPeersReceived: newMetricNginxUpstreamPeersReceived(mbc.Metrics.NginxUpstreamPeersReceived), + metricNginxUpstreamPeersRequests: newMetricNginxUpstreamPeersRequests(mbc.Metrics.NginxUpstreamPeersRequests), + metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), + metricNginxUpstreamPeersResponses1xx: newMetricNginxUpstreamPeersResponses1xx(mbc.Metrics.NginxUpstreamPeersResponses1xx), + metricNginxUpstreamPeersResponses2xx: newMetricNginxUpstreamPeersResponses2xx(mbc.Metrics.NginxUpstreamPeersResponses2xx), + metricNginxUpstreamPeersResponses3xx: newMetricNginxUpstreamPeersResponses3xx(mbc.Metrics.NginxUpstreamPeersResponses3xx), + metricNginxUpstreamPeersResponses4xx: newMetricNginxUpstreamPeersResponses4xx(mbc.Metrics.NginxUpstreamPeersResponses4xx), + metricNginxUpstreamPeersResponses5xx: newMetricNginxUpstreamPeersResponses5xx(mbc.Metrics.NginxUpstreamPeersResponses5xx), + metricNginxUpstreamPeersSent: newMetricNginxUpstreamPeersSent(mbc.Metrics.NginxUpstreamPeersSent), + metricNginxUpstreamPeersWeight: newMetricNginxUpstreamPeersWeight(mbc.Metrics.NginxUpstreamPeersWeight), } for _, op := range options { @@ -1174,10 +1616,18 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxServerZoneResponses4xx.emit(ils.Metrics()) mb.metricNginxServerZoneResponses5xx.emit(ils.Metrics()) mb.metricNginxServerZoneSent.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersBackup.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersHealthChecksLastPassed.emit(ils.Metrics()) mb.metricNginxUpstreamPeersReceived.emit(ils.Metrics()) mb.metricNginxUpstreamPeersRequests.emit(ils.Metrics()) 
mb.metricNginxUpstreamPeersResponseTime.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses1xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses2xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses3xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses4xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses5xx.emit(ils.Metrics()) mb.metricNginxUpstreamPeersSent.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersWeight.emit(ils.Metrics()) for _, op := range options { op.apply(rm) @@ -1274,6 +1724,16 @@ func (mb *MetricsBuilder) RecordNginxServerZoneSentDataPoint(ts pcommon.Timestam mb.metricNginxServerZoneSent.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) } +// RecordNginxUpstreamPeersBackupDataPoint adds a data point to nginx.upstream.peers.backup metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersBackupDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersBackup.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + +// RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint adds a data point to nginx.upstream.peers.health_checks.last_passed metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersHealthChecksLastPassed.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + // RecordNginxUpstreamPeersReceivedDataPoint adds a data point to nginx.upstream.peers.received metric. 
func (mb *MetricsBuilder) RecordNginxUpstreamPeersReceivedDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { mb.metricNginxUpstreamPeersReceived.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) @@ -1289,11 +1749,41 @@ func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponseTimeDataPoint(ts pcomm mb.metricNginxUpstreamPeersResponseTime.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) } +// RecordNginxUpstreamPeersResponses1xxDataPoint adds a data point to nginx.upstream.peers.responses.1xx metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses1xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersResponses1xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + +// RecordNginxUpstreamPeersResponses2xxDataPoint adds a data point to nginx.upstream.peers.responses.2xx metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses2xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersResponses2xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + +// RecordNginxUpstreamPeersResponses3xxDataPoint adds a data point to nginx.upstream.peers.responses.3xx metric. 
+func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses3xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersResponses3xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + +// RecordNginxUpstreamPeersResponses4xxDataPoint adds a data point to nginx.upstream.peers.responses.4xx metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses4xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersResponses4xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + +// RecordNginxUpstreamPeersResponses5xxDataPoint adds a data point to nginx.upstream.peers.responses.5xx metric. +func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses5xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersResponses5xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + // RecordNginxUpstreamPeersSentDataPoint adds a data point to nginx.upstream.peers.sent metric. func (mb *MetricsBuilder) RecordNginxUpstreamPeersSentDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { mb.metricNginxUpstreamPeersSent.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) } +// RecordNginxUpstreamPeersWeightDataPoint adds a data point to nginx.upstream.peers.weight metric. 
+func (mb *MetricsBuilder) RecordNginxUpstreamPeersWeightDataPoint(ts pcommon.Timestamp, val float64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersWeight.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index 1a7160c510bb..b1a6ca6924e0 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -119,6 +119,14 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxServerZoneSentDataPoint(ts, 1, "serverzone_name-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersBackupDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxUpstreamPeersReceivedDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") @@ -131,10 +139,34 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxUpstreamPeersResponseTimeDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses1xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + 
mb.RecordNginxUpstreamPeersResponses2xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses3xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses4xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses5xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxUpstreamPeersSentDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersWeightDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) @@ -373,6 +405,42 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("serverzone_name") assert.True(t, ok) assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.upstream.peers.backup": + assert.False(t, validatedMetrics["nginx.upstream.peers.backup"], "Found a duplicate in the metrics slice: nginx.upstream.peers.backup") + validatedMetrics["nginx.upstream.peers.backup"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Whether upstream server is a backup server", ms.At(i).Description()) + assert.Equal(t, "{state}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, 
"upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.health_checks.last_passed": + assert.False(t, validatedMetrics["nginx.upstream.peers.health_checks.last_passed"], "Found a duplicate in the metrics slice: nginx.upstream.peers.health_checks.last_passed") + validatedMetrics["nginx.upstream.peers.health_checks.last_passed"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Boolean indicating if the last health check request was successful and passed tests.", ms.At(i).Description()) + assert.Equal(t, "{status}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) case "nginx.upstream.peers.received": assert.False(t, validatedMetrics["nginx.upstream.peers.received"], "Found a duplicate in the metrics slice: nginx.upstream.peers.received") validatedMetrics["nginx.upstream.peers.received"] = true @@ -431,6 +499,106 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok = dp.Attributes().Get("upstream_peer_address") assert.True(t, ok) assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.1xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.1xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.1xx") + 
validatedMetrics["nginx.upstream.peers.responses.1xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 1xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.2xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.2xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.2xx") + validatedMetrics["nginx.upstream.peers.responses.2xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 2xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + 
assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.3xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.3xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.3xx") + validatedMetrics["nginx.upstream.peers.responses.3xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 3xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.4xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.4xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.4xx") + validatedMetrics["nginx.upstream.peers.responses.4xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 4xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, 
ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.5xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.5xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.5xx") + validatedMetrics["nginx.upstream.peers.responses.5xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 5xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) case "nginx.upstream.peers.sent": assert.False(t, validatedMetrics["nginx.upstream.peers.sent"], "Found a duplicate 
in the metrics slice: nginx.upstream.peers.sent") validatedMetrics["nginx.upstream.peers.sent"] = true @@ -451,6 +619,24 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok = dp.Attributes().Get("upstream_peer_address") assert.True(t, ok) assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.weight": + assert.False(t, validatedMetrics["nginx.upstream.peers.weight"], "Found a duplicate in the metrics slice: nginx.upstream.peers.weight") + validatedMetrics["nginx.upstream.peers.weight"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Weight of upstream server", ms.At(i).Description()) + assert.Equal(t, "weight", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) } } }) diff --git a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml index 6df5912e1437..3a896e7cdadd 100644 --- a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml @@ -31,14 +31,30 @@ all_set: enabled: true nginx.server_zone.sent: enabled: true + nginx.upstream.peers.backup: + enabled: true + nginx.upstream.peers.health_checks.last_passed: + enabled: true nginx.upstream.peers.received: enabled: true nginx.upstream.peers.requests: enabled: true nginx.upstream.peers.response_time: enabled: true + 
nginx.upstream.peers.responses.1xx: + enabled: true + nginx.upstream.peers.responses.2xx: + enabled: true + nginx.upstream.peers.responses.3xx: + enabled: true + nginx.upstream.peers.responses.4xx: + enabled: true + nginx.upstream.peers.responses.5xx: + enabled: true nginx.upstream.peers.sent: enabled: true + nginx.upstream.peers.weight: + enabled: true none_set: metrics: nginx.connections_accepted: @@ -71,11 +87,27 @@ none_set: enabled: false nginx.server_zone.sent: enabled: false + nginx.upstream.peers.backup: + enabled: false + nginx.upstream.peers.health_checks.last_passed: + enabled: false nginx.upstream.peers.received: enabled: false nginx.upstream.peers.requests: enabled: false nginx.upstream.peers.response_time: enabled: false + nginx.upstream.peers.responses.1xx: + enabled: false + nginx.upstream.peers.responses.2xx: + enabled: false + nginx.upstream.peers.responses.3xx: + enabled: false + nginx.upstream.peers.responses.4xx: + enabled: false + nginx.upstream.peers.responses.5xx: + enabled: false nginx.upstream.peers.sent: enabled: false + nginx.upstream.peers.weight: + enabled: false diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 0a184b8a681d..245508c9d00e 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -215,4 +215,106 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - unit: By \ No newline at end of file + unit: By + + ### Response Codes +# - `nginx.upstream.peers.responses.1xx` - Number of responses from upstream with 1xx status codes +# - `nginx.upstream.peers.responses.2xx` - Number of responses from upstream with 2xx status codes +# - `nginx.upstream.peers.responses.3xx` - Number of responses from upstream with 3xx status codes +# - `nginx.upstream.peers.responses.4xx` - Number of responses from upstream with 4xx status codes +# - `nginx.upstream.peers.responses.5xx` - Number of responses from upstream with 5xx status codes + 
+ nginx.upstream.peers.responses.1xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 1xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.2xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 2xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.3xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 3xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.4xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 4xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.5xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 5xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + ### Server Configuration +# - `nginx.upstream.peers.weight` - Weight of upstream server +# - `nginx.upstream.peers.backup` - Whether upstream server is a backup server +# - `nginx.upstream.peers.health_checks.last_passed` - Status of last health check for upstream server + + nginx.upstream.peers.weight: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Weight of upstream server + gauge: + value_type: double + unit: weight + + nginx.upstream.peers.backup: + enabled: true + attributes: + - 
upstream_block_name + - upstream_peer_address + description: Whether upstream server is a backup server + gauge: + value_type: int + unit: "{state}" + + nginx.upstream.peers.health_checks.last_passed: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Boolean indicating if the last health check request was successful and passed tests. + gauge: + value_type: int + unit: "{status}" \ No newline at end of file diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 14e8c127fbe9..5ab3dfb3b762 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -96,18 +96,12 @@ func (r *nginxScraper) recordVtsStats(now pcommon.Timestamp, vtsStats *NginxVtsS r.recordVtsConnectionStats(now, vtsStats) r.recordVtsServerZoneResponseStats(now, vtsStats) r.recordVtsServerZoneTrafficStats(now, vtsStats) - r.recordVtsUpstreamRequestTrafficStats(now, vtsStats) + r.recordVtsUpstreamStats(now, vtsStats) } -func (r *nginxScraper) recordVtsUpstreamRequestTrafficStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { +func (r *nginxScraper) recordVtsUpstreamStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { for upstreamZoneName, upstreamZoneServers := range vtsStats.UpstreamZones { for _, upstreamZoneServer := range upstreamZoneServers { - // pp.Println(upstreamZoneName) - // pp.Println("Upstream Zone Server Address: ", upstreamZoneServer.Server) - // pp.Println("Reqs made to this server: ", upstreamZoneServer.RequestCounter) - // pp.Println("Bytes received by this server: ", upstreamZoneServer.InBytes) - // pp.Println("Bytes sent by this server: ", upstreamZoneServer.OutBytes) - r.mb.RecordNginxUpstreamPeersRequestsDataPoint( now, upstreamZoneServer.RequestCounter, upstreamZoneName, upstreamZoneServer.Server, ) @@ -117,6 +111,32 @@ func (r *nginxScraper) recordVtsUpstreamRequestTrafficStats(now pcommon.Timestam r.mb.RecordNginxUpstreamPeersSentDataPoint( now, 
upstreamZoneServer.OutBytes, upstreamZoneName, upstreamZoneServer.Server, ) + + r.mb.RecordNginxUpstreamPeersResponses1xxDataPoint( + now, upstreamZoneServer.Responses.Status1xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses2xxDataPoint( + now, upstreamZoneServer.Responses.Status2xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses3xxDataPoint( + now, upstreamZoneServer.Responses.Status3xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses4xxDataPoint( + now, upstreamZoneServer.Responses.Status4xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses5xxDataPoint( + now, upstreamZoneServer.Responses.Status5xx, upstreamZoneName, upstreamZoneServer.Server, + ) + + r.mb.RecordNginxUpstreamPeersWeightDataPoint( + now, upstreamZoneServer.Weight, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersBackupDataPoint( + now, int64(boolToInt(upstreamZoneServer.Backup)), upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint( + now, int64(boolToInt(upstreamZoneServer.Down)), upstreamZoneName, upstreamZoneServer.Server, + ) } } } @@ -160,8 +180,6 @@ func (r *nginxScraper) recordVtsConnectionStats(now pcommon.Timestamp, vtsStats func (r *nginxScraper) recordTimingStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { - // r.mb.RecordNginxLoadTimestampDataPoint(now, vtsStats.LoadMsec) - for upstreamZones, v := range vtsStats.UpstreamZones { for _, val := range v { pp.Println(val.Server) @@ -173,3 +191,11 @@ func (r *nginxScraper) recordTimingStats(now pcommon.Timestamp, vtsStats *NginxV } } } + +func boolToInt(bitSet bool) int8 { + var bitSetVar int8 + if bitSet { + bitSetVar = 1 + } + return bitSetVar +} diff --git a/receiver/nginxreceiver/vts_stats.go b/receiver/nginxreceiver/vts_stats.go index 0b7dd88d16bf..52eec4f9aa91 100644 --- 
a/receiver/nginxreceiver/vts_stats.go +++ b/receiver/nginxreceiver/vts_stats.go @@ -85,7 +85,7 @@ type UpstreamServer struct { ResponseMsec int64 `json:"responseMsec"` ResponseMsecs RequestMetrics `json:"responseMsecs"` ResponseBuckets RequestBuckets `json:"responseBuckets"` - Weight int `json:"weight"` + Weight float64 `json:"weight"` MaxFails int `json:"maxFails"` FailTimeout int `json:"failTimeout"` Backup bool `json:"backup"` From f9a0001683f1716a561320eadcbb6b5754dceefb Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Mon, 16 Dec 2024 11:32:07 +0530 Subject: [PATCH 7/9] Removed print statements' --- receiver/nginxreceiver/scraper.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 5ab3dfb3b762..3c0fd0fee7ff 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -14,7 +14,6 @@ import ( "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" - "github.com/k0kubun/pp" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver/internal/metadata" ) @@ -182,8 +181,6 @@ func (r *nginxScraper) recordTimingStats(now pcommon.Timestamp, vtsStats *NginxV for upstreamZones, v := range vtsStats.UpstreamZones { for _, val := range v { - pp.Println(val.Server) - pp.Println(val.ResponseMsec) r.mb.RecordNginxUpstreamPeersResponseTimeDataPoint( now, val.ResponseMsec, upstreamZones, val.Server, From 395f976faee23632fa14b60590713c4337cd06df Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Thu, 12 Dec 2024 14:21:25 +0530 Subject: [PATCH 8/9] some cleanup --- receiver/nginxreceiver/documentation.md | 18 ++++---- .../internal/metadata/generated_metrics.go | 42 +++++++++++-------- .../metadata/generated_metrics_test.go | 24 +++++++---- receiver/nginxreceiver/metadata.yaml | 41 ++++++------------ receiver/nginxreceiver/scraper.go | 3 -- 5 files changed, 60 insertions(+), 68 deletions(-) diff --git a/receiver/nginxreceiver/documentation.md 
b/receiver/nginxreceiver/documentation.md index 8ca02722b47b..b83659e28c6a 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -114,9 +114,9 @@ The number of responses with 1xx status code. The number of responses with 2xx status code. -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| response | Gauge | Int | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | #### Attributes @@ -142,9 +142,9 @@ The number of responses with 3xx status code. The number of responses with 4xx status code. -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| response | Gauge | Int | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | #### Attributes @@ -156,9 +156,9 @@ The number of responses with 4xx status code. The number of responses with 5xx status code. 
-| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| response | Gauge | Int | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | #### Attributes diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index b118663ad195..f801ffd275c8 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -562,15 +562,17 @@ func (m *metricNginxServerZoneResponses2xx) init() { m.data.SetName("nginx.server_zone.responses.2xx") m.data.SetDescription("The number of responses with 2xx status code.") m.data.SetUnit("response") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricNginxServerZoneResponses2xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) @@ -579,14 +581,14 @@ func (m *metricNginxServerZoneResponses2xx) recordDataPoint(start pcommon.Timest // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
func (m *metricNginxServerZoneResponses2xx) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricNginxServerZoneResponses2xx) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() @@ -664,15 +666,17 @@ func (m *metricNginxServerZoneResponses4xx) init() { m.data.SetName("nginx.server_zone.responses.4xx") m.data.SetDescription("The number of responses with 4xx status code.") m.data.SetUnit("response") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricNginxServerZoneResponses4xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) @@ -681,14 +685,14 @@ func (m *metricNginxServerZoneResponses4xx) recordDataPoint(start pcommon.Timest // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
func (m *metricNginxServerZoneResponses4xx) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricNginxServerZoneResponses4xx) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() @@ -715,15 +719,17 @@ func (m *metricNginxServerZoneResponses5xx) init() { m.data.SetName("nginx.server_zone.responses.5xx") m.data.SetDescription("The number of responses with 5xx status code.") m.data.SetUnit("response") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricNginxServerZoneResponses5xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) @@ -732,14 +738,14 @@ func (m *metricNginxServerZoneResponses5xx) recordDataPoint(start pcommon.Timest // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
func (m *metricNginxServerZoneResponses5xx) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricNginxServerZoneResponses5xx) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index b1a6ca6924e0..f82f6349ff29 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -331,11 +331,13 @@ func TestMetricsBuilder(t *testing.T) { case "nginx.server_zone.responses.2xx": assert.False(t, validatedMetrics["nginx.server_zone.responses.2xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.2xx") validatedMetrics["nginx.server_zone.responses.2xx"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses with 2xx status code.", ms.At(i).Description()) assert.Equal(t, "response", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, 
pmetric.NumberDataPointValueTypeInt, dp.ValueType()) @@ -361,11 +363,13 @@ func TestMetricsBuilder(t *testing.T) { case "nginx.server_zone.responses.4xx": assert.False(t, validatedMetrics["nginx.server_zone.responses.4xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.4xx") validatedMetrics["nginx.server_zone.responses.4xx"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses with 4xx status code.", ms.At(i).Description()) assert.Equal(t, "response", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) @@ -376,11 +380,13 @@ func TestMetricsBuilder(t *testing.T) { case "nginx.server_zone.responses.5xx": assert.False(t, validatedMetrics["nginx.server_zone.responses.5xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.5xx") validatedMetrics["nginx.server_zone.responses.5xx"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses with 5xx status code.", ms.At(i).Description()) assert.Equal(t, "response", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := 
ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 245508c9d00e..8deecac1fc7d 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -36,7 +36,7 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - attributes: [] + attributes: [] nginx.connections_accepted: enabled: true description: The total number of accepted client connections @@ -119,11 +119,12 @@ metrics: attributes: - serverzone_name description: The number of responses with 2xx status code. - gauge: + sum: value_type: int + monotonic: true + aggregation_temporality: cumulative unit: response - nginx.server_zone.responses.3xx: enabled: true attributes: @@ -139,21 +140,23 @@ metrics: attributes: - serverzone_name description: The number of responses with 4xx status code. - gauge: + sum: value_type: int + monotonic: true + aggregation_temporality: cumulative unit: response - nginx.server_zone.responses.5xx: enabled: true attributes: - serverzone_name description: The number of responses with 5xx status code. 
- gauge: + sum: value_type: int + monotonic: true + aggregation_temporality: cumulative unit: response - nginx.server_zone.received: enabled: true attributes: @@ -164,7 +167,6 @@ metrics: monotonic: true aggregation_temporality: cumulative unit: By - nginx.server_zone.sent: enabled: true attributes: @@ -175,12 +177,6 @@ metrics: monotonic: true aggregation_temporality: cumulative unit: By - - ### Request and Traffic -# - `nginx.upstream.peers.requests` - Number of requests made to upstream servers -# - `nginx.upstream.peers.received` - Bytes received from upstream servers -# - `nginx.upstream.peers.sent` - Bytes sent to upstream servers - nginx.upstream.peers.requests: enabled: true attributes: @@ -192,7 +188,6 @@ metrics: monotonic: true aggregation_temporality: cumulative unit: requests - nginx.upstream.peers.received: enabled: true attributes: @@ -217,13 +212,6 @@ metrics: aggregation_temporality: cumulative unit: By - ### Response Codes -# - `nginx.upstream.peers.responses.1xx` - Number of responses from upstream with 1xx status codes -# - `nginx.upstream.peers.responses.2xx` - Number of responses from upstream with 2xx status codes -# - `nginx.upstream.peers.responses.3xx` - Number of responses from upstream with 3xx status codes -# - `nginx.upstream.peers.responses.4xx` - Number of responses from upstream with 4xx status codes -# - `nginx.upstream.peers.responses.5xx` - Number of responses from upstream with 5xx status codes - nginx.upstream.peers.responses.1xx: enabled: true attributes: @@ -247,7 +235,7 @@ metrics: monotonic: true aggregation_temporality: cumulative unit: responses - + nginx.upstream.peers.responses.3xx: enabled: true attributes: @@ -271,7 +259,7 @@ metrics: monotonic: true aggregation_temporality: cumulative unit: responses - + nginx.upstream.peers.responses.5xx: enabled: true attributes: @@ -283,11 +271,6 @@ metrics: monotonic: true aggregation_temporality: cumulative unit: responses - - ### Server Configuration -# - 
`nginx.upstream.peers.weight` - Weight of upstream server -# - `nginx.upstream.peers.backup` - Whether upstream server is a backup server -# - `nginx.upstream.peers.health_checks.last_passed` - Status of last health check for upstream server nginx.upstream.peers.weight: enabled: true diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 3c0fd0fee7ff..7f3f1ebc9ad4 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -49,7 +49,6 @@ func (r *nginxScraper) start(ctx context.Context, host component.Host) error { } func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { - // Init client in scrape method in case there are transient errors in the constructor. if r.client == nil { var err error r.client, err = NewNginxClient(r.httpClient, r.cfg.ClientConfig.Endpoint, r.cfg.VTSEndpoint) @@ -73,8 +72,6 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { return pmetric.Metrics{}, err } - // pp.Println(vtsStats) - now := pcommon.NewTimestampFromTime(time.Now()) r.recordVtsStats(now, vtsStats) From 0b93d5990c239f7d8dea4ffe404ece97e41295e0 Mon Sep 17 00:00:00 2001 From: naman47vyas Date: Mon, 16 Dec 2024 14:53:49 +0530 Subject: [PATCH 9/9] some response code metrics had incorrect type:gauge. 
Change it to sum --- receiver/nginxreceiver/README.md | 2 +- receiver/nginxreceiver/documentation.md | 12 +-- .../nginxreceiver/generated_component_test.go | 14 ++-- .../metadata/generated_config_test.go | 5 +- .../internal/metadata/generated_metrics.go | 82 ++++++++----------- .../metadata/generated_metrics_test.go | 36 ++++---- .../internal/metadata/generated_status.go | 3 +- receiver/nginxreceiver/metadata.yaml | 8 +- 8 files changed, 79 insertions(+), 83 deletions(-) diff --git a/receiver/nginxreceiver/README.md b/receiver/nginxreceiver/README.md index 23ba20634461..f2a5517c6eeb 100644 --- a/receiver/nginxreceiver/README.md +++ b/receiver/nginxreceiver/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fnginx%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fnginx) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fnginx%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fnginx) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@djaglowski](https://www.github.com/djaglowski) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index b83659e28c6a..b458bc070701 100644 --- 
a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -100,9 +100,9 @@ Bytes received by server zones The number of responses with 1xx status code. -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| response | Gauge | Int | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | #### Attributes @@ -128,9 +128,9 @@ The number of responses with 2xx status code. The number of responses with 3xx status code. -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| response | Gauge | Int | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | #### Attributes diff --git a/receiver/nginxreceiver/generated_component_test.go b/receiver/nginxreceiver/generated_component_test.go index 4b7536b3a88a..b64e62ed18b6 100644 --- a/receiver/nginxreceiver/generated_component_test.go +++ b/receiver/nginxreceiver/generated_component_test.go @@ -34,7 +34,7 @@ func TestComponentLifecycle(t *testing.T) { { name: "metrics", createFn: func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) { - return factory.CreateMetrics(ctx, set, cfg, consumertest.NewNop()) + return factory.CreateMetricsReceiver(ctx, set, cfg, consumertest.NewNop()) }, }, } @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, tt := range tests { - t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) 
require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index e442b9485b60..9403275ba450 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -93,8 +93,9 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index f801ffd275c8..02f3e7a2615b 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ 
b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -511,15 +511,17 @@ func (m *metricNginxServerZoneResponses1xx) init() { m.data.SetName("nginx.server_zone.responses.1xx") m.data.SetDescription("The number of responses with 1xx status code.") m.data.SetUnit("response") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricNginxServerZoneResponses1xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) @@ -528,14 +530,14 @@ func (m *metricNginxServerZoneResponses1xx) recordDataPoint(start pcommon.Timest // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricNginxServerZoneResponses1xx) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
func (m *metricNginxServerZoneResponses1xx) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() @@ -615,15 +617,17 @@ func (m *metricNginxServerZoneResponses3xx) init() { m.data.SetName("nginx.server_zone.responses.3xx") m.data.SetDescription("The number of responses with 3xx status code.") m.data.SetUnit("response") - m.data.SetEmptyGauge() - m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricNginxServerZoneResponses3xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { if !m.config.Enabled { return } - dp := m.data.Gauge().DataPoints().AppendEmpty() + dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) @@ -632,14 +636,14 @@ func (m *metricNginxServerZoneResponses3xx) recordDataPoint(start pcommon.Timest // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricNginxServerZoneResponses3xx) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
func (m *metricNginxServerZoneResponses3xx) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() @@ -1491,25 +1495,17 @@ type MetricsBuilder struct { metricNginxUpstreamPeersWeight metricNginxUpstreamPeersWeight } -// MetricBuilderOption applies changes to default metrics builder. -type MetricBuilderOption interface { - apply(*MetricsBuilder) -} - -type metricBuilderOptionFunc func(mb *MetricsBuilder) - -func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { - mbof(mb) -} +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { - return metricBuilderOptionFunc(func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { mb.startTime = startTime - }) + } } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -1545,7 +1541,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op.apply(mb) + op(mb) } return mb } @@ -1558,28 +1554,20 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. 
-type ResourceMetricsOption interface { - apply(pmetric.ResourceMetrics) -} - -type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) - -func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { - rmof(rm) -} +type ResourceMetricsOption func(pmetric.ResourceMetrics) // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - }) + } } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -1593,7 +1581,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - }) + } } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -1601,7 +1589,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. 
-func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver") @@ -1635,8 +1623,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxUpstreamPeersSent.emit(ils.Metrics()) mb.metricNginxUpstreamPeersWeight.emit(ils.Metrics()) - for _, op := range options { - op.apply(rm) + for _, op := range rmo { + op(rm) } if ils.Metrics().Len() > 0 { @@ -1648,8 +1636,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(options...) +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -1792,9 +1780,9 @@ func (mb *MetricsBuilder) RecordNginxUpstreamPeersWeightDataPoint(ts pcommon.Tim // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
-func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op.apply(mb) + op(mb) } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index f82f6349ff29..c6e9c2377d23 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -170,7 +170,7 @@ func TestMetricsBuilder(t *testing.T) { res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if tt.expectEmpty { + if test.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -180,10 +180,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if tt.metricsSet == testDataSetDefault { + if test.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if tt.metricsSet == testDataSetAll { + if test.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -196,7 +196,7 @@ func 
TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of accepted client connections", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -210,7 +210,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The current number of nginx connections by state", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.False(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -227,7 +227,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of handled connections. 
Generally, the parameter value is the same as nginx.connections_accepted unless some resource limits have been reached (for example, the worker_connections limit).", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -289,7 +289,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Total number of requests made to the server since it started", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -316,11 +316,13 @@ func TestMetricsBuilder(t *testing.T) { case "nginx.server_zone.responses.1xx": assert.False(t, validatedMetrics["nginx.server_zone.responses.1xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.1xx") validatedMetrics["nginx.server_zone.responses.1xx"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses with 1xx status code.", ms.At(i).Description()) assert.Equal(t, "response", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, 
start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) @@ -348,11 +350,13 @@ func TestMetricsBuilder(t *testing.T) { case "nginx.server_zone.responses.3xx": assert.False(t, validatedMetrics["nginx.server_zone.responses.3xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.3xx") validatedMetrics["nginx.server_zone.responses.3xx"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses with 3xx status code.", ms.At(i).Description()) assert.Equal(t, "response", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) diff --git a/receiver/nginxreceiver/internal/metadata/generated_status.go b/receiver/nginxreceiver/internal/metadata/generated_status.go index acfba477b8fd..f4cd74a5e478 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_status.go +++ b/receiver/nginxreceiver/internal/metadata/generated_status.go @@ -7,8 +7,7 @@ import ( ) var ( - Type = component.MustNewType("nginx") - ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver" + Type = component.MustNewType("nginx") ) const ( diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 8deecac1fc7d..4d4b58b2f968 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -109,8 +109,10 @@ metrics: attributes: - serverzone_name 
description: The number of responses with 1xx status code. - gauge: + sum: value_type: int + monotonic: true + aggregation_temporality: cumulative unit: response @@ -130,8 +132,10 @@ metrics: attributes: - serverzone_name description: The number of responses with 3xx status code. - gauge: + sum: value_type: int + monotonic: true + aggregation_temporality: cumulative unit: response