diff --git a/receiver/nginxreceiver/README.md b/receiver/nginxreceiver/README.md index 23ba20634461..f2a5517c6eeb 100644 --- a/receiver/nginxreceiver/README.md +++ b/receiver/nginxreceiver/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fnginx%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fnginx) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fnginx%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fnginx) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@djaglowski](https://www.github.com/djaglowski) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/receiver/nginxreceiver/config.go b/receiver/nginxreceiver/config.go index 4a81800a208f..abf7f7a35647 100644 --- a/receiver/nginxreceiver/config.go +++ b/receiver/nginxreceiver/config.go @@ -14,4 +14,6 @@ type Config struct { scraperhelper.ControllerConfig `mapstructure:",squash"` confighttp.ClientConfig `mapstructure:",squash"` MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"` + + VTSEndpoint string `mapstructure:"vts_endpoint"` } diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index 3dd32a196cde..b458bc070701 100644 --- 
a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -42,6 +42,38 @@ The total number of handled connections. Generally, the parameter value is the s | ---- | ----------- | ---------- | ----------------------- | --------- | | connections | Sum | Int | Cumulative | true | +### nginx.load_timestamp + +Time of the last reload of configuration (time since Epoch). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +### nginx.net.reading + +Current number of connections where NGINX is reading the request header + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| connections | Gauge | Int | + +### nginx.net.waiting + +Current number of connections where NGINX is waiting the response back to the client + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| connections | Gauge | Int | + +### nginx.net.writing + +Current number of connections where NGINX is writing the response back to the client + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| connections | Gauge | Int | + ### nginx.requests Total number of requests made to the server since it started @@ -49,3 +81,281 @@ Total number of requests made to the server since it started | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | | requests | Sum | Int | Cumulative | true | + +### nginx.server_zone.received + +Bytes received by server zones + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.1xx + +The number of responses with 1xx status code. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.2xx + +The number of responses with 2xx status code. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.3xx + +The number of responses with 3xx status code. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.4xx + +The number of responses with 4xx status code. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.responses.5xx + +The number of responses with 5xx status code. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| response | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.server_zone.sent + +Bytes sent by server zones + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| serverzone_name | The name of serverzone | Any Str | + +### nginx.upstream.peers.backup + +Whether upstream server is a backup server + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {state} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.health_checks.last_passed + +Boolean indicating if the last health check request was successful and passed tests. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {status} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.received + +Bytes received from upstream servers + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.requests + +Number of requests made to upstream servers + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| requests | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.response_time + +The average time to receive the last byte of data from this server. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.responses.1xx + +Number of responses from upstream with 1xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.responses.2xx + +Number of responses from upstream with 2xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.responses.3xx + +Number of responses from upstream with 3xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.responses.4xx + +Number of responses from upstream with 4xx 
status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.responses.5xx + +Number of responses from upstream with 5xx status codes + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| responses | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.sent + +Bytes sent from upstream servers + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | + +### nginx.upstream.peers.weight + +Weight of upstream server + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| weight | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| upstream_block_name | The name of the upstream block | Any Str | +| upstream_peer_address | The address of the upstream server | Any Str | diff --git a/receiver/nginxreceiver/generated_component_test.go b/receiver/nginxreceiver/generated_component_test.go index 
4b7536b3a88a..b64e62ed18b6 100644 --- a/receiver/nginxreceiver/generated_component_test.go +++ b/receiver/nginxreceiver/generated_component_test.go @@ -34,7 +34,7 @@ func TestComponentLifecycle(t *testing.T) { { name: "metrics", createFn: func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) { - return factory.CreateMetrics(ctx, set, cfg, consumertest.NewNop()) + return factory.CreateMetricsReceiver(ctx, set, cfg, consumertest.NewNop()) }, }, } @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, tt := range tests { - t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/receiver/nginxreceiver/generated_package_test.go b/receiver/nginxreceiver/generated_package_test.go index 
4f3af726f855..a48783927193 100644 --- a/receiver/nginxreceiver/generated_package_test.go +++ b/receiver/nginxreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package nginxreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/nginxreceiver/go.mod b/receiver/nginxreceiver/go.mod index 43bb6702f8c6..9cc55c4fe047 100644 --- a/receiver/nginxreceiver/go.mod +++ b/receiver/nginxreceiver/go.mod @@ -4,7 +4,6 @@ go 1.22.0 require ( github.com/google/go-cmp v0.6.0 - github.com/nginxinc/nginx-prometheus-exporter v0.11.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 @@ -25,6 +24,12 @@ require ( go.uber.org/zap v1.27.0 ) +require ( + github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect +) + require ( dario.cat/mergo v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect diff --git a/receiver/nginxreceiver/go.sum b/receiver/nginxreceiver/go.sum index 718ece24b715..6df42f1dd3c5 100644 --- a/receiver/nginxreceiver/go.sum +++ b/receiver/nginxreceiver/go.sum @@ -56,6 +56,10 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod 
h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= +github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= @@ -74,6 +78,10 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -97,8 +105,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/nginxinc/nginx-prometheus-exporter v0.11.0 h1:21xjnqNgxtni2jDgAQ90bl15uDnrTreO9sIlu1YsX/U= 
-github.com/nginxinc/nginx-prometheus-exporter v0.11.0/go.mod h1:GdyHnWAb8q8OW1Pssrrqbcqra0SH0Vn6UXICMmyWkw8= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -239,6 +245,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= diff --git a/receiver/nginxreceiver/internal/metadata/generated_config.go b/receiver/nginxreceiver/internal/metadata/generated_config.go index e7a5fdfedeb2..d9df76607844 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config.go @@ -27,10 +27,33 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for nginx metrics. 
type MetricsConfig struct { - NginxConnectionsAccepted MetricConfig `mapstructure:"nginx.connections_accepted"` - NginxConnectionsCurrent MetricConfig `mapstructure:"nginx.connections_current"` - NginxConnectionsHandled MetricConfig `mapstructure:"nginx.connections_handled"` - NginxRequests MetricConfig `mapstructure:"nginx.requests"` + NginxConnectionsAccepted MetricConfig `mapstructure:"nginx.connections_accepted"` + NginxConnectionsCurrent MetricConfig `mapstructure:"nginx.connections_current"` + NginxConnectionsHandled MetricConfig `mapstructure:"nginx.connections_handled"` + NginxLoadTimestamp MetricConfig `mapstructure:"nginx.load_timestamp"` + NginxNetReading MetricConfig `mapstructure:"nginx.net.reading"` + NginxNetWaiting MetricConfig `mapstructure:"nginx.net.waiting"` + NginxNetWriting MetricConfig `mapstructure:"nginx.net.writing"` + NginxRequests MetricConfig `mapstructure:"nginx.requests"` + NginxServerZoneReceived MetricConfig `mapstructure:"nginx.server_zone.received"` + NginxServerZoneResponses1xx MetricConfig `mapstructure:"nginx.server_zone.responses.1xx"` + NginxServerZoneResponses2xx MetricConfig `mapstructure:"nginx.server_zone.responses.2xx"` + NginxServerZoneResponses3xx MetricConfig `mapstructure:"nginx.server_zone.responses.3xx"` + NginxServerZoneResponses4xx MetricConfig `mapstructure:"nginx.server_zone.responses.4xx"` + NginxServerZoneResponses5xx MetricConfig `mapstructure:"nginx.server_zone.responses.5xx"` + NginxServerZoneSent MetricConfig `mapstructure:"nginx.server_zone.sent"` + NginxUpstreamPeersBackup MetricConfig `mapstructure:"nginx.upstream.peers.backup"` + NginxUpstreamPeersHealthChecksLastPassed MetricConfig `mapstructure:"nginx.upstream.peers.health_checks.last_passed"` + NginxUpstreamPeersReceived MetricConfig `mapstructure:"nginx.upstream.peers.received"` + NginxUpstreamPeersRequests MetricConfig `mapstructure:"nginx.upstream.peers.requests"` + NginxUpstreamPeersResponseTime MetricConfig 
`mapstructure:"nginx.upstream.peers.response_time"` + NginxUpstreamPeersResponses1xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.1xx"` + NginxUpstreamPeersResponses2xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.2xx"` + NginxUpstreamPeersResponses3xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.3xx"` + NginxUpstreamPeersResponses4xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.4xx"` + NginxUpstreamPeersResponses5xx MetricConfig `mapstructure:"nginx.upstream.peers.responses.5xx"` + NginxUpstreamPeersSent MetricConfig `mapstructure:"nginx.upstream.peers.sent"` + NginxUpstreamPeersWeight MetricConfig `mapstructure:"nginx.upstream.peers.weight"` } func DefaultMetricsConfig() MetricsConfig { @@ -44,9 +67,78 @@ func DefaultMetricsConfig() MetricsConfig { NginxConnectionsHandled: MetricConfig{ Enabled: true, }, + NginxLoadTimestamp: MetricConfig{ + Enabled: true, + }, + NginxNetReading: MetricConfig{ + Enabled: true, + }, + NginxNetWaiting: MetricConfig{ + Enabled: true, + }, + NginxNetWriting: MetricConfig{ + Enabled: true, + }, NginxRequests: MetricConfig{ Enabled: true, }, + NginxServerZoneReceived: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses1xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses2xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses3xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses4xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneResponses5xx: MetricConfig{ + Enabled: true, + }, + NginxServerZoneSent: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersBackup: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersHealthChecksLastPassed: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersReceived: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersRequests: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponseTime: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses1xx: MetricConfig{ + 
Enabled: true, + }, + NginxUpstreamPeersResponses2xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses3xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses4xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersResponses5xx: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersSent: MetricConfig{ + Enabled: true, + }, + NginxUpstreamPeersWeight: MetricConfig{ + Enabled: true, + }, } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_config_test.go b/receiver/nginxreceiver/internal/metadata/generated_config_test.go index 6613f525b5bf..9403275ba450 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_config_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_config_test.go @@ -25,10 +25,33 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - NginxConnectionsAccepted: MetricConfig{Enabled: true}, - NginxConnectionsCurrent: MetricConfig{Enabled: true}, - NginxConnectionsHandled: MetricConfig{Enabled: true}, - NginxRequests: MetricConfig{Enabled: true}, + NginxConnectionsAccepted: MetricConfig{Enabled: true}, + NginxConnectionsCurrent: MetricConfig{Enabled: true}, + NginxConnectionsHandled: MetricConfig{Enabled: true}, + NginxLoadTimestamp: MetricConfig{Enabled: true}, + NginxNetReading: MetricConfig{Enabled: true}, + NginxNetWaiting: MetricConfig{Enabled: true}, + NginxNetWriting: MetricConfig{Enabled: true}, + NginxRequests: MetricConfig{Enabled: true}, + NginxServerZoneReceived: MetricConfig{Enabled: true}, + NginxServerZoneResponses1xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses2xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses3xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses4xx: MetricConfig{Enabled: true}, + NginxServerZoneResponses5xx: MetricConfig{Enabled: true}, + NginxServerZoneSent: MetricConfig{Enabled: true}, + NginxUpstreamPeersBackup: MetricConfig{Enabled: true}, + 
NginxUpstreamPeersHealthChecksLastPassed: MetricConfig{Enabled: true}, + NginxUpstreamPeersReceived: MetricConfig{Enabled: true}, + NginxUpstreamPeersRequests: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponseTime: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses1xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses2xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses3xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses4xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersResponses5xx: MetricConfig{Enabled: true}, + NginxUpstreamPeersSent: MetricConfig{Enabled: true}, + NginxUpstreamPeersWeight: MetricConfig{Enabled: true}, }, }, }, @@ -36,10 +59,33 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - NginxConnectionsAccepted: MetricConfig{Enabled: false}, - NginxConnectionsCurrent: MetricConfig{Enabled: false}, - NginxConnectionsHandled: MetricConfig{Enabled: false}, - NginxRequests: MetricConfig{Enabled: false}, + NginxConnectionsAccepted: MetricConfig{Enabled: false}, + NginxConnectionsCurrent: MetricConfig{Enabled: false}, + NginxConnectionsHandled: MetricConfig{Enabled: false}, + NginxLoadTimestamp: MetricConfig{Enabled: false}, + NginxNetReading: MetricConfig{Enabled: false}, + NginxNetWaiting: MetricConfig{Enabled: false}, + NginxNetWriting: MetricConfig{Enabled: false}, + NginxRequests: MetricConfig{Enabled: false}, + NginxServerZoneReceived: MetricConfig{Enabled: false}, + NginxServerZoneResponses1xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses2xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses3xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses4xx: MetricConfig{Enabled: false}, + NginxServerZoneResponses5xx: MetricConfig{Enabled: false}, + NginxServerZoneSent: MetricConfig{Enabled: false}, + NginxUpstreamPeersBackup: MetricConfig{Enabled: false}, + NginxUpstreamPeersHealthChecksLastPassed: MetricConfig{Enabled: 
false}, + NginxUpstreamPeersReceived: MetricConfig{Enabled: false}, + NginxUpstreamPeersRequests: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponseTime: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses1xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses2xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses3xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses4xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersResponses5xx: MetricConfig{Enabled: false}, + NginxUpstreamPeersSent: MetricConfig{Enabled: false}, + NginxUpstreamPeersWeight: MetricConfig{Enabled: false}, }, }, }, @@ -47,8 +93,9 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index af41bd69db7f..02f3e7a2615b 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -200,23 +200,1014 @@ func newMetricNginxConnectionsHandled(cfg MetricConfig) metricNginxConnectionsHa return m } +type metricNginxLoadTimestamp struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.load_timestamp metric with initial data. 
+func (m *metricNginxLoadTimestamp) init() { + m.data.SetName("nginx.load_timestamp") + m.data.SetDescription("Time of the last reload of configuration (time since Epoch).") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() +} + +func (m *metricNginxLoadTimestamp) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxLoadTimestamp) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxLoadTimestamp) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxLoadTimestamp(cfg MetricConfig) metricNginxLoadTimestamp { + m := metricNginxLoadTimestamp{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxNetReading struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.net.reading metric with initial data. 
+func (m *metricNginxNetReading) init() { + m.data.SetName("nginx.net.reading") + m.data.SetDescription("Current number of connections where NGINX is reading the request header") + m.data.SetUnit("connections") + m.data.SetEmptyGauge() +} + +func (m *metricNginxNetReading) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxNetReading) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxNetReading) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxNetReading(cfg MetricConfig) metricNginxNetReading { + m := metricNginxNetReading{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxNetWaiting struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.net.waiting metric with initial data. 
+func (m *metricNginxNetWaiting) init() { + m.data.SetName("nginx.net.waiting") + m.data.SetDescription("Current number of connections where NGINX is waiting the response back to the client") + m.data.SetUnit("connections") + m.data.SetEmptyGauge() +} + +func (m *metricNginxNetWaiting) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxNetWaiting) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxNetWaiting) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxNetWaiting(cfg MetricConfig) metricNginxNetWaiting { + m := metricNginxNetWaiting{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxNetWriting struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.net.writing metric with initial data. 
+func (m *metricNginxNetWriting) init() { + m.data.SetName("nginx.net.writing") + m.data.SetDescription("Current number of connections where NGINX is writing the response back to the client") + m.data.SetUnit("connections") + m.data.SetEmptyGauge() +} + +func (m *metricNginxNetWriting) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxNetWriting) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxNetWriting) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxNetWriting(cfg MetricConfig) metricNginxNetWriting { + m := metricNginxNetWriting{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricNginxRequests struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills nginx.requests metric with initial data. -func (m *metricNginxRequests) init() { - m.data.SetName("nginx.requests") - m.data.SetDescription("Total number of requests made to the server since it started") - m.data.SetUnit("requests") +// init fills nginx.requests metric with initial data. 
+func (m *metricNginxRequests) init() { + m.data.SetName("nginx.requests") + m.data.SetDescription("Total number of requests made to the server since it started") + m.data.SetUnit("requests") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricNginxRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxRequests) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxRequests) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxRequests(cfg MetricConfig) metricNginxRequests { + m := metricNginxRequests{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneReceived struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.received metric with initial data. 
+func (m *metricNginxServerZoneReceived) init() { + m.data.SetName("nginx.server_zone.received") + m.data.SetDescription("Bytes received by server zones") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneReceived) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneReceived) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneReceived) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneReceived(cfg MetricConfig) metricNginxServerZoneReceived { + m := metricNginxServerZoneReceived{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses1xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.1xx metric with initial data. 
+func (m *metricNginxServerZoneResponses1xx) init() { + m.data.SetName("nginx.server_zone.responses.1xx") + m.data.SetDescription("The number of responses with 1xx status code.") + m.data.SetUnit("response") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses1xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses1xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses1xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses1xx(cfg MetricConfig) metricNginxServerZoneResponses1xx { + m := metricNginxServerZoneResponses1xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses2xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.2xx metric with initial data. 
+func (m *metricNginxServerZoneResponses2xx) init() { + m.data.SetName("nginx.server_zone.responses.2xx") + m.data.SetDescription("The number of responses with 2xx status code.") + m.data.SetUnit("response") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses2xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses2xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses2xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses2xx(cfg MetricConfig) metricNginxServerZoneResponses2xx { + m := metricNginxServerZoneResponses2xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses3xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.3xx metric with initial data. 
+func (m *metricNginxServerZoneResponses3xx) init() { + m.data.SetName("nginx.server_zone.responses.3xx") + m.data.SetDescription("The number of responses with 3xx status code.") + m.data.SetUnit("response") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses3xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses3xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses3xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses3xx(cfg MetricConfig) metricNginxServerZoneResponses3xx { + m := metricNginxServerZoneResponses3xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses4xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.4xx metric with initial data. 
+func (m *metricNginxServerZoneResponses4xx) init() { + m.data.SetName("nginx.server_zone.responses.4xx") + m.data.SetDescription("The number of responses with 4xx status code.") + m.data.SetUnit("response") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses4xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses4xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses4xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses4xx(cfg MetricConfig) metricNginxServerZoneResponses4xx { + m := metricNginxServerZoneResponses4xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneResponses5xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.responses.5xx metric with initial data. 
+func (m *metricNginxServerZoneResponses5xx) init() { + m.data.SetName("nginx.server_zone.responses.5xx") + m.data.SetDescription("The number of responses with 5xx status code.") + m.data.SetUnit("response") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneResponses5xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneResponses5xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneResponses5xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneResponses5xx(cfg MetricConfig) metricNginxServerZoneResponses5xx { + m := metricNginxServerZoneResponses5xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxServerZoneSent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.server_zone.sent metric with initial data. 
+func (m *metricNginxServerZoneSent) init() { + m.data.SetName("nginx.server_zone.sent") + m.data.SetDescription("Bytes sent by server zones") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxServerZoneSent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("serverzone_name", serverzoneNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxServerZoneSent) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxServerZoneSent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxServerZoneSent(cfg MetricConfig) metricNginxServerZoneSent { + m := metricNginxServerZoneSent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersBackup struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.backup metric with initial data. 
+func (m *metricNginxUpstreamPeersBackup) init() { + m.data.SetName("nginx.upstream.peers.backup") + m.data.SetDescription("Whether upstream server is a backup server") + m.data.SetUnit("{state}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersBackup) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersBackup) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersBackup) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersBackup(cfg MetricConfig) metricNginxUpstreamPeersBackup { + m := metricNginxUpstreamPeersBackup{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersHealthChecksLastPassed struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.health_checks.last_passed metric with initial data. 
+func (m *metricNginxUpstreamPeersHealthChecksLastPassed) init() { + m.data.SetName("nginx.upstream.peers.health_checks.last_passed") + m.data.SetDescription("Boolean indicating if the last health check request was successful and passed tests.") + m.data.SetUnit("{status}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersHealthChecksLastPassed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersHealthChecksLastPassed) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersHealthChecksLastPassed) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersHealthChecksLastPassed(cfg MetricConfig) metricNginxUpstreamPeersHealthChecksLastPassed { + m := metricNginxUpstreamPeersHealthChecksLastPassed{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersReceived struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.received metric with initial data. +func (m *metricNginxUpstreamPeersReceived) init() { + m.data.SetName("nginx.upstream.peers.received") + m.data.SetDescription("Bytes received from upstream servers") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersReceived) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersReceived) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricNginxUpstreamPeersReceived) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersReceived(cfg MetricConfig) metricNginxUpstreamPeersReceived { + m := metricNginxUpstreamPeersReceived{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersRequests struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.requests metric with initial data. +func (m *metricNginxUpstreamPeersRequests) init() { + m.data.SetName("nginx.upstream.peers.requests") + m.data.SetDescription("Number of requests made to upstream servers") + m.data.SetUnit("requests") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricNginxUpstreamPeersRequests) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersRequests) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersRequests(cfg MetricConfig) metricNginxUpstreamPeersRequests { + m := metricNginxUpstreamPeersRequests{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersResponseTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.response_time metric with initial data. 
+func (m *metricNginxUpstreamPeersResponseTime) init() { + m.data.SetName("nginx.upstream.peers.response_time") + m.data.SetDescription("The average time to receive the last byte of data from this server.") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponseTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersResponseTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersResponseTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponseTime(cfg MetricConfig) metricNginxUpstreamPeersResponseTime { + m := metricNginxUpstreamPeersResponseTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersResponses1xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills nginx.upstream.peers.responses.1xx metric with initial data. +func (m *metricNginxUpstreamPeersResponses1xx) init() { + m.data.SetName("nginx.upstream.peers.responses.1xx") + m.data.SetDescription("Number of responses from upstream with 1xx status codes") + m.data.SetUnit("responses") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponses1xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricNginxUpstreamPeersResponses1xx) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricNginxUpstreamPeersResponses1xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponses1xx(cfg MetricConfig) metricNginxUpstreamPeersResponses1xx { + m := metricNginxUpstreamPeersResponses1xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricNginxUpstreamPeersResponses2xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.responses.2xx metric with initial data. +func (m *metricNginxUpstreamPeersResponses2xx) init() { + m.data.SetName("nginx.upstream.peers.responses.2xx") + m.data.SetDescription("Number of responses from upstream with 2xx status codes") + m.data.SetUnit("responses") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersResponses2xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
func (m *metricNginxUpstreamPeersResponses2xx) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricNginxUpstreamPeersResponses2xx) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricNginxUpstreamPeersResponses2xx builds the metric wrapper; the
// backing pmetric.Metric is allocated only when the metric is enabled.
func newMetricNginxUpstreamPeersResponses2xx(cfg MetricConfig) metricNginxUpstreamPeersResponses2xx {
	m := metricNginxUpstreamPeersResponses2xx{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricNginxUpstreamPeersResponses3xx struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills nginx.upstream.peers.responses.3xx metric with initial data.
+func (m *metricNginxUpstreamPeersResponses3xx) init() { + m.data.SetName("nginx.upstream.peers.responses.3xx") + m.data.SetDescription("Number of responses from upstream with 3xx status codes") + m.data.SetUnit("responses") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricNginxRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { +func (m *metricNginxUpstreamPeersResponses3xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { if !m.config.Enabled { return } @@ -224,17 +1215,19 @@ func (m *metricNginxRequests) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricNginxRequests) updateCapacity() { +func (m *metricNginxUpstreamPeersResponses3xx) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricNginxRequests) emit(metrics pmetric.MetricSlice) { +func (m *metricNginxUpstreamPeersResponses3xx) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -242,8 +1235,8 @@ func (m *metricNginxRequests) emit(metrics pmetric.MetricSlice) { } } -func newMetricNginxRequests(cfg MetricConfig) metricNginxRequests { - m := metricNginxRequests{config: cfg} +func newMetricNginxUpstreamPeersResponses3xx(cfg MetricConfig) metricNginxUpstreamPeersResponses3xx { + m := metricNginxUpstreamPeersResponses3xx{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -251,52 +1244,304 @@ func newMetricNginxRequests(cfg MetricConfig) metricNginxRequests { return m } -// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations -// required to produce metric representation defined in metadata and user config. -type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. - metricNginxConnectionsAccepted metricNginxConnectionsAccepted - metricNginxConnectionsCurrent metricNginxConnectionsCurrent - metricNginxConnectionsHandled metricNginxConnectionsHandled - metricNginxRequests metricNginxRequests +type metricNginxUpstreamPeersResponses4xx struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.responses.4xx metric with initial data. 
func (m *metricNginxUpstreamPeersResponses4xx) init() {
	m.data.SetName("nginx.upstream.peers.responses.4xx")
	m.data.SetDescription("Number of responses from upstream with 4xx status codes")
	m.data.SetUnit("responses")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one cumulative int data point tagged with the
// upstream block name and peer address; no-op when the metric is disabled.
func (m *metricNginxUpstreamPeersResponses4xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue)
	dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricNginxUpstreamPeersResponses4xx) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricNginxUpstreamPeersResponses4xx) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricNginxUpstreamPeersResponses4xx builds the metric wrapper; the
// backing pmetric.Metric is allocated only when the metric is enabled.
func newMetricNginxUpstreamPeersResponses4xx(cfg MetricConfig) metricNginxUpstreamPeersResponses4xx {
	m := metricNginxUpstreamPeersResponses4xx{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricNginxUpstreamPeersResponses5xx struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills nginx.upstream.peers.responses.5xx metric with initial data.
func (m *metricNginxUpstreamPeersResponses5xx) init() {
	m.data.SetName("nginx.upstream.peers.responses.5xx")
	m.data.SetDescription("Number of responses from upstream with 5xx status codes")
	m.data.SetUnit("responses")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one cumulative int data point tagged with the
// upstream block name and peer address; no-op when the metric is disabled.
func (m *metricNginxUpstreamPeersResponses5xx) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue)
	dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricNginxUpstreamPeersResponses5xx) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricNginxUpstreamPeersResponses5xx) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricNginxUpstreamPeersResponses5xx(cfg MetricConfig) metricNginxUpstreamPeersResponses5xx { + m := metricNginxUpstreamPeersResponses5xx{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m } -// MetricBuilderOption applies changes to default metrics builder. -type MetricBuilderOption interface { - apply(*MetricsBuilder) +type metricNginxUpstreamPeersSent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills nginx.upstream.peers.sent metric with initial data. +func (m *metricNginxUpstreamPeersSent) init() { + m.data.SetName("nginx.upstream.peers.sent") + m.data.SetDescription("Bytes sent from upstream servers") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricNginxUpstreamPeersSent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue) + dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
func (m *metricNginxUpstreamPeersSent) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricNginxUpstreamPeersSent) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricNginxUpstreamPeersSent builds the metric wrapper; the backing
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricNginxUpstreamPeersSent(cfg MetricConfig) metricNginxUpstreamPeersSent {
	m := metricNginxUpstreamPeersSent{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricNginxUpstreamPeersWeight struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills nginx.upstream.peers.weight metric with initial data.
// Note: this metric is a gauge (SetEmptyGauge), not a cumulative sum.
func (m *metricNginxUpstreamPeersWeight) init() {
	m.data.SetName("nginx.upstream.peers.weight")
	m.data.SetDescription("Weight of upstream server")
	m.data.SetUnit("weight")
	m.data.SetEmptyGauge()
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one gauge double data point tagged with the
// upstream block name and peer address; no-op when the metric is disabled.
func (m *metricNginxUpstreamPeersWeight) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleValue(val)
	dp.Attributes().PutStr("upstream_block_name", upstreamBlockNameAttributeValue)
	dp.Attributes().PutStr("upstream_peer_address", upstreamPeerAddressAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricNginxUpstreamPeersWeight) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricNginxUpstreamPeersWeight) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } } -type metricBuilderOptionFunc func(mb *MetricsBuilder) +func newMetricNginxUpstreamPeersWeight(cfg MetricConfig) metricNginxUpstreamPeersWeight { + m := metricNginxUpstreamPeersWeight{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} -func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { - mbof(mb) +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user config. +type MetricsBuilder struct { + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. 
+ metricNginxConnectionsAccepted metricNginxConnectionsAccepted + metricNginxConnectionsCurrent metricNginxConnectionsCurrent + metricNginxConnectionsHandled metricNginxConnectionsHandled + metricNginxLoadTimestamp metricNginxLoadTimestamp + metricNginxNetReading metricNginxNetReading + metricNginxNetWaiting metricNginxNetWaiting + metricNginxNetWriting metricNginxNetWriting + metricNginxRequests metricNginxRequests + metricNginxServerZoneReceived metricNginxServerZoneReceived + metricNginxServerZoneResponses1xx metricNginxServerZoneResponses1xx + metricNginxServerZoneResponses2xx metricNginxServerZoneResponses2xx + metricNginxServerZoneResponses3xx metricNginxServerZoneResponses3xx + metricNginxServerZoneResponses4xx metricNginxServerZoneResponses4xx + metricNginxServerZoneResponses5xx metricNginxServerZoneResponses5xx + metricNginxServerZoneSent metricNginxServerZoneSent + metricNginxUpstreamPeersBackup metricNginxUpstreamPeersBackup + metricNginxUpstreamPeersHealthChecksLastPassed metricNginxUpstreamPeersHealthChecksLastPassed + metricNginxUpstreamPeersReceived metricNginxUpstreamPeersReceived + metricNginxUpstreamPeersRequests metricNginxUpstreamPeersRequests + metricNginxUpstreamPeersResponseTime metricNginxUpstreamPeersResponseTime + metricNginxUpstreamPeersResponses1xx metricNginxUpstreamPeersResponses1xx + metricNginxUpstreamPeersResponses2xx metricNginxUpstreamPeersResponses2xx + metricNginxUpstreamPeersResponses3xx metricNginxUpstreamPeersResponses3xx + metricNginxUpstreamPeersResponses4xx metricNginxUpstreamPeersResponses4xx + metricNginxUpstreamPeersResponses5xx metricNginxUpstreamPeersResponses5xx + metricNginxUpstreamPeersSent metricNginxUpstreamPeersSent + metricNginxUpstreamPeersWeight metricNginxUpstreamPeersWeight } +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { - return metricBuilderOptionFunc(func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { mb.startTime = startTime - }) + } } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(mbc.Metrics.NginxConnectionsAccepted), - metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(mbc.Metrics.NginxConnectionsCurrent), - metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(mbc.Metrics.NginxConnectionsHandled), - metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(mbc.Metrics.NginxConnectionsAccepted), + metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(mbc.Metrics.NginxConnectionsCurrent), + metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(mbc.Metrics.NginxConnectionsHandled), + metricNginxLoadTimestamp: newMetricNginxLoadTimestamp(mbc.Metrics.NginxLoadTimestamp), + metricNginxNetReading: newMetricNginxNetReading(mbc.Metrics.NginxNetReading), + metricNginxNetWaiting: newMetricNginxNetWaiting(mbc.Metrics.NginxNetWaiting), + metricNginxNetWriting: newMetricNginxNetWriting(mbc.Metrics.NginxNetWriting), + metricNginxRequests: newMetricNginxRequests(mbc.Metrics.NginxRequests), + metricNginxServerZoneReceived: 
newMetricNginxServerZoneReceived(mbc.Metrics.NginxServerZoneReceived), + metricNginxServerZoneResponses1xx: newMetricNginxServerZoneResponses1xx(mbc.Metrics.NginxServerZoneResponses1xx), + metricNginxServerZoneResponses2xx: newMetricNginxServerZoneResponses2xx(mbc.Metrics.NginxServerZoneResponses2xx), + metricNginxServerZoneResponses3xx: newMetricNginxServerZoneResponses3xx(mbc.Metrics.NginxServerZoneResponses3xx), + metricNginxServerZoneResponses4xx: newMetricNginxServerZoneResponses4xx(mbc.Metrics.NginxServerZoneResponses4xx), + metricNginxServerZoneResponses5xx: newMetricNginxServerZoneResponses5xx(mbc.Metrics.NginxServerZoneResponses5xx), + metricNginxServerZoneSent: newMetricNginxServerZoneSent(mbc.Metrics.NginxServerZoneSent), + metricNginxUpstreamPeersBackup: newMetricNginxUpstreamPeersBackup(mbc.Metrics.NginxUpstreamPeersBackup), + metricNginxUpstreamPeersHealthChecksLastPassed: newMetricNginxUpstreamPeersHealthChecksLastPassed(mbc.Metrics.NginxUpstreamPeersHealthChecksLastPassed), + metricNginxUpstreamPeersReceived: newMetricNginxUpstreamPeersReceived(mbc.Metrics.NginxUpstreamPeersReceived), + metricNginxUpstreamPeersRequests: newMetricNginxUpstreamPeersRequests(mbc.Metrics.NginxUpstreamPeersRequests), + metricNginxUpstreamPeersResponseTime: newMetricNginxUpstreamPeersResponseTime(mbc.Metrics.NginxUpstreamPeersResponseTime), + metricNginxUpstreamPeersResponses1xx: newMetricNginxUpstreamPeersResponses1xx(mbc.Metrics.NginxUpstreamPeersResponses1xx), + metricNginxUpstreamPeersResponses2xx: newMetricNginxUpstreamPeersResponses2xx(mbc.Metrics.NginxUpstreamPeersResponses2xx), + metricNginxUpstreamPeersResponses3xx: newMetricNginxUpstreamPeersResponses3xx(mbc.Metrics.NginxUpstreamPeersResponses3xx), + metricNginxUpstreamPeersResponses4xx: newMetricNginxUpstreamPeersResponses4xx(mbc.Metrics.NginxUpstreamPeersResponses4xx), + metricNginxUpstreamPeersResponses5xx: newMetricNginxUpstreamPeersResponses5xx(mbc.Metrics.NginxUpstreamPeersResponses5xx), + 
metricNginxUpstreamPeersSent: newMetricNginxUpstreamPeersSent(mbc.Metrics.NginxUpstreamPeersSent), + metricNginxUpstreamPeersWeight: newMetricNginxUpstreamPeersWeight(mbc.Metrics.NginxUpstreamPeersWeight), } for _, op := range options { - op.apply(mb) + op(mb) } return mb } @@ -309,28 +1554,20 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption interface { - apply(pmetric.ResourceMetrics) -} - -type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) - -func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { - rmof(rm) -} +type ResourceMetricsOption func(pmetric.ResourceMetrics) // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - }) + } } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. 
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -344,7 +1581,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - }) + } } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -352,7 +1589,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. -func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver") @@ -361,10 +1598,33 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxConnectionsAccepted.emit(ils.Metrics()) mb.metricNginxConnectionsCurrent.emit(ils.Metrics()) mb.metricNginxConnectionsHandled.emit(ils.Metrics()) + mb.metricNginxLoadTimestamp.emit(ils.Metrics()) + mb.metricNginxNetReading.emit(ils.Metrics()) + mb.metricNginxNetWaiting.emit(ils.Metrics()) + mb.metricNginxNetWriting.emit(ils.Metrics()) mb.metricNginxRequests.emit(ils.Metrics()) + mb.metricNginxServerZoneReceived.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses1xx.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses2xx.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses3xx.emit(ils.Metrics()) + 
mb.metricNginxServerZoneResponses4xx.emit(ils.Metrics()) + mb.metricNginxServerZoneResponses5xx.emit(ils.Metrics()) + mb.metricNginxServerZoneSent.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersBackup.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersHealthChecksLastPassed.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersReceived.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersRequests.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponseTime.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses1xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses2xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses3xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses4xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersResponses5xx.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersSent.emit(ils.Metrics()) + mb.metricNginxUpstreamPeersWeight.emit(ils.Metrics()) - for _, op := range options { - op.apply(rm) + for _, op := range rmo { + op(rm) } if ils.Metrics().Len() > 0 { @@ -376,8 +1636,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(options...) +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) 
metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -398,16 +1658,131 @@ func (mb *MetricsBuilder) RecordNginxConnectionsHandledDataPoint(ts pcommon.Time mb.metricNginxConnectionsHandled.recordDataPoint(mb.startTime, ts, val) } +// RecordNginxLoadTimestampDataPoint adds a data point to nginx.load_timestamp metric. +func (mb *MetricsBuilder) RecordNginxLoadTimestampDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxLoadTimestamp.recordDataPoint(mb.startTime, ts, val) +} + +// RecordNginxNetReadingDataPoint adds a data point to nginx.net.reading metric. +func (mb *MetricsBuilder) RecordNginxNetReadingDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxNetReading.recordDataPoint(mb.startTime, ts, val) +} + +// RecordNginxNetWaitingDataPoint adds a data point to nginx.net.waiting metric. +func (mb *MetricsBuilder) RecordNginxNetWaitingDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxNetWaiting.recordDataPoint(mb.startTime, ts, val) +} + +// RecordNginxNetWritingDataPoint adds a data point to nginx.net.writing metric. +func (mb *MetricsBuilder) RecordNginxNetWritingDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricNginxNetWriting.recordDataPoint(mb.startTime, ts, val) +} + // RecordNginxRequestsDataPoint adds a data point to nginx.requests metric. func (mb *MetricsBuilder) RecordNginxRequestsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricNginxRequests.recordDataPoint(mb.startTime, ts, val) } +// RecordNginxServerZoneReceivedDataPoint adds a data point to nginx.server_zone.received metric. +func (mb *MetricsBuilder) RecordNginxServerZoneReceivedDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) { + mb.metricNginxServerZoneReceived.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue) +} + +// RecordNginxServerZoneResponses1xxDataPoint adds a data point to nginx.server_zone.responses.1xx metric. 
func (mb *MetricsBuilder) RecordNginxServerZoneResponses1xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) {
	mb.metricNginxServerZoneResponses1xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue)
}

// NOTE: every Record*DataPoint helper stamps the data point with the
// builder's configured startTime and the caller-supplied observation
// timestamp ts.

// RecordNginxServerZoneResponses2xxDataPoint adds a data point to nginx.server_zone.responses.2xx metric.
func (mb *MetricsBuilder) RecordNginxServerZoneResponses2xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) {
	mb.metricNginxServerZoneResponses2xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue)
}

// RecordNginxServerZoneResponses3xxDataPoint adds a data point to nginx.server_zone.responses.3xx metric.
func (mb *MetricsBuilder) RecordNginxServerZoneResponses3xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) {
	mb.metricNginxServerZoneResponses3xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue)
}

// RecordNginxServerZoneResponses4xxDataPoint adds a data point to nginx.server_zone.responses.4xx metric.
func (mb *MetricsBuilder) RecordNginxServerZoneResponses4xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) {
	mb.metricNginxServerZoneResponses4xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue)
}

// RecordNginxServerZoneResponses5xxDataPoint adds a data point to nginx.server_zone.responses.5xx metric.
func (mb *MetricsBuilder) RecordNginxServerZoneResponses5xxDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) {
	mb.metricNginxServerZoneResponses5xx.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue)
}

// RecordNginxServerZoneSentDataPoint adds a data point to nginx.server_zone.sent metric.
func (mb *MetricsBuilder) RecordNginxServerZoneSentDataPoint(ts pcommon.Timestamp, val int64, serverzoneNameAttributeValue string) {
	mb.metricNginxServerZoneSent.recordDataPoint(mb.startTime, ts, val, serverzoneNameAttributeValue)
}

// RecordNginxUpstreamPeersBackupDataPoint adds a data point to nginx.upstream.peers.backup metric.
// The builder's configured startTime is used as the data point start timestamp.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersBackupDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersBackup.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint adds a data point to nginx.upstream.peers.health_checks.last_passed metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersHealthChecksLastPassed.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersReceivedDataPoint adds a data point to nginx.upstream.peers.received metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersReceivedDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersReceived.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersRequestsDataPoint adds a data point to nginx.upstream.peers.requests metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersRequestsDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersRequests.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersResponseTimeDataPoint adds a data point to nginx.upstream.peers.response_time metric.
// The builder's configured startTime is used as the data point start timestamp.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponseTimeDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersResponseTime.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersResponses1xxDataPoint adds a data point to nginx.upstream.peers.responses.1xx metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses1xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersResponses1xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersResponses2xxDataPoint adds a data point to nginx.upstream.peers.responses.2xx metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses2xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersResponses2xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersResponses3xxDataPoint adds a data point to nginx.upstream.peers.responses.3xx metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses3xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersResponses3xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersResponses4xxDataPoint adds a data point to nginx.upstream.peers.responses.4xx metric.
// The builder's configured startTime is used as the data point start timestamp.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses4xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersResponses4xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersResponses5xxDataPoint adds a data point to nginx.upstream.peers.responses.5xx metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersResponses5xxDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersResponses5xx.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersSentDataPoint adds a data point to nginx.upstream.peers.sent metric.
func (mb *MetricsBuilder) RecordNginxUpstreamPeersSentDataPoint(ts pcommon.Timestamp, val int64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) {
	mb.metricNginxUpstreamPeersSent.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue)
}

// RecordNginxUpstreamPeersWeightDataPoint adds a data point to nginx.upstream.peers.weight metric.
+func (mb *MetricsBuilder) RecordNginxUpstreamPeersWeightDataPoint(ts pcommon.Timestamp, val float64, upstreamBlockNameAttributeValue string, upstreamPeerAddressAttributeValue string) { + mb.metricNginxUpstreamPeersWeight.recordDataPoint(mb.startTime, ts, val, upstreamBlockNameAttributeValue, upstreamPeerAddressAttributeValue) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. -func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op.apply(mb) + op(mb) } } diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go index 1c4ffdbf469b..c6e9c2377d23 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -71,14 +71,106 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxConnectionsHandledDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + 
mb.RecordNginxLoadTimestampDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxNetReadingDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxNetWaitingDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxNetWritingDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordNginxRequestsDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneReceivedDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses1xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses2xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses3xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses4xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneResponses5xxDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxServerZoneSentDataPoint(ts, 1, "serverzone_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersBackupDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersReceivedDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersRequestsDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponseTimeDataPoint(ts, 1, 
"upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses1xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses2xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses3xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses4xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersResponses5xxDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersSentDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordNginxUpstreamPeersWeightDataPoint(ts, 1, "upstream_block_name-val", "upstream_peer_address-val") + res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if tt.expectEmpty { + if test.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -88,10 +180,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if tt.metricsSet == testDataSetDefault { + if test.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if tt.metricsSet == testDataSetAll { + if test.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -104,7 +196,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of accepted client 
connections", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -118,7 +210,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The current number of nginx connections by state", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.False(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -135,13 +227,61 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of handled connections. 
Generally, the parameter value is the same as nginx.connections_accepted unless some resource limits have been reached (for example, the worker_connections limit).", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.load_timestamp": + assert.False(t, validatedMetrics["nginx.load_timestamp"], "Found a duplicate in the metrics slice: nginx.load_timestamp") + validatedMetrics["nginx.load_timestamp"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Time of the last reload of configuration (time since Epoch).", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.net.reading": + assert.False(t, validatedMetrics["nginx.net.reading"], "Found a duplicate in the metrics slice: nginx.net.reading") + validatedMetrics["nginx.net.reading"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Current number of connections where NGINX is reading the request header", ms.At(i).Description()) + assert.Equal(t, "connections", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + 
assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.net.waiting": + assert.False(t, validatedMetrics["nginx.net.waiting"], "Found a duplicate in the metrics slice: nginx.net.waiting") + validatedMetrics["nginx.net.waiting"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Current number of connections where NGINX is waiting the response back to the client", ms.At(i).Description()) + assert.Equal(t, "connections", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.net.writing": + assert.False(t, validatedMetrics["nginx.net.writing"], "Found a duplicate in the metrics slice: nginx.net.writing") + validatedMetrics["nginx.net.writing"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Current number of connections where NGINX is writing the response back to the client", ms.At(i).Description()) + assert.Equal(t, "connections", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "nginx.requests": assert.False(t, validatedMetrics["nginx.requests"], "Found a duplicate in the metrics slice: nginx.requests") validatedMetrics["nginx.requests"] = true @@ -149,13 +289,364 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Total number of requests made to the server since it started", ms.At(i).Description()) assert.Equal(t, 
"requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "nginx.server_zone.received": + assert.False(t, validatedMetrics["nginx.server_zone.received"], "Found a duplicate in the metrics slice: nginx.server_zone.received") + validatedMetrics["nginx.server_zone.received"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes received by server zones", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.1xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.1xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.1xx") + validatedMetrics["nginx.server_zone.responses.1xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of responses with 1xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + assert.Equal(t, true, 
ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.2xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.2xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.2xx") + validatedMetrics["nginx.server_zone.responses.2xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of responses with 2xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.3xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.3xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.3xx") + validatedMetrics["nginx.server_zone.responses.3xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of responses with 3xx status code.", ms.At(i).Description()) 
+ assert.Equal(t, "response", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.4xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.4xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.4xx") + validatedMetrics["nginx.server_zone.responses.4xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of responses with 4xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.responses.5xx": + assert.False(t, validatedMetrics["nginx.server_zone.responses.5xx"], "Found a duplicate in the metrics slice: nginx.server_zone.responses.5xx") + validatedMetrics["nginx.server_zone.responses.5xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, 
"The number of responses with 5xx status code.", ms.At(i).Description()) + assert.Equal(t, "response", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.server_zone.sent": + assert.False(t, validatedMetrics["nginx.server_zone.sent"], "Found a duplicate in the metrics slice: nginx.server_zone.sent") + validatedMetrics["nginx.server_zone.sent"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes sent by server zones", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("serverzone_name") + assert.True(t, ok) + assert.EqualValues(t, "serverzone_name-val", attrVal.Str()) + case "nginx.upstream.peers.backup": + assert.False(t, validatedMetrics["nginx.upstream.peers.backup"], "Found a duplicate in the metrics slice: nginx.upstream.peers.backup") + validatedMetrics["nginx.upstream.peers.backup"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, 
"Whether upstream server is a backup server", ms.At(i).Description()) + assert.Equal(t, "{state}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.health_checks.last_passed": + assert.False(t, validatedMetrics["nginx.upstream.peers.health_checks.last_passed"], "Found a duplicate in the metrics slice: nginx.upstream.peers.health_checks.last_passed") + validatedMetrics["nginx.upstream.peers.health_checks.last_passed"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Boolean indicating if the last health check request was successful and passed tests.", ms.At(i).Description()) + assert.Equal(t, "{status}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.received": + assert.False(t, validatedMetrics["nginx.upstream.peers.received"], "Found a duplicate in the metrics slice: nginx.upstream.peers.received") + 
validatedMetrics["nginx.upstream.peers.received"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes received from upstream servers", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.requests": + assert.False(t, validatedMetrics["nginx.upstream.peers.requests"], "Found a duplicate in the metrics slice: nginx.upstream.peers.requests") + validatedMetrics["nginx.upstream.peers.requests"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of requests made to upstream servers", ms.At(i).Description()) + assert.Equal(t, "requests", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", 
attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.response_time": + assert.False(t, validatedMetrics["nginx.upstream.peers.response_time"], "Found a duplicate in the metrics slice: nginx.upstream.peers.response_time") + validatedMetrics["nginx.upstream.peers.response_time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The average time to receive the last byte of data from this server.", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.1xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.1xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.1xx") + validatedMetrics["nginx.upstream.peers.responses.1xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 1xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, 
dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.2xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.2xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.2xx") + validatedMetrics["nginx.upstream.peers.responses.2xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 2xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.3xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.3xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.3xx") + validatedMetrics["nginx.upstream.peers.responses.3xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, 
ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 3xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.4xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.4xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.4xx") + validatedMetrics["nginx.upstream.peers.responses.4xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 4xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = 
dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.responses.5xx": + assert.False(t, validatedMetrics["nginx.upstream.peers.responses.5xx"], "Found a duplicate in the metrics slice: nginx.upstream.peers.responses.5xx") + validatedMetrics["nginx.upstream.peers.responses.5xx"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of responses from upstream with 5xx status codes", ms.At(i).Description()) + assert.Equal(t, "responses", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.sent": + assert.False(t, validatedMetrics["nginx.upstream.peers.sent"], "Found a duplicate in the metrics slice: nginx.upstream.peers.sent") + validatedMetrics["nginx.upstream.peers.sent"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Bytes sent from upstream servers", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := 
ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) + case "nginx.upstream.peers.weight": + assert.False(t, validatedMetrics["nginx.upstream.peers.weight"], "Found a duplicate in the metrics slice: nginx.upstream.peers.weight") + validatedMetrics["nginx.upstream.peers.weight"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Weight of upstream server", ms.At(i).Description()) + assert.Equal(t, "weight", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("upstream_block_name") + assert.True(t, ok) + assert.EqualValues(t, "upstream_block_name-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("upstream_peer_address") + assert.True(t, ok) + assert.EqualValues(t, "upstream_peer_address-val", attrVal.Str()) } } }) diff --git a/receiver/nginxreceiver/internal/metadata/generated_status.go b/receiver/nginxreceiver/internal/metadata/generated_status.go index acfba477b8fd..f4cd74a5e478 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_status.go +++ b/receiver/nginxreceiver/internal/metadata/generated_status.go @@ -7,8 +7,7 @@ import ( ) var ( - Type = component.MustNewType("nginx") - ScopeName = 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver" + Type = component.MustNewType("nginx") ) const ( diff --git a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml index 05f6368506a2..3a896e7cdadd 100644 --- a/receiver/nginxreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/nginxreceiver/internal/metadata/testdata/config.yaml @@ -7,8 +7,54 @@ all_set: enabled: true nginx.connections_handled: enabled: true + nginx.load_timestamp: + enabled: true + nginx.net.reading: + enabled: true + nginx.net.waiting: + enabled: true + nginx.net.writing: + enabled: true nginx.requests: enabled: true + nginx.server_zone.received: + enabled: true + nginx.server_zone.responses.1xx: + enabled: true + nginx.server_zone.responses.2xx: + enabled: true + nginx.server_zone.responses.3xx: + enabled: true + nginx.server_zone.responses.4xx: + enabled: true + nginx.server_zone.responses.5xx: + enabled: true + nginx.server_zone.sent: + enabled: true + nginx.upstream.peers.backup: + enabled: true + nginx.upstream.peers.health_checks.last_passed: + enabled: true + nginx.upstream.peers.received: + enabled: true + nginx.upstream.peers.requests: + enabled: true + nginx.upstream.peers.response_time: + enabled: true + nginx.upstream.peers.responses.1xx: + enabled: true + nginx.upstream.peers.responses.2xx: + enabled: true + nginx.upstream.peers.responses.3xx: + enabled: true + nginx.upstream.peers.responses.4xx: + enabled: true + nginx.upstream.peers.responses.5xx: + enabled: true + nginx.upstream.peers.sent: + enabled: true + nginx.upstream.peers.weight: + enabled: true none_set: metrics: nginx.connections_accepted: @@ -17,5 +63,51 @@ none_set: enabled: false nginx.connections_handled: enabled: false + nginx.load_timestamp: + enabled: false + nginx.net.reading: + enabled: false + nginx.net.waiting: + enabled: false + nginx.net.writing: + enabled: false nginx.requests: enabled: false 
+ nginx.server_zone.received: + enabled: false + nginx.server_zone.responses.1xx: + enabled: false + nginx.server_zone.responses.2xx: + enabled: false + nginx.server_zone.responses.3xx: + enabled: false + nginx.server_zone.responses.4xx: + enabled: false + nginx.server_zone.responses.5xx: + enabled: false + nginx.server_zone.sent: + enabled: false + nginx.upstream.peers.backup: + enabled: false + nginx.upstream.peers.health_checks.last_passed: + enabled: false + nginx.upstream.peers.received: + enabled: false + nginx.upstream.peers.requests: + enabled: false + nginx.upstream.peers.response_time: + enabled: false + nginx.upstream.peers.responses.1xx: + enabled: false + nginx.upstream.peers.responses.2xx: + enabled: false + nginx.upstream.peers.responses.3xx: + enabled: false + nginx.upstream.peers.responses.4xx: + enabled: false + nginx.upstream.peers.responses.5xx: + enabled: false + nginx.upstream.peers.sent: + enabled: false + nginx.upstream.peers.weight: + enabled: false diff --git a/receiver/nginxreceiver/metadata.yaml b/receiver/nginxreceiver/metadata.yaml index 90ef854d1fef..4d4b58b2f968 100644 --- a/receiver/nginxreceiver/metadata.yaml +++ b/receiver/nginxreceiver/metadata.yaml @@ -17,6 +17,15 @@ attributes: - reading - writing - waiting + upstream_block_name: + description: The name of the upstream block + type: string + upstream_peer_address: + description: The address of the upstream server + type: string + serverzone_name: + description: The name of serverzone + type: string metrics: nginx.requests: @@ -27,7 +36,7 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - attributes: [] + attributes: [] nginx.connections_accepted: enabled: true description: The total number of accepted client connections @@ -55,3 +64,244 @@ metrics: monotonic: false aggregation_temporality: cumulative attributes: [state] + + ### Timing metrics + nginx.load_timestamp: + enabled: true + description: Time of the last reload of configuration (time since 
Epoch). + gauge: + value_type: int + unit: ms + + nginx.upstream.peers.response_time: + attributes: + - upstream_block_name + - upstream_peer_address + enabled: true + description: The average time to receive the last byte of data from this server. + gauge: + value_type: int + unit: ms + + nginx.net.reading: + enabled: true + description: Current number of connections where NGINX is reading the request header + gauge: + value_type: int + unit: connections + + nginx.net.writing: + enabled: true + description: Current number of connections where NGINX is writing the response back to the client + gauge: + value_type: int + unit: connections + + nginx.net.waiting: + enabled: true + description: Current number of connections where NGINX is waiting the response back to the client + gauge: + value_type: int + unit: connections + + nginx.server_zone.responses.1xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 1xx status code. + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: + response + + nginx.server_zone.responses.2xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 2xx status code. + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: + response + nginx.server_zone.responses.3xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 3xx status code. + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: + response + + nginx.server_zone.responses.4xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 4xx status code. + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: + response + nginx.server_zone.responses.5xx: + enabled: true + attributes: + - serverzone_name + description: The number of responses with 5xx status code. 
+ sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: + response + nginx.server_zone.received: + enabled: true + attributes: + - serverzone_name + description: Bytes received by server zones + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By + nginx.server_zone.sent: + enabled: true + attributes: + - serverzone_name + description: Bytes sent by server zones + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By + nginx.upstream.peers.requests: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of requests made to upstream servers + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: requests + nginx.upstream.peers.received: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Bytes received from upstream servers + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By + + nginx.upstream.peers.sent: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Bytes sent from upstream servers + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: By + + nginx.upstream.peers.responses.1xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 1xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.2xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 2xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.3xx: + enabled: true + attributes: + - upstream_block_name + - 
upstream_peer_address + description: Number of responses from upstream with 3xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.4xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 4xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.responses.5xx: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Number of responses from upstream with 5xx status codes + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + unit: responses + + nginx.upstream.peers.weight: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Weight of upstream server + gauge: + value_type: double + unit: weight + + nginx.upstream.peers.backup: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Whether upstream server is a backup server + gauge: + value_type: int + unit: "{state}" + + nginx.upstream.peers.health_checks.last_passed: + enabled: true + attributes: + - upstream_block_name + - upstream_peer_address + description: Boolean indicating if the last health check request was successful and passed tests. 
+ gauge: + value_type: int + unit: "{status}" \ No newline at end of file diff --git a/receiver/nginxreceiver/nginx.go b/receiver/nginxreceiver/nginx.go new file mode 100644 index 000000000000..b929c072c99e --- /dev/null +++ b/receiver/nginxreceiver/nginx.go @@ -0,0 +1,133 @@ +package nginxreceiver + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" +) + +const templateMetrics string = `Active connections: %d +server accepts handled requests +%d %d %d +Reading: %d Writing: %d Waiting: %d +` + +// NginxClient allows you to fetch NGINX metrics from the stub_status page. +type NginxClient struct { + apiEndpoint string + vtsEndpoint string + httpClient *http.Client +} + +// StubStats represents NGINX stub_status metrics. +type StubStats struct { + Connections StubConnections + Requests int64 +} + +// StubConnections represents connections related metrics. +type StubConnections struct { + Active int64 + Accepted int64 + Handled int64 + Reading int64 + Writing int64 + Waiting int64 +} + +// NewNginxClient creates an NginxClient. +func NewNginxClient(httpClient *http.Client, apiEndpoint string, vtsEndpoint string) (*NginxClient, error) { + client := &NginxClient{ + apiEndpoint: apiEndpoint, + vtsEndpoint: vtsEndpoint, + httpClient: httpClient, + } + + _, err := client.GetStubStats() + return client, err +} + +// GetStubStats fetches the stub_status metrics. 
+func (client *NginxClient) GetStubStats() (*StubStats, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, client.apiEndpoint, nil) + if err != nil { + return nil, fmt.Errorf("failed to create a get request: %w", err) + } + resp, err := client.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to get %v: %w", client.apiEndpoint, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("expected %v response, got %v", http.StatusOK, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read the response body: %w", err) + } + + r := bytes.NewReader(body) + stats, err := parseStubStats(r) + if err != nil { + return nil, fmt.Errorf("failed to parse response body %q: %w", string(body), err) + } + + return stats, nil +} + +func (client *NginxClient) GetVtsStats() (*NginxVtsStatus, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, client.vtsEndpoint, nil) + if err != nil { + return nil, fmt.Errorf("failed to create a get request: %w", err) + } + + resp, err := client.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to get %v: %w", client.apiEndpoint, err) + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + + return nil, fmt.Errorf("expected %v response, got %v", http.StatusOK, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read the response body: %w", err) + } + + r := bytes.NewReader(body) + stats, err := ParseVtsStats(r) + if err != nil { + return nil, fmt.Errorf("failed to parse response body %q: %w", string(body), err) + } + + return stats, nil +} + +func parseStubStats(r io.Reader) (*StubStats, error) { + var s StubStats + if _, err := 
fmt.Fscanf(r, templateMetrics, + &s.Connections.Active, + &s.Connections.Accepted, + &s.Connections.Handled, + &s.Requests, + &s.Connections.Reading, + &s.Connections.Writing, + &s.Connections.Waiting); err != nil { + return nil, fmt.Errorf("failed to scan template metrics: %w", err) + } + return &s, nil +} diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 7f202ab40c2b..7f3f1ebc9ad4 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -8,7 +8,6 @@ import ( "net/http" "time" - "github.com/nginxinc/nginx-prometheus-exporter/client" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -20,7 +19,7 @@ import ( type nginxScraper struct { httpClient *http.Client - client *client.NginxClient + client *NginxClient settings component.TelemetrySettings cfg *Config @@ -50,10 +49,10 @@ func (r *nginxScraper) start(ctx context.Context, host component.Host) error { } func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { - // Init client in scrape method in case there are transient errors in the constructor. 
if r.client == nil { var err error - r.client, err = client.NewNginxClient(r.httpClient, r.cfg.ClientConfig.Endpoint) + r.client, err = NewNginxClient(r.httpClient, r.cfg.ClientConfig.Endpoint, r.cfg.VTSEndpoint) + if err != nil { r.client = nil return pmetric.Metrics{}, err @@ -66,7 +65,17 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { return pmetric.Metrics{}, err } + vtsStats, err := r.client.GetVtsStats() + + if err != nil { + r.settings.Logger.Error("Failed to fetch nginx stats", zap.Error(err)) + return pmetric.Metrics{}, err + } + now := pcommon.NewTimestampFromTime(time.Now()) + + r.recordVtsStats(now, vtsStats) + r.mb.RecordNginxRequestsDataPoint(now, stats.Requests) r.mb.RecordNginxConnectionsAcceptedDataPoint(now, stats.Connections.Accepted) r.mb.RecordNginxConnectionsHandledDataPoint(now, stats.Connections.Handled) @@ -74,5 +83,113 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Reading, metadata.AttributeStateReading) r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Writing, metadata.AttributeStateWriting) r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Waiting, metadata.AttributeStateWaiting) + return r.mb.Emit(), nil } + +func (r *nginxScraper) recordVtsStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + r.recordTimingStats(now, vtsStats) + r.recordVtsConnectionStats(now, vtsStats) + r.recordVtsServerZoneResponseStats(now, vtsStats) + r.recordVtsServerZoneTrafficStats(now, vtsStats) + r.recordVtsUpstreamStats(now, vtsStats) +} + +func (r *nginxScraper) recordVtsUpstreamStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + for upstreamZoneName, upstreamZoneServers := range vtsStats.UpstreamZones { + for _, upstreamZoneServer := range upstreamZoneServers { + r.mb.RecordNginxUpstreamPeersRequestsDataPoint( + now, upstreamZoneServer.RequestCounter, upstreamZoneName, 
upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersReceivedDataPoint( + now, upstreamZoneServer.InBytes, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersSentDataPoint( + now, upstreamZoneServer.OutBytes, upstreamZoneName, upstreamZoneServer.Server, + ) + + r.mb.RecordNginxUpstreamPeersResponses1xxDataPoint( + now, upstreamZoneServer.Responses.Status1xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses2xxDataPoint( + now, upstreamZoneServer.Responses.Status2xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses3xxDataPoint( + now, upstreamZoneServer.Responses.Status3xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses4xxDataPoint( + now, upstreamZoneServer.Responses.Status4xx, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersResponses5xxDataPoint( + now, upstreamZoneServer.Responses.Status5xx, upstreamZoneName, upstreamZoneServer.Server, + ) + + r.mb.RecordNginxUpstreamPeersWeightDataPoint( + now, upstreamZoneServer.Weight, upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersBackupDataPoint( + now, int64(boolToInt(upstreamZoneServer.Backup)), upstreamZoneName, upstreamZoneServer.Server, + ) + r.mb.RecordNginxUpstreamPeersHealthChecksLastPassedDataPoint( + now, int64(boolToInt(upstreamZoneServer.Down)), upstreamZoneName, upstreamZoneServer.Server, + ) + } + } +} + +func (r *nginxScraper) recordVtsServerZoneTrafficStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + for serverZoneName, serverZone := range vtsStats.ServerZones { + r.mb.RecordNginxServerZoneSentDataPoint(now, serverZone.OutBytes, serverZoneName) + r.mb.RecordNginxServerZoneReceivedDataPoint(now, serverZone.InBytes, serverZoneName) + } +} + +func (r *nginxScraper) recordVtsServerZoneResponseStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + for serverZoneName, serverZone 
:= range vtsStats.ServerZones { + r.mb.RecordNginxServerZoneResponses1xxDataPoint( + now, serverZone.Responses.Status1xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses2xxDataPoint( + now, serverZone.Responses.Status2xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses3xxDataPoint( + now, serverZone.Responses.Status3xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses4xxDataPoint( + now, serverZone.Responses.Status4xx, serverZoneName, + ) + + r.mb.RecordNginxServerZoneResponses5xxDataPoint( + now, serverZone.Responses.Status5xx, serverZoneName, + ) + } +} + +func (r *nginxScraper) recordVtsConnectionStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + r.mb.RecordNginxNetReadingDataPoint(now, vtsStats.Connections.Reading) + r.mb.RecordNginxNetWritingDataPoint(now, vtsStats.Connections.Writing) + r.mb.RecordNginxNetWaitingDataPoint(now, vtsStats.Connections.Waiting) +} + +func (r *nginxScraper) recordTimingStats(now pcommon.Timestamp, vtsStats *NginxVtsStatus) { + + for upstreamZones, v := range vtsStats.UpstreamZones { + for _, val := range v { + + r.mb.RecordNginxUpstreamPeersResponseTimeDataPoint( + now, val.ResponseMsec, upstreamZones, val.Server, + ) + } + } +} + +func boolToInt(bitSet bool) int8 { + var bitSetVar int8 + if bitSet { + bitSetVar = 1 + } + return bitSetVar +} diff --git a/receiver/nginxreceiver/vts_stats.go b/receiver/nginxreceiver/vts_stats.go new file mode 100644 index 000000000000..52eec4f9aa91 --- /dev/null +++ b/receiver/nginxreceiver/vts_stats.go @@ -0,0 +1,174 @@ +package nginxreceiver + +import ( + "encoding/json" + "fmt" + "io" +) + +type NginxVtsStatus struct { + HostName string `json:"hostName"` + ModuleVersion string `json:"moduleVersion"` + NginxVersion string `json:"nginxVersion"` + LoadMsec int64 `json:"loadMsec"` + NowMsec int64 `json:"nowMsec"` + Connections Connections `json:"connections"` + SharedZones SharedZones `json:"sharedZones"` + ServerZones ServerZones `json:"serverZones"` + 
FilterZones FilterZones `json:"filterZones"` + UpstreamZones UpstreamZones `json:"upstreamZones"` + CacheZones CacheZones `json:"cacheZones"` +} + +type Connections struct { + Active int64 `json:"active"` + Reading int64 `json:"reading"` + Writing int64 `json:"writing"` + Waiting int64 `json:"waiting"` + Accepted int64 `json:"accepted"` + Handled int64 `json:"handled"` + Requests int64 `json:"requests"` +} + +type SharedZones struct { + Name string `json:"name"` + MaxSize int64 `json:"maxSize"` + UsedSize int64 `json:"usedSize"` + UsedNode int64 `json:"usedNode"` +} + +type Responses struct { + Status1xx int64 `json:"1xx"` + Status2xx int64 `json:"2xx"` + Status3xx int64 `json:"3xx"` + Status4xx int64 `json:"4xx"` + Status5xx int64 `json:"5xx"` + Miss int64 `json:"miss"` + Bypass int64 `json:"bypass"` + Expired int64 `json:"expired"` + Stale int64 `json:"stale"` + Updating int64 `json:"updating"` + Revalidated int64 `json:"revalidated"` + Hit int64 `json:"hit"` + Scarce int64 `json:"scarce"` +} + +type RequestMetrics struct { + Times []int64 `json:"times"` + Msecs []int64 `json:"msecs"` +} + +type RequestBuckets struct { + Msecs []int64 `json:"msecs"` + Counters []int64 `json:"counters"` +} + +type ZoneStats struct { + RequestCounter int64 `json:"requestCounter"` + InBytes int64 `json:"inBytes"` + OutBytes int64 `json:"outBytes"` + Responses Responses `json:"responses"` + RequestMsecCounter int64 `json:"requestMsecCounter"` + RequestMsec int64 `json:"requestMsec"` + RequestMsecs RequestMetrics `json:"requestMsecs"` + RequestBuckets RequestBuckets `json:"requestBuckets"` +} + +type ServerZones map[string]ZoneStats + +type FilterZones map[string]map[string]ZoneStats + +type UpstreamServer struct { + ZoneStats + Server string `json:"server"` + ResponseMsecCounter int64 `json:"responseMsecCounter"` + ResponseMsec int64 `json:"responseMsec"` + ResponseMsecs RequestMetrics `json:"responseMsecs"` + ResponseBuckets RequestBuckets `json:"responseBuckets"` + Weight float64 
`json:"weight"` + MaxFails int `json:"maxFails"` + FailTimeout int `json:"failTimeout"` + Backup bool `json:"backup"` + Down bool `json:"down"` +} + +type UpstreamZones map[string][]UpstreamServer + +type CacheZoneStats struct { + MaxSize int64 `json:"maxSize"` + UsedSize int64 `json:"usedSize"` + InBytes int64 `json:"inBytes"` + OutBytes int64 `json:"outBytes"` + Responses Responses `json:"responses"` +} + +type CacheZones map[string]CacheZoneStats + +func ParseVtsStats(r io.Reader) (*NginxVtsStatus, error) { + decoder := json.NewDecoder(r) + + // Create a map to store raw JSON first + var rawData map[string]interface{} + if err := decoder.Decode(&rawData); err != nil { + return nil, fmt.Errorf("failed to decode JSON: %w", err) + } + + // Marshal back to JSON bytes to ensure proper handling of numeric types + jsonBytes, err := json.Marshal(rawData) + if err != nil { + return nil, fmt.Errorf("failed to marshal intermediate JSON: %w", err) + } + + // Unmarshal into our structured type + var stats NginxVtsStatus + if err := json.Unmarshal(jsonBytes, &stats); err != nil { + return nil, fmt.Errorf("failed to unmarshal into NginxStatus: %w", err) + } + + // Validate required fields + if err := validateStats(&stats); err != nil { + return nil, fmt.Errorf("stats validation failed: %w", err) + } + + return &stats, nil +} + +func validateStats(stats *NginxVtsStatus) error { + if stats == nil { + return fmt.Errorf("stats cannot be nil") + } + + // Validate required string fields + if stats.HostName == "" { + return fmt.Errorf("hostName is required") + } + if stats.ModuleVersion == "" { + return fmt.Errorf("moduleVersion is required") + } + if stats.NginxVersion == "" { + return fmt.Errorf("nginxVersion is required") + } + + // Validate time fields + if stats.LoadMsec <= 0 { + return fmt.Errorf("loadMsec must be positive") + } + if stats.NowMsec <= 0 { + return fmt.Errorf("nowMsec must be positive") + } + + // Validate connections + if stats.Connections.Handled < 0 || + 
stats.Connections.Accepted < 0 || + stats.Connections.Active < 0 || + stats.Connections.Requests < 0 { + return fmt.Errorf("connection counts cannot be negative") + } + + // Basic validation of zones + if len(stats.ServerZones) == 0 { + return fmt.Errorf("serverZones cannot be empty") + } + + return nil +}