diff --git a/.chloggen/datadog-connector-memory-issue.yaml b/.chloggen/datadog-connector-memory-issue.yaml
new file mode 100644
index 000000000000..faeca962e373
--- /dev/null
+++ b/.chloggen/datadog-connector-memory-issue.yaml
@@ -0,0 +1,18 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: "bug_fix"
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: "datadogconnector"
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add a feature flag to address a memory issue in the Datadog connector
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [29755]
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/.chloggen/deprecate_configschema.yaml b/.chloggen/deprecate_configschema.yaml
new file mode 100755
index 000000000000..4ceb0f2a8fb4
--- /dev/null
+++ b/.chloggen/deprecate_configschema.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: deprecation
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: configschema
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Deprecate configschema in favor of generating component documentation with mdatagen as part of metadata generation
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [30187]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
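Editor's note: the deprecation above points users at mdatagen for generated docs. As a minimal, hedged sketch of the workflow the note refers to (the package name `examplereceiver` is hypothetical), each component keeps a `go:generate` directive so that `go generate ./...` with mdatagen installed regenerates the component's metadata code and documentation.md from metadata.yaml, replacing the configschema/docsgen tooling:

```go
// Package examplereceiver is a hypothetical component package, shown only to
// illustrate the mdatagen-based workflow that replaces configschema-generated
// docs: `go generate ./...` (with mdatagen installed) regenerates the
// component's metadata code and documentation.md from metadata.yaml.
package examplereceiver

//go:generate mdatagen metadata.yaml
```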
diff --git a/.chloggen/rmtanzu.yaml b/.chloggen/rmtanzu.yaml
new file mode 100755
index 000000000000..ed07c164dc26
--- /dev/null
+++ b/.chloggen/rmtanzu.yaml
@@ -0,0 +1,22 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: 'breaking'
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: "tanzuobservabilityexporter"
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Remove the tanzuobservabilityexporter. Users can still use the exporter in v0.91.0 and earlier releases.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [30184]
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/.chloggen/sparkmarshaluperror.yaml b/.chloggen/sparkmarshaluperror.yaml
new file mode 100755
index 000000000000..5c9c21afde8b
--- /dev/null
+++ b/.chloggen/sparkmarshaluperror.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: apachesparkreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Propagate application list errors to reveal the underlying issue
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [30278]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/.chloggen/support_empty_values.yaml b/.chloggen/support_empty_values.yaml
new file mode 100755
index 000000000000..5f4026389ae5
--- /dev/null
+++ b/.chloggen/support_empty_values.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: haproxyreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Support empty values in HAProxy stats.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [30252]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/swap-mssqldriver.yaml b/.chloggen/swap-mssqldriver.yaml
new file mode 100644
index 000000000000..c76c7f0865e3
--- /dev/null
+++ b/.chloggen/swap-mssqldriver.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: sqlqueryreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Swap the MS SQL Server driver from the legacy 'denisenkom' fork to the official Microsoft fork
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [27200]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
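Editor's note: to illustrate why the sqlqueryreceiver driver swap above is an enhancement rather than a breaking change, the official `github.com/microsoft/go-mssqldb` fork registers the same `sqlserver` driver name as the legacy `denisenkom` fork, so existing data source names keep working once the import is swapped. A minimal standalone sketch (the DSN is hypothetical), not the receiver's actual code:

```go
package main

import (
	"database/sql"
	"log"

	// Swapping this import from github.com/denisenkom/go-mssqldb keeps the
	// registered "sqlserver" driver name, so callers need no code changes.
	_ "github.com/microsoft/go-mssqldb"
)

func main() {
	// Hypothetical DSN, for illustration only.
	dsn := "sqlserver://user:password@localhost:1433?database=master"
	db, err := sql.Open("sqlserver", dsn)
	if err != nil {
		log.Fatalf("open: %v", err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatalf("ping: %v", err)
	}
	log.Println("connected via microsoft/go-mssqldb")
}
```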
diff --git a/.chloggen/time-zulu.yaml b/.chloggen/time-zulu.yaml
new file mode 100755
index 000000000000..afc01ce8e83a
--- /dev/null
+++ b/.chloggen/time-zulu.yaml
@@ -0,0 +1,33 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: time
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: The `%z` strptime format now correctly parses `Z` as a valid timezone
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues:
+- 29929
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+  `strptime(3)` says that `%z` is "an RFC-822/ISO 8601 standard
+  timezone specification", but the previous code did not allow the
+  string "Z" to signify UTC time, as required by ISO 8601. Now, both
+  `+0000` and `Z` are recognized as UTC times in all components that
+  handle `strptime` format strings.
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
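Editor's note: the subtext above hinges on `Z` and `+0000` denoting the same UTC offset in ISO 8601. A small standard-library Go check of that equivalence (it uses `time.Parse` layouts rather than the collector's strptime handling, which the fix above brings in line with this behavior):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The same instant written with an explicit numeric offset...
	withOffset, err := time.Parse("2006-01-02T15:04:05-0700", "2024-01-02T03:04:05+0000")
	if err != nil {
		panic(err)
	}
	// ...and with the ISO 8601 "Z" (Zulu) suffix.
	withZulu, err := time.Parse(time.RFC3339, "2024-01-02T03:04:05Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(withOffset.Equal(withZulu)) // true: both are 03:04:05 UTC
	fmt.Println(withZulu.Location())        // UTC
}
```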
diff --git a/.chloggen/use_testing_tb_golden.yaml b/.chloggen/use_testing_tb_golden.yaml
new file mode 100755
index 000000000000..3c085a7dc4c1
--- /dev/null
+++ b/.chloggen/use_testing_tb_golden.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: golden
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Use testing.TB in golden.WriteMetrics, golden.WriteTraces, and golden.WriteLogs instead of *testing.T
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [30277]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [api]
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 0d72f397dcc4..afc4fc338739 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -78,7 +78,6 @@ exporter/skywalkingexporter/ @open-telemetry/collect
 exporter/splunkhecexporter/ @open-telemetry/collector-contrib-approvers @atoulme @dmitryax
 exporter/sumologicexporter/ @open-telemetry/collector-contrib-approvers @sumo-drosiek
 exporter/syslogexporter/ @open-telemetry/collector-contrib-approvers @kkujawa-sumo @rnishtala-sumo @astencel-sumo
-exporter/tanzuobservabilityexporter/ @open-telemetry/collector-contrib-approvers @oppegard @thepeterstone @keep94
 exporter/tencentcloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @wgliang @yiyang5055
 exporter/zipkinexporter/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @astencel-sumo @crobert-1
@@ -159,7 +158,7 @@ processor/logstransformprocessor/ @open-telemetry/collect
 processor/metricsgenerationprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9
 processor/metricstransformprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax
 processor/probabilisticsamplerprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling
-processor/redactionprocessor/ @open-telemetry/collector-contrib-approvers @leonsp-ai @dmitryax @mx-psi @TylerHelmuth
+processor/redactionprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax @mx-psi @TylerHelmuth
 processor/remotetapprocessor/ @open-telemetry/collector-contrib-approvers @pmcollins
 processor/resourcedetectionprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole
 processor/resourcedetectionprocessor/internal/azure/ @open-telemetry/collector-contrib-approvers @mx-psi
@@ -192,7 +191,7 @@ receiver/bigipreceiver/ @open-telemetry/collect
 receiver/carbonreceiver/ @open-telemetry/collector-contrib-approvers @aboguszewski-sumo
 receiver/chronyreceiver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy
@jamesmoessis receiver/cloudflarereceiver/ @open-telemetry/collector-contrib-approvers @dehaansa @djaglowski -receiver/cloudfoundryreceiver/ @open-telemetry/collector-contrib-approvers @agoallikmaa @pellared @crobert-1 +receiver/cloudfoundryreceiver/ @open-telemetry/collector-contrib-approvers @pellared @crobert-1 receiver/collectdreceiver/ @open-telemetry/collector-contrib-approvers @atoulme receiver/couchdbreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski receiver/datadogreceiver/ @open-telemetry/collector-contrib-approvers @boostchicken @gouthamve @jpkrohling @MovieStoreGuy diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index d4f5549e32a4..016025230dc5 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -78,7 +78,6 @@ body: - exporter/splunkhec - exporter/sumologic - exporter/syslog - - exporter/tanzuobservability - exporter/tencentcloudlogservice - exporter/zipkin - extension/asapauth diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index a3682024b350..84f9e10ab81f 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -72,7 +72,6 @@ body: - exporter/splunkhec - exporter/sumologic - exporter/syslog - - exporter/tanzuobservability - exporter/tencentcloudlogservice - exporter/zipkin - extension/asapauth diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index 0f5dc889ad00..e6f57a20ed0f 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -72,7 +72,6 @@ body: - exporter/splunkhec - exporter/sumologic - exporter/syslog - - exporter/tanzuobservability - exporter/tencentcloudlogservice - exporter/zipkin - extension/asapauth diff --git a/.golangci.yml b/.golangci.yml index 8692ebe9e1ea..16fb19e4b5b3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -170,3 +170,6 @@ issues: - text: "G402:" linters: - gosec + - text: "SA1019: \"github.com/open-telemetry/opentelemetry-collector-contrib/cmd/configschema" + linters: + - staticcheck diff --git a/cmd/configschema/README.md b/cmd/configschema/README.md index 6031b2d628fd..16b99d0f2a93 100644 --- a/cmd/configschema/README.md +++ b/cmd/configschema/README.md @@ -1,3 +1,6 @@ +> Deprecated: [v0.92.0] This tool is deprecated and will be removed in a future release. +> See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 + # ConfigSchema API This package contains an API that can be used to introspect the configuration diff --git a/cmd/configschema/cfgmetadatagen/cfgmetadatagen/README.md b/cmd/configschema/cfgmetadatagen/cfgmetadatagen/README.md index 5886f1b84193..c3bc43745f84 100644 --- a/cmd/configschema/cfgmetadatagen/cfgmetadatagen/README.md +++ b/cmd/configschema/cfgmetadatagen/cfgmetadatagen/README.md @@ -1,3 +1,6 @@ +> Deprecated: [v0.92.0] This tool is deprecated and will be removed in a future release. 
+> See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 + # Config Metadata YAML Generator (alpha) This CLI application creates a configuration metadata YAML file for each diff --git a/cmd/configschema/cfgmetadatagen/cfgmetadatagen/cli.go b/cmd/configschema/cfgmetadatagen/cfgmetadatagen/cli.go index 426c3f7394a5..bf4142954d83 100644 --- a/cmd/configschema/cfgmetadatagen/cfgmetadatagen/cli.go +++ b/cmd/configschema/cfgmetadatagen/cfgmetadatagen/cli.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. +// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 package cfgmetadatagen import ( @@ -15,6 +17,8 @@ import ( // GenerateFiles is the entry point for cfgmetadatagen. Component factories are // passed in so it can be used by other distros. +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. +// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 func GenerateFiles(factories otelcol.Factories, sourceDir string, outputDir string) error { dr := configschema.NewDirResolver(sourceDir, configschema.DefaultModule) writer := newMetadataFileWriter(outputDir) diff --git a/cmd/configschema/cfgmetadatagen/main.go b/cmd/configschema/cfgmetadatagen/main.go index 3efc76ed03d5..e461b4fe7b05 100644 --- a/cmd/configschema/cfgmetadatagen/main.go +++ b/cmd/configschema/cfgmetadatagen/main.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. +// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 package main import ( @@ -14,6 +16,7 @@ import ( ) func main() { + sourceDir, outputDir := getFlags() c, err := components.Components() if err != nil { diff --git a/cmd/configschema/comments.go b/cmd/configschema/comments.go index c42da8a3338b..773f04780a87 100644 --- a/cmd/configschema/comments.go +++ b/cmd/configschema/comments.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. +// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 package configschema // import "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/configschema" import ( diff --git a/cmd/configschema/docsgen/README.md b/cmd/configschema/docsgen/README.md index 61365fb696e4..7b342699d92c 100644 --- a/cmd/configschema/docsgen/README.md +++ b/cmd/configschema/docsgen/README.md @@ -1,3 +1,6 @@ +> Deprecated: [v0.92.0] This tool is deprecated and will be removed in a future release. +> See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 + # Docsgen CLI Tool This package contains a CLI tool that generates markdown files for collector diff --git a/cmd/configschema/docsgen/docsgen/cli.go b/cmd/configschema/docsgen/docsgen/cli.go index f50af0fe9d05..5c3069c804c9 100644 --- a/cmd/configschema/docsgen/docsgen/cli.go +++ b/cmd/configschema/docsgen/docsgen/cli.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. 
+// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 package docsgen // import "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/configschema/docsgen/docsgen" import ( @@ -23,6 +25,8 @@ const mdFileName = "config.md" // CLI is the entrypoint for this package's functionality. It handles command- // line arguments for the docsgen executable and produces config documentation // for the specified components. +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. +// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 func CLI(factories otelcol.Factories, dr configschema.DirResolver) { tableTmpl, err := tableTemplate() if err != nil { diff --git a/cmd/configschema/docsgen/main.go b/cmd/configschema/docsgen/main.go index 03eb1705523c..2d1495cccc84 100644 --- a/cmd/configschema/docsgen/main.go +++ b/cmd/configschema/docsgen/main.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. +// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 package main import ( diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index ee25f5a443f9..077f13b757cd 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -1,3 +1,5 @@ +// Deprecated: [v0.92.0] This package is deprecated and will be removed in a future release. +// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30187 module github.com/open-telemetry/opentelemetry-collector-contrib/cmd/configschema go 1.20 @@ -283,7 +285,6 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/caio/go-tdigest/v4 v4.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -303,7 +304,6 @@ require ( github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/denisenkom/go-mssqldb v0.12.3 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/devigned/tab v0.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -313,7 +313,7 @@ require ( github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 // indirect github.com/eapache/go-resiliency v1.4.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect @@ -356,7 +356,7 @@ require ( github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // 
indirect @@ -418,6 +418,7 @@ require ( github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -452,6 +453,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.19 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 // indirect + github.com/microsoft/go-mssqldb v1.6.0 // indirect github.com/miekg/dns v1.1.56 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect @@ -489,7 +491,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opensearchexporter v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.91.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/extension/asapauthextension v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.91.0 // indirect @@ -613,7 +614,6 @@ require ( github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e // indirect github.com/vmware/govmomi v0.34.1 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect - github.com/wavefronthq/wavefront-sdk-go v0.15.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect @@ -707,7 +707,7 @@ require ( k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/kubelet v0.28.4 // indirect - k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect sigs.k8s.io/controller-runtime v0.16.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect @@ -830,8 +830,6 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splun replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter => ../../exporter/sumologicexporter -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter => ../../exporter/tanzuobservabilityexporter - replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter => ../../exporter/tencentcloudlogserviceexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter => ../../exporter/zipkinexporter diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index e1bac99e009c..41a36f526283 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -92,13 +92,10 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go 
v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= @@ -114,6 +111,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 h1:yfJe15aSwEQ6Oo6J+gdfdulPNoZ3TEhmbhLIoxZcA+U= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= github.com/Azure/azure-storage-queue-go v0.0.0-20230531184854-c06a8eff66fe h1:HGuouUM1533rBXmMtR7qh5pYNSSjUZG90b/MgJAnb/A= @@ -408,8 +407,6 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/caio/go-tdigest/v4 v4.0.1 h1:sx4ZxjmIEcLROUPs2j1BGe2WhOtHD6VSe6NNbBdKYh4= -github.com/caio/go-tdigest/v4 v4.0.1/go.mod h1:Wsa+f0EZnV2gShdj1adgl0tQSoXRxtM0QioTgukFw8U= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -494,8 +491,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= -github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= @@ -511,7 +506,6 @@ github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TR github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= @@ -526,8 +520,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 h1:wHGPJSXvwKQVf/XfhjUPyrhpcPKWNy8F3ikH+eiwoBg= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0/go.mod h1:PseHFo8Leko7J4A/TfZ6kkHdkzKBLUta6hRZR/OEbbc= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -692,8 +686,8 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -812,7 +806,9 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z 
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -1070,7 +1066,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= @@ -1138,6 +1133,8 @@ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQth github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -1184,7 +1181,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mongodb-forks/digest v1.0.5 h1:EJu3wtLZcA0HCvsZpX5yuD193/sW9tHiNvrEM5apXMk= github.com/mongodb-forks/digest v1.0.5/go.mod h1:rb+EX8zotClD5Dj4NdgxnJXG9nwrlx3NWKJ8xttz1Dg= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -1302,7 +1298,6 @@ github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1552,8 +1547,6 @@ github.com/vmware/govmomi v0.34.1 h1:Hqu2Uke2itC+cNoIcFQBLEZvX9wBRTTOP04J7V1fqRw github.com/vmware/govmomi v0.34.1/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= -github.com/wavefronthq/wavefront-sdk-go v0.15.0 h1:po9E3vh/0y7kOx8D9EtFp7kbSLLLKbmu/w/s1xGJAQU= -github.com/wavefronthq/wavefront-sdk-go v0.15.0/go.mod h1:V72c8e+bXuLK8HpA6ioW0ll5mK9IPD+4IHNNDY75ksA= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= @@ -1752,7 +1745,6 @@ golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1861,7 +1853,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -2339,8 +2330,8 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4Va k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw= k8s.io/kubelet v0.28.4/go.mod h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/cmd/githubgen/allowlist.txt b/cmd/githubgen/allowlist.txt index 834669e83c64..b8d17b2f4365 100644 --- a/cmd/githubgen/allowlist.txt +++ b/cmd/githubgen/allowlist.txt @@ -1,21 +1,15 @@ Caleb-Hurshman -Doron-Bargo MaxKsyunz MitchellGale YANG-DB -agoallikmaa architjugran asaharn billmeyer emreyalvac -keep94 kiranmayib -leonsp-ai liqiangz -oded-dd shaochengwang svrakitin -thepeterstone yiyang5055 am-kinetica mcube8 diff --git a/cmd/githubgen/codeowners.go b/cmd/githubgen/codeowners.go index 668513631ea1..5eea67cb63e1 100644 --- a/cmd/githubgen/codeowners.go +++ b/cmd/githubgen/codeowners.go @@ -81,8 +81,13 @@ func (cg codeownersGenerator) generate(data *githubData) error { allowlistLines := strings.Split(string(allowlistData), "\n") allowlist := make(map[string]struct{}, len(allowlistLines)) + unusedAllowlist := make(map[string]struct{}, len(allowlistLines)) for _, line := range allowlistLines { + if line == "" { + continue + } allowlist[line] = struct{}{} + unusedAllowlist[line] = struct{}{} } var missingCodeowners []string var duplicateCodeowners []string @@ -95,6 +100,7 @@ func (cg codeownersGenerator) generate(data *githubData) error { if !present { _, allowed := allowlist[codeowner] + delete(unusedAllowlist, codeowner) allowed = allowed || strings.HasPrefix(codeowner, "open-telemetry/") if !allowed { missingCodeowners = append(missingCodeowners, codeowner) @@ -111,6 +117,14 @@ func (cg codeownersGenerator) generate(data *githubData) error { sort.Strings(duplicateCodeowners) return fmt.Errorf("codeowners members duplicate in allowlist: %s", strings.Join(duplicateCodeowners, ", ")) } + if len(unusedAllowlist) > 0 { + var unused []string + for k := range unusedAllowlist { + unused = append(unused, k) + } + sort.Strings(unused) + return fmt.Errorf("unused members in allowlist: %s", strings.Join(unused, ", ")) + } codeowners := codeownersHeader deprecatedList := "## DEPRECATED components\n" diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index e6701623ea4c..3acecaaec0a5 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -79,7 +79,6 @@ exporters: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.91.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter v0.91.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter v0.91.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter v0.91.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter v0.91.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.91.0 @@ -238,7 +237,6 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy => ../../extension/awsproxy - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin => ../../pkg/translator/zipkin - 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor => ../../processor/groupbyattrsprocessor - - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter => ../../exporter/tanzuobservabilityexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/skywalkingexporter => ../../exporter/skywalkingexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/mezmoexporter => ../../exporter/mezmoexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/clickhouseexporter => ../../exporter/clickhouseexporter diff --git a/cmd/otelcontribcol/components.go b/cmd/otelcontribcol/components.go index b889e8a1b727..6d231e2960d6 100644 --- a/cmd/otelcontribcol/components.go +++ b/cmd/otelcontribcol/components.go @@ -68,7 +68,6 @@ import ( splunkhecexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter" sumologicexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter" syslogexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter" - tanzuobservabilityexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" tencentcloudlogserviceexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter" zipkinexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter" asapauthextension "github.com/open-telemetry/opentelemetry-collector-contrib/extension/asapauthextension" @@ -375,7 +374,6 @@ func components() (otelcol.Factories, error) { splunkhecexporter.NewFactory(), sumologicexporter.NewFactory(), syslogexporter.NewFactory(), - tanzuobservabilityexporter.NewFactory(), tencentcloudlogserviceexporter.NewFactory(), zipkinexporter.NewFactory(), ) diff --git a/cmd/otelcontribcol/exporters_test.go b/cmd/otelcontribcol/exporters_test.go index 456851a82c88..5fad65755f39 100644 --- a/cmd/otelcontribcol/exporters_test.go +++ b/cmd/otelcontribcol/exporters_test.go @@ -59,7 +59,6 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter" - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" @@ -565,19 +564,6 @@ func TestDefaultExporters(t *testing.T) { }, expectConsumeErr: true, }, - { - exporter: "tanzuobservability", - getConfigFn: func() component.Config { - cfg := expFactories["tanzuobservability"].CreateDefaultConfig().(*tanzuobservabilityexporter.Config) - cfg.Traces.Endpoint = "http://" + endpoint - cfg.Metrics.Endpoint = "http://" + endpoint - // disable queue to validate passing the test data synchronously - cfg.QueueSettings.Enabled = false - cfg.RetrySettings.Enabled = false - return cfg - }, - expectConsumeErr: true, - }, { exporter: "tencentcloud_logservice", getConfigFn: func() component.Config { diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index be21dbe49d3f..c126769c9acd 100644 --- a/cmd/otelcontribcol/go.mod 
+++ b/cmd/otelcontribcol/go.mod @@ -53,7 +53,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter v0.91.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/asapauthextension v0.91.0 @@ -330,7 +329,6 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/caio/go-tdigest/v4 v4.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -350,7 +348,6 @@ require ( github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/denisenkom/go-mssqldb v0.12.3 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/devigned/tab v0.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -360,7 +357,7 @@ require ( github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 // indirect github.com/eapache/go-resiliency v1.4.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect @@ -404,7 +401,7 @@ require ( github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect @@ -468,6 +465,7 @@ require ( github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -502,6 +500,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.19 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 // indirect + github.com/microsoft/go-mssqldb v1.6.0 // indirect github.com/miekg/dns v1.1.56 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect @@ -637,7 +636,6 @@ require ( github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e // indirect 
github.com/vmware/govmomi v0.34.1 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect - github.com/wavefronthq/wavefront-sdk-go v0.15.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect @@ -717,7 +715,7 @@ require ( k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/kubelet v0.28.4 // indirect - k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect sigs.k8s.io/controller-runtime v0.16.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect @@ -785,8 +783,6 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor => ../../processor/groupbyattrsprocessor -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter => ../../exporter/tanzuobservabilityexporter - replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/skywalkingexporter => ../../exporter/skywalkingexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/mezmoexporter => ../../exporter/mezmoexporter diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index d6f3af49ec56..a00626364b40 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -91,13 +91,10 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= @@ -113,6 +110,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 
h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 h1:yfJe15aSwEQ6Oo6J+gdfdulPNoZ3TEhmbhLIoxZcA+U= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= github.com/Azure/azure-storage-queue-go v0.0.0-20230531184854-c06a8eff66fe h1:HGuouUM1533rBXmMtR7qh5pYNSSjUZG90b/MgJAnb/A= @@ -407,8 +406,6 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/caio/go-tdigest/v4 v4.0.1 h1:sx4ZxjmIEcLROUPs2j1BGe2WhOtHD6VSe6NNbBdKYh4= -github.com/caio/go-tdigest/v4 v4.0.1/go.mod h1:Wsa+f0EZnV2gShdj1adgl0tQSoXRxtM0QioTgukFw8U= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -493,8 +490,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= -github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= @@ -510,7 +505,6 @@ github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TR github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= @@ -525,8 +519,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= -github.com/dvsekhvalnov/jose2go v1.5.0 
h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 h1:wHGPJSXvwKQVf/XfhjUPyrhpcPKWNy8F3ikH+eiwoBg= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0/go.mod h1:PseHFo8Leko7J4A/TfZ6kkHdkzKBLUta6hRZR/OEbbc= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -689,8 +683,8 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -809,7 +803,9 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -1067,7 +1063,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/leoluk/perflib_exporter v0.2.1 h1:/3/ut1k/jFt5p4ypjLZKDHDqlXAK6ERZPVWtwdI389I= @@ -1134,6 +1129,8 @@ github.com/matttproud/golang_protobuf_extensions/v2 
v2.0.0/go.mod h1:QUyp042oQth github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -1180,7 +1177,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mongodb-forks/digest v1.0.5 h1:EJu3wtLZcA0HCvsZpX5yuD193/sW9tHiNvrEM5apXMk= github.com/mongodb-forks/digest v1.0.5/go.mod h1:rb+EX8zotClD5Dj4NdgxnJXG9nwrlx3NWKJ8xttz1Dg= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -1303,7 +1299,6 @@ github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1551,8 +1546,6 @@ github.com/vmware/govmomi v0.34.1 h1:Hqu2Uke2itC+cNoIcFQBLEZvX9wBRTTOP04J7V1fqRw github.com/vmware/govmomi v0.34.1/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= -github.com/wavefronthq/wavefront-sdk-go v0.15.0 h1:po9E3vh/0y7kOx8D9EtFp7kbSLLLKbmu/w/s1xGJAQU= -github.com/wavefronthq/wavefront-sdk-go v0.15.0/go.mod h1:V72c8e+bXuLK8HpA6ioW0ll5mK9IPD+4IHNNDY75ksA= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= @@ -1751,7 +1744,6 @@ golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1860,7 +1852,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -2339,8 +2330,8 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4Va k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw= k8s.io/kubelet v0.28.4/go.mod h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/connector/datadogconnector/README.md b/connector/datadogconnector/README.md index a1aae79cddbd..14e144da72bc 100644 --- a/connector/datadogconnector/README.md +++ b/connector/datadogconnector/README.md @@ -104,4 +104,9 @@ service: ``` -Here we have two traces pipelines that ingest the same data but one is being sampled. The one that is sampled has its data sent to the datadog backend for you to see the sampled subset of the total traces sent across. The other non-sampled pipeline of traces sends its data to the metrics pipeline to be used in the APM stats. This unsampled pipeline gives the full picture of how much data the application emits in traces. +Here we have two traces pipelines that ingest the same data but one is being sampled. The one that is sampled has its data sent to the datadog backend for you to see the sampled subset of the total traces sent across. The other non-sampled pipeline of traces sends its data to the metrics pipeline to be used in the APM stats. This unsampled pipeline gives the full picture of how much data the application emits in traces. 
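+
+For illustration, a minimal sketch of the dual-pipeline layout described above is shown below. Component names such as `probabilistic_sampler` and the `datadog/connector` instance name are assumptions for this sketch; the configuration example shown above remains the reference.
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+
+processors:
+  batch:
+  probabilistic_sampler:
+    sampling_percentage: 20
+
+connectors:
+  datadog/connector:
+
+exporters:
+  datadog:
+    api:
+      key: ${env:DD_API_KEY}
+
+service:
+  pipelines:
+    traces: # unsampled traces feed the connector, which computes APM stats
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [datadog/connector]
+    traces/sampled: # sampled traces are sent to the Datadog backend
+      receivers: [otlp]
+      processors: [probabilistic_sampler, batch]
+      exporters: [datadog]
+    metrics: # APM stats produced by the connector
+      receivers: [datadog/connector]
+      processors: [batch]
+      exporters: [datadog]
+```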
+ +## Feature Gate for Performance + +If you are experiencing high memory usage with the Datadog Connector, similar to [issue](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29755), enable the feature gate `connector.datadogconnector.performance`. With the feature gate enabled, the Datadog Connector takes OTLP traces and produces an OTLP metric named `dd.internal.stats.payload`. This metric has an attribute, `dd.internal.stats.payload`, that contains the bytes of the StatsPayload. Note that with the feature gate enabled, the Datadog Connector can only be used in conjunction with the Datadog Exporter. Enable the feature gate only if you need it for performance reasons or higher throughput, and enable it on all collectors in the pipeline that sends data to Datadog (especially in gateway deployments). We plan to refactor this component in the future so that the signals it produces are usable in any metrics pipeline. + diff --git a/connector/datadogconnector/connector.go b/connector/datadogconnector/connector.go index a5bb5026a078..0309fa128888 100644 --- a/connector/datadogconnector/connector.go +++ b/connector/datadogconnector/connector.go @@ -12,6 +12,7 @@ import ( "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/otel/metric/noop" "go.uber.org/zap" @@ -113,8 +114,19 @@ func (c *connectorImp) run() { if len(stats.Stats) == 0 { continue } + var mx pmetric.Metrics + var err error + if datadog.ConnectorPerformanceFeatureGate.IsEnabled() { + c.logger.Debug("Received stats payload", zap.Any("stats", stats)) + mx, err = c.translator.StatsToMetrics(stats) + if err != nil { + c.logger.Error("Failed to convert stats to metrics", zap.Error(err)) + continue + } + } else { + mx = c.translator.StatsPayloadToMetrics(stats) + } // APM stats as metrics - mx := c.translator.StatsPayloadToMetrics(stats) ctx := context.TODO() // send metrics to the consumer or next component in pipeline diff --git a/exporter/awss3exporter/README.md b/exporter/awss3exporter/README.md index 05911cb538e6..93a90b4d0141 100644 --- a/exporter/awss3exporter/README.md +++ b/exporter/awss3exporter/README.md @@ -4,13 +4,14 @@ | Status | | | ------------- |-----------| | Stability | [alpha]: traces, metrics, logs | -| Distributions | [contrib], [observiq], [sumo] | +| Distributions | [contrib], [observiq], [splunk], [sumo] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fawss3%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fawss3) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fawss3%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fawss3) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@atoulme](https://www.github.com/atoulme), [@pdelewski](https://www.github.com/pdelewski) | [alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha [contrib]:
https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib [observiq]: https://github.com/observIQ/observiq-otel-collector +[splunk]: https://github.com/signalfx/splunk-otel-collector [sumo]: https://github.com/SumoLogic/sumologic-otel-collector diff --git a/exporter/awss3exporter/metadata.yaml b/exporter/awss3exporter/metadata.yaml index a0f95778f998..6e419a23de2b 100644 --- a/exporter/awss3exporter/metadata.yaml +++ b/exporter/awss3exporter/metadata.yaml @@ -4,7 +4,7 @@ status: class: exporter stability: alpha: [traces, metrics, logs] - distributions: [contrib, observiq, sumo] + distributions: [contrib, observiq, sumo, splunk] codeowners: active: [atoulme, pdelewski] tests: diff --git a/exporter/datadogexporter/factory.go b/exporter/datadogexporter/factory.go index 6b5f94f8964f..df3a05291602 100644 --- a/exporter/datadogexporter/factory.go +++ b/exporter/datadogexporter/factory.go @@ -6,9 +6,11 @@ package datadogexporter // import "github.com/open-telemetry/opentelemetry-colle import ( "context" "fmt" + "runtime" "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/agent" "github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" @@ -25,13 +27,15 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/otel/metric/noop" "go.uber.org/zap" + "google.golang.org/protobuf/proto" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" ) -var mertricExportNativeClientFeatureGate = featuregate.GlobalRegistry().MustRegister( +var metricExportNativeClientFeatureGate = featuregate.GlobalRegistry().MustRegister( "exporter.datadogexporter.metricexportnativeclient", featuregate.StageBeta, featuregate.WithRegisterDescription("When enabled, metric export in datadogexporter uses native Datadog client APIs instead of Zorkian APIs."), @@ -46,17 +50,17 @@ var noAPMStatsFeatureGate = featuregate.GlobalRegistry().MustRegister( // isMetricExportV2Enabled returns true if metric export in datadogexporter uses native Datadog client APIs, false if it uses Zorkian APIs func isMetricExportV2Enabled() bool { - return mertricExportNativeClientFeatureGate.IsEnabled() + return metricExportNativeClientFeatureGate.IsEnabled() } // enableNativeMetricExport switches metric export to call native Datadog APIs instead of Zorkian APIs. func enableNativeMetricExport() error { - return featuregate.GlobalRegistry().Set(mertricExportNativeClientFeatureGate.ID(), true) + return featuregate.GlobalRegistry().Set(metricExportNativeClientFeatureGate.ID(), true) } // enableZorkianMetricExport switches metric export to call Zorkian APIs instead of native Datadog APIs. 
func enableZorkianMetricExport() error { - return featuregate.GlobalRegistry().Set(mertricExportNativeClientFeatureGate.ID(), false) + return featuregate.GlobalRegistry().Set(metricExportNativeClientFeatureGate.ID(), false) } const metadataReporterPeriod = 30 * time.Minute @@ -232,6 +236,32 @@ func checkAndCastConfig(c component.Config, logger *zap.Logger) *Config { return cfg } +func (f *factory) consumeStatsPayload(ctx context.Context, out chan []byte, traceagent *agent.Agent, tracerVersion string, logger *zap.Logger) { + for i := 0; i < runtime.NumCPU(); i++ { + f.wg.Add(1) + go func() { + defer f.wg.Done() + for { + select { + case <-ctx.Done(): + return + case msg := <-out: + sp := &pb.StatsPayload{} + + err := proto.Unmarshal(msg, sp) + if err != nil { + logger.Error("failed to unmarshal stats payload", zap.Error(err)) + continue + } + for _, sc := range sp.Stats { + traceagent.ProcessStats(sc, "", tracerVersion) + } + } + } + }() + } +} + // createMetricsExporter creates a metrics exporter based on this config. func (f *factory) createMetricsExporter( ctx context.Context, @@ -239,7 +269,6 @@ func (f *factory) createMetricsExporter( c component.Config, ) (exporter.Metrics, error) { cfg := checkAndCastConfig(c, set.TelemetrySettings.Logger) - hostProvider, err := f.SourceProvider(set.TelemetrySettings, cfg.Hostname) if err != nil { return nil, fmt.Errorf("failed to build hostname provider: %w", err) @@ -253,7 +282,12 @@ func (f *factory) createMetricsExporter( cancel() return nil, fmt.Errorf("failed to start trace-agent: %w", err) } - + var statsOut chan []byte + if datadog.ConnectorPerformanceFeatureGate.IsEnabled() { + statsOut = make(chan []byte, 1000) + statsv := set.BuildInfo.Command + set.BuildInfo.Version + f.consumeStatsPayload(ctx, statsOut, traceagent, statsv, set.Logger) + } pcfg := newMetadataConfigfromConfig(cfg) metadataReporter, err := f.Reporter(set, pcfg) if err != nil { @@ -286,7 +320,7 @@ func (f *factory) createMetricsExporter( return nil } } else { - exp, metricsErr := newMetricsExporter(ctx, set, cfg, &f.onceMetadata, attrsTranslator, hostProvider, traceagent, metadataReporter) + exp, metricsErr := newMetricsExporter(ctx, set, cfg, &f.onceMetadata, attrsTranslator, hostProvider, traceagent, metadataReporter, statsOut) if metricsErr != nil { cancel() // first cancel context f.wg.Wait() // then wait for shutdown @@ -310,6 +344,9 @@ func (f *factory) createMetricsExporter( exporterhelper.WithShutdown(func(context.Context) error { cancel() f.StopReporter() + if statsOut != nil { + close(statsOut) + } return nil }), ) diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index 2cb908dbaee6..d011cd9b16af 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -20,6 +20,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.91.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.91.0 @@ -185,7 +186,6 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.91.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.91.0 // indirect diff --git a/exporter/datadogexporter/integrationtest/go.mod b/exporter/datadogexporter/integrationtest/go.mod index c13ba97fcdf8..6eaee37eda48 100644 --- a/exporter/datadogexporter/integrationtest/go.mod +++ b/exporter/datadogexporter/integrationtest/go.mod @@ -6,6 +6,7 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.50.1 github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.91.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.91.0 github.com/stretchr/testify v1.8.4 github.com/tinylib/msgp v1.1.9 @@ -14,6 +15,7 @@ require ( go.opentelemetry.io/collector/connector v0.91.0 go.opentelemetry.io/collector/exporter v0.91.0 go.opentelemetry.io/collector/exporter/debugexporter v0.91.0 + go.opentelemetry.io/collector/featuregate v1.0.0 go.opentelemetry.io/collector/otelcol v0.91.0 go.opentelemetry.io/collector/processor v0.91.0 go.opentelemetry.io/collector/processor/batchprocessor v0.91.0 @@ -113,7 +115,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.91.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.91.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.91.0 // indirect @@ -157,7 +158,6 @@ require ( go.opentelemetry.io/collector/consumer v0.91.0 // indirect go.opentelemetry.io/collector/extension v0.91.0 // indirect go.opentelemetry.io/collector/extension/auth v0.91.0 // indirect - go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0 // indirect go.opentelemetry.io/collector/semconv v0.91.0 // indirect go.opentelemetry.io/collector/service v0.91.0 // indirect diff --git a/exporter/datadogexporter/integrationtest/integration_test.go b/exporter/datadogexporter/integrationtest/integration_test.go index 12b9199d2ae7..e10f8bc399f9 100644 --- a/exporter/datadogexporter/integrationtest/integration_test.go +++ b/exporter/datadogexporter/integrationtest/integration_test.go @@ -24,6 +24,7 @@ import ( "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/debugexporter" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/otelcol" "go.opentelemetry.io/collector/otelcol/otelcoltest" "go.opentelemetry.io/collector/processor" @@ -39,72 +40,97 @@ 
import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/testutil" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" ) func TestIntegration(t *testing.T) { - // 1. Set up mock Datadog server - // See also https://github.com/DataDog/datadog-agent/blob/49c16e0d4deab396626238fa1d572b684475a53f/cmd/trace-agent/test/backend.go - apmstatsRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.APMStatsEndpoint, ReqChan: make(chan []byte)} - tracesRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.TraceEndpoint, ReqChan: make(chan []byte)} - server := testutil.DatadogServerMock(apmstatsRec.HandlerFunc, tracesRec.HandlerFunc) - defer server.Close() + tests := []struct { + name string + featureGateEnabled bool + }{ + { + name: "with feature gate enabled", + featureGateEnabled: true, + }, + { + name: "with feature gate disabled", + featureGateEnabled: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // 1. Set up mock Datadog server + // See also https://github.com/DataDog/datadog-agent/blob/49c16e0d4deab396626238fa1d572b684475a53f/cmd/trace-agent/test/backend.go + apmstatsRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.APMStatsEndpoint, ReqChan: make(chan []byte)} + tracesRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.TraceEndpoint, ReqChan: make(chan []byte)} + server := testutil.DatadogServerMock(apmstatsRec.HandlerFunc, tracesRec.HandlerFunc) + defer server.Close() - // 2. Start in-process collector - factories := getIntegrationTestComponents(t) - app, confFilePath := getIntegrationTestCollector(t, server.URL, factories) - go func() { - assert.NoError(t, app.Run(context.Background())) - }() - defer app.Shutdown() - defer os.Remove(confFilePath) - waitForReadiness(app) + // 2. Start in-process collector + factories := getIntegrationTestComponents(t) + app, confFilePath := getIntegrationTestCollector(t, server.URL, factories) + if tt.featureGateEnabled { + err := featuregate.GlobalRegistry().Set(datadog.ConnectorPerformanceFeatureGate.ID(), true) + assert.NoError(t, err) + defer func() { + _ = featuregate.GlobalRegistry().Set(datadog.ConnectorPerformanceFeatureGate.ID(), false) + }() + } + go func() { + assert.NoError(t, app.Run(context.Background())) + }() + defer app.Shutdown() + defer os.Remove(confFilePath) + waitForReadiness(app) - // 3. Generate and send traces - sendTraces(t) + // 3. Generate and send traces + sendTraces(t) - // 4. Validate traces and APM stats from the mock server - var spans []*pb.Span - var stats []*pb.ClientGroupedStats + // 4. Validate traces and APM stats from the mock server + var spans []*pb.Span + var stats []*pb.ClientGroupedStats - // 5 sampled spans + APM stats on 10 spans are sent to datadog exporter - for len(spans) < 5 || len(stats) < 10 { - select { - case tracesBytes := <-tracesRec.ReqChan: - gz := getGzipReader(t, tracesBytes) - slurp, err := io.ReadAll(gz) - require.NoError(t, err) - var traces pb.AgentPayload - require.NoError(t, proto.Unmarshal(slurp, &traces)) - for _, tps := range traces.TracerPayloads { - for _, chunks := range tps.Chunks { - spans = append(spans, chunks.Spans...) 
- for _, span := range chunks.Spans { - assert.Equal(t, span.Meta["_dd.stats_computed"], "true") + // 5 sampled spans + APM stats on 10 spans are sent to datadog exporter + for len(spans) < 5 || len(stats) < 10 { + select { + case tracesBytes := <-tracesRec.ReqChan: + gz := getGzipReader(t, tracesBytes) + slurp, err := io.ReadAll(gz) + require.NoError(t, err) + var traces pb.AgentPayload + require.NoError(t, proto.Unmarshal(slurp, &traces)) + for _, tps := range traces.TracerPayloads { + for _, chunks := range tps.Chunks { + spans = append(spans, chunks.Spans...) + for _, span := range chunks.Spans { + assert.Equal(t, span.Meta["_dd.stats_computed"], "true") + } + } } - } - } - case apmstatsBytes := <-apmstatsRec.ReqChan: - gz := getGzipReader(t, apmstatsBytes) - var spl pb.StatsPayload - require.NoError(t, msgp.Decode(gz, &spl)) - for _, csps := range spl.Stats { - for _, csbs := range csps.Stats { - stats = append(stats, csbs.Stats...) - for _, stat := range csbs.Stats { - assert.True(t, strings.HasPrefix(stat.Resource, "TestSpan")) - assert.Equal(t, stat.Hits, uint64(1)) - assert.Equal(t, stat.TopLevelHits, uint64(1)) + case apmstatsBytes := <-apmstatsRec.ReqChan: + gz := getGzipReader(t, apmstatsBytes) + var spl pb.StatsPayload + require.NoError(t, msgp.Decode(gz, &spl)) + for _, csps := range spl.Stats { + for _, csbs := range csps.Stats { + stats = append(stats, csbs.Stats...) + for _, stat := range csbs.Stats { + assert.True(t, strings.HasPrefix(stat.Resource, "TestSpan")) + assert.Equal(t, stat.Hits, uint64(1)) + assert.Equal(t, stat.TopLevelHits, uint64(1)) + } + } } } } - } - } - // Verify we don't receive more than the expected numbers - assert.Len(t, spans, 5) - assert.Len(t, stats, 10) + // Verify we don't receive more than the expected numbers + assert.Len(t, spans, 5) + assert.Len(t, stats, 10) + }) + } } func getIntegrationTestComponents(t *testing.T) otelcol.Factories { @@ -185,6 +211,9 @@ exporters: endpoint: %q service: + telemetry: + metrics: + level: none pipelines: traces: receivers: [otlp] diff --git a/exporter/datadogexporter/metrics_exporter.go b/exporter/datadogexporter/metrics_exporter.go index c09450465952..3e99b6325706 100644 --- a/exporter/datadogexporter/metrics_exporter.go +++ b/exporter/datadogexporter/metrics_exporter.go @@ -31,6 +31,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/sketches" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/scrub" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog" ) type metricsExporter struct { @@ -52,7 +53,7 @@ type metricsExporter struct { } // translatorFromConfig creates a new metrics translator from the exporter -func translatorFromConfig(set component.TelemetrySettings, cfg *Config, attrsTranslator *attributes.Translator, sourceProvider source.Provider) (*otlpmetrics.Translator, error) { +func translatorFromConfig(set component.TelemetrySettings, cfg *Config, attrsTranslator *attributes.Translator, sourceProvider source.Provider, statsOut chan []byte) (*otlpmetrics.Translator, error) { options := []otlpmetrics.TranslatorOption{ otlpmetrics.WithDeltaTTL(cfg.Metrics.DeltaTTL), otlpmetrics.WithFallbackSourceProvider(sourceProvider), @@ -84,6 +85,9 @@ func translatorFromConfig(set component.TelemetrySettings, cfg *Config, attrsTra options = append(options, 
otlpmetrics.WithInitialCumulMonoValueMode( otlpmetrics.InitialCumulMonoValueMode(cfg.Metrics.SumConfig.InitialCumulativeMonotonicMode))) + if datadog.ConnectorPerformanceFeatureGate.IsEnabled() { + options = append(options, otlpmetrics.WithStatsOut(statsOut)) + } return otlpmetrics.NewTranslator(set, attrsTranslator, options...) } @@ -96,8 +100,9 @@ func newMetricsExporter( sourceProvider source.Provider, apmStatsProcessor api.StatsProcessor, metadataReporter *inframetadata.Reporter, + statsOut chan []byte, ) (*metricsExporter, error) { - tr, err := translatorFromConfig(params.TelemetrySettings, cfg, attrsTranslator, sourceProvider) + tr, err := translatorFromConfig(params.TelemetrySettings, cfg, attrsTranslator, sourceProvider, statsOut) if err != nil { return nil, err } diff --git a/exporter/datadogexporter/metrics_exporter_test.go b/exporter/datadogexporter/metrics_exporter_test.go index 5d3482a159f4..55eb3e71b1c3 100644 --- a/exporter/datadogexporter/metrics_exporter_test.go +++ b/exporter/datadogexporter/metrics_exporter_test.go @@ -315,6 +315,7 @@ func Test_metricsExporter_PushMetricsData(t *testing.T) { &testutil.MockSourceProvider{Src: tt.source}, &statsRecorder, reporter, + nil, ) if tt.expectedErr == nil { assert.NoError(t, err, "unexpected error") @@ -706,6 +707,7 @@ func Test_metricsExporter_PushMetricsData_Zorkian(t *testing.T) { &testutil.MockSourceProvider{Src: tt.source}, &statsRecorder, reporter, + nil, ) if tt.expectedErr == nil { assert.NoError(t, err, "unexpected error") diff --git a/exporter/loadbalancingexporter/go.mod b/exporter/loadbalancingexporter/go.mod index a0ce15fa45c1..575cb88cab2a 100644 --- a/exporter/loadbalancingexporter/go.mod +++ b/exporter/loadbalancingexporter/go.mod @@ -21,7 +21,7 @@ require ( k8s.io/api v0.28.4 k8s.io/apimachinery v0.28.4 k8s.io/client-go v0.28.4 - k8s.io/utils v0.0.0-20231127182322-b307cd553661 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e sigs.k8s.io/controller-runtime v0.16.3 ) diff --git a/exporter/loadbalancingexporter/go.sum b/exporter/loadbalancingexporter/go.sum index 62ab51dad3e5..101a81447e71 100644 --- a/exporter/loadbalancingexporter/go.sum +++ b/exporter/loadbalancingexporter/go.sum @@ -2082,8 +2082,8 @@ k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= diff --git a/exporter/logzioexporter/factory.go b/exporter/logzioexporter/factory.go index 8733ff082c71..048d3670a910 100644 --- a/exporter/logzioexporter/factory.go +++ b/exporter/logzioexporter/factory.go @@ -75,12 +75,12 @@ func getListenerURL(region string) string { } func generateEndpoint(cfg *Config) (string, 
error) { - defaultURL := fmt.Sprintf("%s/?token=%s", getListenerURL(""), cfg.Token) + defaultURL := fmt.Sprintf("%s/?token=%s", getListenerURL(""), string(cfg.Token)) switch { case cfg.HTTPClientSettings.Endpoint != "": return cfg.HTTPClientSettings.Endpoint, nil case cfg.Region != "": - return fmt.Sprintf("%s/?token=%s", getListenerURL(cfg.Region), cfg.Token), nil + return fmt.Sprintf("%s/?token=%s", getListenerURL(cfg.Region), string(cfg.Token)), nil case cfg.HTTPClientSettings.Endpoint == "" && cfg.Region == "": return defaultURL, errors.New("failed to generate endpoint, Endpoint or Region must be set") default: diff --git a/exporter/pulsarexporter/go.mod b/exporter/pulsarexporter/go.mod index 143187034ae5..0557b06478ad 100644 --- a/exporter/pulsarexporter/go.mod +++ b/exporter/pulsarexporter/go.mod @@ -34,7 +34,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/danieljoos/wincred v1.0.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/frankban/quicktest v1.14.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect diff --git a/exporter/pulsarexporter/go.sum b/exporter/pulsarexporter/go.sum index 9f209a1505cf..d78460a19f55 100644 --- a/exporter/pulsarexporter/go.sum +++ b/exporter/pulsarexporter/go.sum @@ -98,8 +98,9 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= -github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b h1:HBah4D48ypg3J7Np4N+HY/ZR76fx3HEUGxDU6Uk39oQ= github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -316,6 +317,7 @@ github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -823,6 +825,7 @@ gopkg.in/alecthomas/kingpin.v2 
v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/exporter/tanzuobservabilityexporter/Makefile b/exporter/tanzuobservabilityexporter/Makefile deleted file mode 100644 index ded7a36092dc..000000000000 --- a/exporter/tanzuobservabilityexporter/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../Makefile.Common diff --git a/exporter/tanzuobservabilityexporter/README.md b/exporter/tanzuobservabilityexporter/README.md deleted file mode 100644 index 65016f80d9e5..000000000000 --- a/exporter/tanzuobservabilityexporter/README.md +++ /dev/null @@ -1,264 +0,0 @@ -# Deprecated Tanzu Observability (Wavefront) Exporter - -> [!WARNING] -> Tanzu Observability (Wavefront) Proxy v11.3 supports native OpenTelemetry protocol (OTLP) ingestion of traces and metrics, released in June 2022. -> This vendor specific exporter is deprecated and will become unavailable after the end of 2023. -> -> Refer to our [documentation](https://docs.wavefront.com/opentelemetry_overview.html) for configuring the Proxy to receive traces and metrics via OTLP gRPC or OTLP HTTP. -> - - - -| Status | | -| ------------- |-----------| -| Stability | [deprecated]: traces, metrics | -| Distributions | [contrib] | -| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Ftanzuobservability%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Ftanzuobservability) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Ftanzuobservability%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Ftanzuobservability) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@oppegard](https://www.github.com/oppegard), [@thepeterstone](https://www.github.com/thepeterstone), [@keep94](https://www.github.com/keep94) | - -[deprecated]: https://github.com/open-telemetry/opentelemetry-collector#deprecated -[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib - - - -This exporter supports sending metrics and traces to [Tanzu Observability](https://tanzu.vmware.com/observability). 
- -## Prerequisites - -- [Obtain the Tanzu Observability by Wavefront API token.](https://docs.wavefront.com/wavefront_api.html#generating-an-api-token) -- [Set up and start a Tanzu Observability by Wavefront proxy](https://docs.wavefront.com/proxies_installing.html) and - configure it with the API token you obtained. -- To have the proxy generate [span RED metrics](https://docs.wavefront.com/trace_data_details.html#red-metrics) from - trace data, [configure](https://docs.wavefront.com/proxies_configuring.html) the proxy to receive traces by - setting `customTracingListenerPorts=30001`. For metrics, the proxy listens on port 2878 by default. - -## Configuration - -Given a Wavefront proxy at 10.10.10.10 configured with `customTracingListenerPorts=30001`, a basic configuration of -the Tanzu Observability exporter follows: - -```yaml -receivers: - examplereceiver: - -processors: - batch: - timeout: 10s - -exporters: - tanzuobservability: - traces: - endpoint: "http://10.10.10.10:30001" - metrics: - endpoint: "http://10.10.10.10:2878" - -service: - pipelines: - traces: - receivers: [ examplereceiver ] - processors: [ batch ] - exporters: [ tanzuobservability ] - metrics: - receivers: [ examplereceiver ] - processors: [ batch ] - exporters: [ tanzuobservability ] -``` - -## Advanced Configuration - -### Resource Attributes on Metrics - -Client programs using an OpenTelemetry SDK can be configured to wrap all emitted telemetry (metrics, spans, logs) with -a set of global key-value pairs, -called [resource attributes](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md) -. -By default, the Tanzu Observability Exporter includes resource attributes on spans but _excludes_ them on metrics. To -include resource attributes as tags on metrics, set the flag `resource_attrs_included` to `true` as per the example -below. - -**Note:** Tanzu Observability has a 254-character limit on tag key-value pairs. If a resource attribute exceeds this -limit, the metric will not show up in Tanzu Observability. - -### Application Resource Attributes on Metrics - -The Tanzu Observability Exporter will -include [application resource attributes](https://docs.wavefront.com/trace_data_details.html#application-tags) on -metrics (`application`, `service.name` -, `cluster`, and `shard`). To exclude these resource -attributes as tags on metrics, set the flag `app_tags_excluded` to `true` as per the example -below. - -**Note:** A tag `service.name`(if provided) becomes `service` on the transformed wavefront metric. However, if both the -tags (`service` & `service.name`) are provided then the `service` tag will be included. - -### Queuing and Retries - -This exporter uses OpenTelemetry Collector helpers to queue data and retry on failures. - -* `retry_on_failure` [Details and defaults here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration) - . -* `sending_queue` [Details and defaults here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration) - -### Recommended Pipeline Processors - -The memory_limiter processor is recommended to prevent out of memory situations on the collector. It allows performing -periodic checks of memory usage – if it exceeds defined limits it will begin dropping data and forcing garbage -collection to reduce memory -consumption. 
[Details and defaults here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiterprocessor/README.md) -. - -**Note:** The order matters when enabling multiple processors in a pipeline (e.g. the memory limiter and batch -processors in the example config below). Please refer to the -processors' [documentation](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor) -for more information. - -### Example Advanced Configuration - -```yaml -receivers: - examplereceiver: - -processors: - memory_limiter: - check_interval: 1s - limit_percentage: 50 - spike_limit_percentage: 30 - batch: - timeout: 10s - -exporters: - tanzuobservability: - traces: - endpoint: "http://10.10.10.10:30001" - metrics: - endpoint: "http://10.10.10.10:2878" - resource_attrs_included: true - app_tags_excluded: true - retry_on_failure: - max_elapsed_time: 3m - sending_queue: - queue_size: 10000 - -service: - pipelines: - traces: - receivers: [ examplereceiver ] - processors: [ memory_limiter, batch ] - exporters: [ tanzuobservability ] - metrics: - receivers: [ examplereceiver ] - processors: [ memory_limiter, batch ] - exporters: [ tanzuobservability ] -``` - -## Attributes Required by Tanzu Observability - -### Source - -A `source` field is required in Tanzu -Observability [spans](https://docs.wavefront.com/trace_data_details.html#span-fields) -and [metrics](https://docs.wavefront.com/wavefront_data_format.html#wavefront-data-format-fields). The source is set to -the -first matching OpenTelemetry Resource Attribute: - -1. `source` -2. `host.name` -3. `hostname` -4. `host.id` - -To reduce duplicate data, the matched attribute is excluded from the tags on the exported Tanzu Observability span or -metric. -If none of the above resource attributes exist, the OpenTelemetry Collector's hostname is used as a fallback for source. - -### Application Identity Tags on Spans - -[Application identity tags](https://docs.wavefront.com/trace_data_details.html#how-wavefront-uses-application-tags) of -`application` and `service` are required for all spans in Tanzu Observability. - -- `application` is set to the value of the attribute `application` on the OpenTelemetry Span or Resource. Default is " - defaultApp". -- `service` is set the value of the attribute `service` or `service.name` on the OpenTelemetry Span or Resource. Default - is "defaultService". - -## Data Conversion for Traces - -- Trace IDs and Span IDs are converted to UUIDs. For example, span IDs are left-padded with zeros to fit the correct - size. -- Events are converted to [Span Logs](https://docs.wavefront.com/trace_data_details.html#span-logs). -- Kind is converted to the `span.kind` tag. -- If a Span's status code is error, a tag of `error=true` is added. If the status also has a description, it's set - to `otel.status_description`. -- TraceState is converted to the `w3c.tracestate` tag. - -## Data Conversion for Metrics - -This section describes the process used by the Exporter when converting from -[OpenTelemetry Metrics](https://opentelemetry.io/docs/reference/specification/metrics/datamodel) to -[Tanzu Observability by Wavefront Metrics](https://docs.wavefront.com/metric_types.html). - -| OpenTelemetry Metric Type | Wavefront Metric Type | Notes | -| ------ | ------ | ------ | -| Gauge | Gauge | -| Cumulative Sum | Cumulative Counter | -| Delta Sum | Delta Counter | -| Cumulative Histogram (incl. Exponential) | Cumulative Counters | [Details below](#cumulative-histogram-conversion-incl-exponential). 
| -| Delta Histogram (incl. Exponential) | Histogram | -| Summary | Gauges | [Details below](#summary-conversion). - -### Cumulative Histogram Conversion (incl. Exponential) - -A cumulative histogram is converted to multiple counter metrics: one counter per bucket in the histogram. Each counter -has a special "le" tag that matches the upper bound of the corresponding bucket. The value of the counter metric is the -sum of the histogram's corresponding bucket and all the buckets before it. - -When working with OpenTelemetry Cumulative Histograms that have been converted to Wavefront Counters, these functions -will be of use: - -- [cumulativeHisto()](https://docs.wavefront.com/ts_cumulativeHisto.html) -- [cumulativePercentile()](https://docs.wavefront.com/ts_cumulativePercentile.html) - -#### Example - -Suppose a cumulative histogram named "http.response_times" has -the following buckets and values: - -| Bucket | Value | -| ------ | ----- | -| ≤ 100ms | 5 | -| > 100ms to ≤ 200ms | 20 | -| > 200ms | 100 | - -The exporter sends the following metrics to tanzuobservability: - -| Name | Tags | Value | -| ---- | ---- | ----- | -| http.response_times | le="100" | 5 | -| http.response_times | le="200" | 25 | -| http.response_times | le="+Inf" | 125 | - -#### Example WQL Query on a Cumulative Histogram - -Using the cumulative histogram from the section above, this WQL query will produce a graph showing -the 95th percentile of http response times in the last 15 minutes. - -``` -cumulativePercentile(95, mavg(15m, deriv(sum(ts(http.reponse_times), le)))) -``` - -The sum function aggregates the http response times and groups them by the le tag. Since -http.response_times has three buckets, the sum() function will graph three lines, one for each bucket. -deriv() shows the per second rate of change in the three lines from sum. The mavg function averages -the rates of change of the three lines over the last 15 minutes. Since the rates of change are per -second, if you multiply the average rate of change for a bucket by 900, you get the number of new -http requests falling into that bucket in the last 15 minutes. Finally, cumulativePercentile -uses the values of the `le` tags, which are http response times, and linear interpolation of the -bucket counts to estimate the 95th percentile of http.response_times over the last 15 minutes. - -### Summary Conversion - -A summary is converted to multiple gauge metrics: one gauge for every quantile in the summary. A special "quantile" tag -contains avalue between 0 and 1 indicating the quantile for which the value belongs. 
- -[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta - -[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/exporter/tanzuobservabilityexporter/config.go b/exporter/tanzuobservabilityexporter/config.go deleted file mode 100644 index cbf01377cbc2..000000000000 --- a/exporter/tanzuobservabilityexporter/config.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" - -import ( - "errors" - "fmt" - "net/url" - "strconv" - - "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/exporter/exporterhelper" -) - -type TracesConfig struct { - confighttp.HTTPClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. -} - -type MetricsConfig struct { - confighttp.HTTPClientSettings `mapstructure:",squash"` - ResourceAttrsIncluded bool `mapstructure:"resource_attrs_included"` - // AppTagsExcluded will exclude the Resource Attributes `application`, `service.name` -> (service), - // `cluster`, and `shard` from the transformed TObs metric if set to true. - AppTagsExcluded bool `mapstructure:"app_tags_excluded"` -} - -// Config defines configuration options for the exporter. -type Config struct { - exporterhelper.QueueSettings `mapstructure:"sending_queue"` - exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` - - // Traces defines the Traces exporter specific configuration - Traces TracesConfig `mapstructure:"traces"` - Metrics MetricsConfig `mapstructure:"metrics"` -} - -func (c *Config) hasMetricsEndpoint() bool { - return c.Metrics.Endpoint != "" -} - -func (c *Config) hasTracesEndpoint() bool { - return c.Traces.Endpoint != "" -} - -func (c *Config) parseMetricsEndpoint() (hostName string, port int, err error) { - return parseEndpoint(c.Metrics.Endpoint) -} - -func (c *Config) parseTracesEndpoint() (hostName string, port int, err error) { - return parseEndpoint(c.Traces.Endpoint) -} - -func (c *Config) Validate() error { - var tracesHostName, metricsHostName string - var err error - if c.hasTracesEndpoint() { - tracesHostName, _, err = c.parseTracesEndpoint() - if err != nil { - return fmt.Errorf("failed to parse traces.endpoint: %w", err) - } - } - if c.hasMetricsEndpoint() { - metricsHostName, _, err = c.parseMetricsEndpoint() - if err != nil { - return fmt.Errorf("failed to parse metrics.endpoint: %w", err) - } - } - if c.hasTracesEndpoint() && c.hasMetricsEndpoint() && tracesHostName != metricsHostName { - return errors.New("host for metrics and traces must be the same") - } - return nil -} - -func parseEndpoint(endpoint string) (hostName string, port int, err error) { - if endpoint == "" { - return "", 0, errors.New("a non-empty endpoint is required") - } - u, err := url.Parse(endpoint) - if err != nil { - return "", 0, err - } - port, err = strconv.Atoi(u.Port()) - if err != nil { - return "", 0, errors.New("valid port required") - } - hostName = u.Hostname() - return hostName, port, nil -} diff --git a/exporter/tanzuobservabilityexporter/config_test.go b/exporter/tanzuobservabilityexporter/config_test.go deleted file mode 100644 index 6e31d90a97cf..000000000000 --- a/exporter/tanzuobservabilityexporter/config_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright The OpenTelemetry Authors -// 
SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter - -import ( - "path/filepath" - "testing" - "time" - - "github.com/cenkalti/backoff/v4" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/otelcol/otelcoltest" -) - -func TestLoadConfig(t *testing.T) { - factories, err := otelcoltest.NopFactories() - require.NoError(t, err) - - factory := NewFactory() - factories.Exporters[exporterType] = factory - cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) - - require.NoError(t, err) - require.NotNil(t, cfg) - - actual, ok := cfg.Exporters[component.NewID("tanzuobservability")] - require.True(t, ok) - expected := &Config{ - Traces: TracesConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http://localhost:40001"}, - }, - Metrics: MetricsConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http://localhost:2916"}, - ResourceAttrsIncluded: true, - AppTagsExcluded: true, - }, - QueueSettings: exporterhelper.QueueSettings{ - Enabled: true, - NumConsumers: 2, - QueueSize: 10, - }, - RetrySettings: exporterhelper.RetrySettings{ - Enabled: true, - InitialInterval: 10 * time.Second, - MaxInterval: 60 * time.Second, - MaxElapsedTime: 10 * time.Minute, - RandomizationFactor: backoff.DefaultRandomizationFactor, - Multiplier: backoff.DefaultMultiplier, - }, - } - assert.Equal(t, expected, actual) -} - -func TestConfigRequiresValidEndpointUrl(t *testing.T) { - c := &Config{ - Traces: TracesConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http#$%^&#$%&#"}, - }, - } - assert.Error(t, c.Validate()) -} - -func TestMetricsConfigRequiresValidEndpointUrl(t *testing.T) { - c := &Config{ - Metrics: MetricsConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http#$%^&#$%&#"}, - }, - } - - assert.Error(t, c.Validate()) -} - -func TestDifferentHostNames(t *testing.T) { - c := &Config{ - Traces: TracesConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http://localhost:30001"}, - }, - Metrics: MetricsConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http://foo.com:2878"}, - }, - } - assert.Error(t, c.Validate()) -} - -func TestConfigNormal(t *testing.T) { - c := &Config{ - Traces: TracesConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http://localhost:40001"}, - }, - Metrics: MetricsConfig{ - HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: "http://localhost:2916"}, - }, - } - assert.NoError(t, c.Validate()) -} - -func TestMetricConfig(t *testing.T) { - c := &Config{ - Metrics: MetricsConfig{}, - } - assert.NoError(t, c.Validate()) - assert.False(t, c.Metrics.ResourceAttrsIncluded) - assert.False(t, c.Metrics.AppTagsExcluded) - - c = &Config{ - Metrics: MetricsConfig{ - ResourceAttrsIncluded: true, - AppTagsExcluded: true, - }, - } - assert.NoError(t, c.Validate()) - assert.True(t, c.Metrics.ResourceAttrsIncluded) - assert.True(t, c.Metrics.AppTagsExcluded) -} diff --git a/exporter/tanzuobservabilityexporter/factory.go b/exporter/tanzuobservabilityexporter/factory.go deleted file mode 100644 index a978899cb28a..000000000000 --- a/exporter/tanzuobservabilityexporter/factory.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - 
-//go:generate mdatagen metadata.yaml - -package tanzuobservabilityexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" - -import ( - "context" - "fmt" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter/internal/metadata" -) - -const ( - exporterType = "tanzuobservability" -) - -// NewFactory creates a factory for the exporter. -func NewFactory() exporter.Factory { - return exporter.NewFactory( - exporterType, - createDefaultConfig, - exporter.WithTraces(createTracesExporter, metadata.TracesStability), - exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), - ) -} - -func createDefaultConfig() component.Config { - return &Config{ - QueueSettings: exporterhelper.NewDefaultQueueSettings(), - RetrySettings: exporterhelper.NewDefaultRetrySettings(), - } -} - -// createTracesExporter implements exporterhelper.CreateTracesExporter and creates -// an exporter for traces using this configuration -func createTracesExporter( - ctx context.Context, - set exporter.CreateSettings, - cfg component.Config, -) (exporter.Traces, error) { - exp, err := newTracesExporter(set, cfg) - if err != nil { - return nil, err - } - - tobsCfg, ok := cfg.(*Config) - if !ok { - return nil, fmt.Errorf("invalid config: %#v", cfg) - } - - return exporterhelper.NewTracesExporter( - ctx, - set, - cfg, - exp.pushTraceData, - exporterhelper.WithQueue(tobsCfg.QueueSettings), - exporterhelper.WithRetry(tobsCfg.RetrySettings), - exporterhelper.WithShutdown(exp.shutdown), - ) -} - -func createMetricsExporter( - ctx context.Context, - set exporter.CreateSettings, - cfg component.Config, -) (exporter.Metrics, error) { - tobsCfg, ok := cfg.(*Config) - if !ok { - return nil, fmt.Errorf("invalid config: %#v", cfg) - } - exp, err := newMetricsExporter(set, tobsCfg, createMetricsConsumer) - if err != nil { - return nil, err - } - - exporter, err := exporterhelper.NewMetricsExporter( - ctx, - set, - cfg, - exp.pushMetricsData, - exporterhelper.WithQueue(tobsCfg.QueueSettings), - exporterhelper.WithRetry(tobsCfg.RetrySettings), - exporterhelper.WithShutdown(exp.shutdown), - ) - if err != nil { - return nil, err - } - - return exporter, nil -} diff --git a/exporter/tanzuobservabilityexporter/factory_test.go b/exporter/tanzuobservabilityexporter/factory_test.go deleted file mode 100644 index ea3f7e175ff1..000000000000 --- a/exporter/tanzuobservabilityexporter/factory_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/exporter/exportertest" -) - -func TestCreateDefaultConfig(t *testing.T) { - cfg := createDefaultConfig() - assert.NotNil(t, cfg, "failed to create default config") - require.NoError(t, componenttest.CheckConfigStruct(cfg)) - - actual, ok := cfg.(*Config) - require.True(t, ok, "invalid Config: %#v", cfg) - assert.False(t, actual.hasMetricsEndpoint()) - assert.False(t, actual.hasTracesEndpoint()) - assert.False(t, actual.Metrics.ResourceAttrsIncluded) - assert.False(t, actual.Metrics.AppTagsExcluded) -} - -func TestCreateExporter(t *testing.T) { - 
defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - params := exportertest.NewNopCreateSettings() - cfg.Traces.Endpoint = "http://localhost:30001" - te, err := createTracesExporter(context.Background(), params, cfg) - assert.Nil(t, err) - assert.NotNil(t, te, "failed to create trace exporter") -} - -func TestCreateMetricsExporter(t *testing.T) { - defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - params := exportertest.NewNopCreateSettings() - cfg.Metrics.Endpoint = "http://localhost:2878" - te, err := createMetricsExporter(context.Background(), params, cfg) - assert.NoError(t, err) - assert.NotNil(t, te, "failed to create metrics exporter") -} - -func TestCreateTraceExporterNilConfigError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - _, err := createTracesExporter(context.Background(), params, nil) - assert.Error(t, err) -} - -func TestCreateMetricsExporterNilConfigError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - _, err := createMetricsExporter(context.Background(), params, nil) - assert.Error(t, err) -} - -func TestCreateTraceExporterInvalidEndpointError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - cfg.Traces.Endpoint = "http:#$%^&#$%&#" - _, err := createTracesExporter(context.Background(), params, cfg) - assert.Error(t, err) -} - -func TestCreateMetricsExporterInvalidEndpointError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - cfg.Metrics.Endpoint = "http:#$%^&#$%&#" - _, err := createMetricsExporter(context.Background(), params, cfg) - assert.Error(t, err) -} - -func TestCreateTraceExporterMissingPortError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - cfg.Traces.Endpoint = "http://localhost" - _, err := createTracesExporter(context.Background(), params, cfg) - assert.Error(t, err) -} - -func TestCreateMetricsExporterMissingPortError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - cfg.Metrics.Endpoint = "http://localhost" - _, err := createMetricsExporter(context.Background(), params, cfg) - assert.Error(t, err) -} - -func TestCreateTraceExporterInvalidPortError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - cfg.Traces.Endpoint = "http://localhost:c42a" - _, err := createTracesExporter(context.Background(), params, cfg) - assert.Error(t, err) -} - -func TestCreateMetricsExporterInvalidPortError(t *testing.T) { - params := exportertest.NewNopCreateSettings() - defaultConfig := createDefaultConfig() - cfg := defaultConfig.(*Config) - cfg.Metrics.Endpoint = "http://localhost:c42a" - _, err := createMetricsExporter(context.Background(), params, cfg) - assert.Error(t, err) -} diff --git a/exporter/tanzuobservabilityexporter/go.mod b/exporter/tanzuobservabilityexporter/go.mod deleted file mode 100644 index ffa2bbd8fe4b..000000000000 --- a/exporter/tanzuobservabilityexporter/go.mod +++ /dev/null @@ -1,128 +0,0 @@ -// Deprecated: the Tanzu Observability Exporter is deprecated and will be removed in future versions. Please see the README for more information. 
-module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter - -go 1.20 - -require ( - github.com/cenkalti/backoff/v4 v4.2.1 - github.com/google/uuid v1.5.0 - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.91.0 - github.com/stretchr/testify v1.8.4 - github.com/wavefronthq/wavefront-sdk-go v0.15.0 - go.opentelemetry.io/collector/component v0.91.0 - go.opentelemetry.io/collector/config/confighttp v0.91.0 - go.opentelemetry.io/collector/exporter v0.91.0 - go.opentelemetry.io/collector/otelcol v0.91.0 - go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/semconv v0.91.0 - go.opentelemetry.io/otel/metric v1.21.0 - go.opentelemetry.io/otel/trace v1.21.0 - go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.26.0 -) - -require ( - contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/caio/go-tdigest/v4 v4.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.3.0 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.4 // indirect - github.com/knadh/koanf/maps v0.1.1 // indirect - github.com/knadh/koanf/providers/confmap v0.1.0 // indirect - github.com/knadh/koanf/v2 v2.0.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.17.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/prometheus/statsd_exporter v0.22.7 // indirect - github.com/rs/cors v1.10.1 // indirect - github.com/shirou/gopsutil/v3 v3.23.11 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/cobra v1.8.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.91.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.91.0 // indirect - 
go.opentelemetry.io/collector/config/configcompression v0.91.0 // indirect - go.opentelemetry.io/collector/config/configopaque v0.91.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect - go.opentelemetry.io/collector/config/configtls v0.91.0 // indirect - go.opentelemetry.io/collector/config/internal v0.91.0 // indirect - go.opentelemetry.io/collector/confmap v0.91.0 // indirect - go.opentelemetry.io/collector/connector v0.91.0 // indirect - go.opentelemetry.io/collector/consumer v0.91.0 // indirect - go.opentelemetry.io/collector/extension v0.91.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.91.0 // indirect - go.opentelemetry.io/collector/featuregate v1.0.0 // indirect - go.opentelemetry.io/collector/processor v0.91.0 // indirect - go.opentelemetry.io/collector/receiver v0.91.0 // indirect - go.opentelemetry.io/collector/service v0.91.0 // indirect - go.opentelemetry.io/contrib/config v0.1.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect - gonum.org/v1/gonum v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal - -retract ( - v0.76.2 - v0.76.1 - v0.65.0 -) - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden diff --git a/exporter/tanzuobservabilityexporter/go.sum b/exporter/tanzuobservabilityexporter/go.sum deleted file mode 100644 index 9a25465a0477..000000000000 --- a/exporter/tanzuobservabilityexporter/go.sum +++ /dev/null @@ -1,714 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= 
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= -contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/caio/go-tdigest/v4 v4.0.1 h1:sx4ZxjmIEcLROUPs2j1BGe2WhOtHD6VSe6NNbBdKYh4= -github.com/caio/go-tdigest/v4 v4.0.1/go.mod h1:Wsa+f0EZnV2gShdj1adgl0tQSoXRxtM0QioTgukFw8U= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit 
v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof 
v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= -github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= -github.com/knadh/koanf/providers/confmap v0.1.0 
h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= -github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= -github.com/knadh/koanf/v2 v2.0.1/go.mod h1:ZeiIlIDXTE7w1lMT6UVcNiRAS2/rCeLn/GdLNvY1Dus= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors 
v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= 
-github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= -github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= -github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/wavefronthq/wavefront-sdk-go v0.15.0 h1:po9E3vh/0y7kOx8D9EtFp7kbSLLLKbmu/w/s1xGJAQU= -github.com/wavefronthq/wavefront-sdk-go 
v0.15.0/go.mod h1:V72c8e+bXuLK8HpA6ioW0ll5mK9IPD+4IHNNDY75ksA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.91.0 h1:C7sGUJDJ5nwm+CkWpAaVP3lNsuYpwSRbkmLncFjkmO8= -go.opentelemetry.io/collector v0.91.0/go.mod h1:YhQpIDZsn+bICAAqgBwXk9wqK8GKZDv+aogfG52zUuE= -go.opentelemetry.io/collector/component v0.91.0 h1:aBT1i2zGyfh9PalYJLfXVvQp+osHyalwyDFselI1CtA= -go.opentelemetry.io/collector/component v0.91.0/go.mod h1:2KBHvjNFdU7oOjsObQeC4Ta2Ef607OISU5obznW00fw= -go.opentelemetry.io/collector/config/configauth v0.91.0 h1:SjWKimuqlpfS3sIlFpfzdkSY/AmMMCEmn9+KRcjEU+s= -go.opentelemetry.io/collector/config/configauth v0.91.0/go.mod h1:wmmMYqv6PxwY+/h7qqvd/LP0XN/wzXoECDu6PYz2Of0= -go.opentelemetry.io/collector/config/configcompression v0.91.0 h1:v+jEpFhLgfJDCUCPsSF03gjoFEvm77PofTCqHKKgXTs= -go.opentelemetry.io/collector/config/configcompression v0.91.0/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= -go.opentelemetry.io/collector/config/confighttp v0.91.0 h1:YAOyXcDaLDnF3UqPHH4kYU8lx8BqXJ7hS3Ou8GcmqpQ= -go.opentelemetry.io/collector/config/confighttp v0.91.0/go.mod h1:R6y8KSJzqDe6CE6JsYwt4CTZ2B4AlqRA+V74OJPX3vE= -go.opentelemetry.io/collector/config/confignet v0.91.0 h1:3huNXh04O3wXaN4qPhmmiefyz4dYbOlNcR/OKMByqig= -go.opentelemetry.io/collector/config/configopaque v0.91.0 h1:bQgJPyARbuXAsU2p6h2YbEm1kHb1stS6hg42ekyMZmI= -go.opentelemetry.io/collector/config/configopaque v0.91.0/go.mod h1:TPCHaU+QXiEV+JXbgyr6mSErTI9chwQyasDVMdJr3eY= -go.opentelemetry.io/collector/config/configtelemetry v0.91.0 h1:mEwvqrYfwUJ7LwYfpcF9M8z7LHFoYaKhEPhnERD/88E= -go.opentelemetry.io/collector/config/configtelemetry v0.91.0/go.mod h1:+LAXM5WFMW/UbTlAuSs6L/W72WC+q8TBJt/6z39FPOU= -go.opentelemetry.io/collector/config/configtls v0.91.0 h1:lZromNeOslPwyVlTPMOzF2q++SY+VONvfH3cDqA0kKk= -go.opentelemetry.io/collector/config/configtls v0.91.0/go.mod h1:E+CW5gZoH8V3z5aSlZxwiof7GAcayzn1HRM+uRILLEI= -go.opentelemetry.io/collector/config/internal v0.91.0 h1:Yx17oFdXOPnY83Jfe1oiXhvfYW7RX/xh3/kpV/iYibM= -go.opentelemetry.io/collector/config/internal v0.91.0/go.mod h1:42VsQ/1kP2qnvzjNi+dfNP+KyCFRADejyrJ8m2GVL3M= -go.opentelemetry.io/collector/confmap v0.91.0 h1:7U2MT+u74oEzq/WWrpXSLKB7nX5jPNC4drwtQdYfwKk= -go.opentelemetry.io/collector/confmap v0.91.0/go.mod h1:uxV+fZ85kG31oovL6Cl3fAMQ3RRPwUvfAbbA9WT1Yhk= -go.opentelemetry.io/collector/connector v0.91.0 h1:p2YsgBtr26guCib99ExQHtGuNmX+awgmH1Mbz23wYGU= 
-go.opentelemetry.io/collector/connector v0.91.0/go.mod h1:dG34jAOATtXFdqzp8IxZJsFwFLaUIkYK69WT1WQ03ZY= -go.opentelemetry.io/collector/consumer v0.91.0 h1:0nU1lUe2S0b8iOmF3w3R/9Dt24n413thRTbXz/nJgrM= -go.opentelemetry.io/collector/consumer v0.91.0/go.mod h1:phTUQmr7hpYfwXyDXo4mFHVjYrlSbZE+nZYlKlbVxGs= -go.opentelemetry.io/collector/exporter v0.91.0 h1:guWcGflFjaenp3BMxAmAKjb8RQG80jQQKjuUFouS+z8= -go.opentelemetry.io/collector/exporter v0.91.0/go.mod h1:hkOBunNNWu6CaTtkRsCJ/OJ509REJZg+DDElevFIQCQ= -go.opentelemetry.io/collector/extension v0.91.0 h1:bkoSLgnWm4g6n+RLmyKG6Up7dr8KmJy68quonoLZnr0= -go.opentelemetry.io/collector/extension v0.91.0/go.mod h1:F3r0fVTTh4sYR0GVv51Qez8lk8v77kTDPdyMOp6A2kg= -go.opentelemetry.io/collector/extension/auth v0.91.0 h1:28Hv5W0GZgv2jR5IiFdJzutTs91KmXFh8DUfVTjwwmI= -go.opentelemetry.io/collector/extension/auth v0.91.0/go.mod h1:diY6Sw7cOAn2qivKipZk4niBFzCCFBj7swAXiG2h9ro= -go.opentelemetry.io/collector/extension/zpagesextension v0.91.0 h1:d787uZ6cNHN1uaJ3pIgFlcrkHVB4ML6AlXBiCQRJ9t0= -go.opentelemetry.io/collector/featuregate v1.0.0 h1:5MGqe2v5zxaoo73BUOvUTunftX5J8RGrbFsC2Ha7N3g= -go.opentelemetry.io/collector/featuregate v1.0.0/go.mod h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= -go.opentelemetry.io/collector/otelcol v0.91.0 h1:gq/PY/tHgkTr2fuMLp+F+NAm+03iiU+j/Eilvx38p5w= -go.opentelemetry.io/collector/otelcol v0.91.0/go.mod h1:9j8gKdZvuEgZoeUhz5f5D/fwCxx7M7Tg+6D207/KjNs= -go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= -go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= -go.opentelemetry.io/collector/processor v0.91.0 h1:Xi52gYMXTG4zYmNhsqJ8ly/9f7b0n0crMhKxVVI9HpY= -go.opentelemetry.io/collector/processor v0.91.0/go.mod h1:naTuusZNfzM5MSqoTVzkKbR1MaJ8oD8v5ginR5JreDE= -go.opentelemetry.io/collector/receiver v0.91.0 h1:0TZF/0OXoJtxgm+mvOinRRXo9LgVyOsOgCQfWkNGXJA= -go.opentelemetry.io/collector/receiver v0.91.0/go.mod h1:d5qo2mpovqKoi47hrMxj5BLdLzOXM0mUHL5CKrjfWNM= -go.opentelemetry.io/collector/semconv v0.91.0 h1:TRd+yDDfKQl+aNtS24wmEbJp1/QE/xAFV9SB5zWGxpE= -go.opentelemetry.io/collector/semconv v0.91.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= -go.opentelemetry.io/collector/service v0.91.0 h1:AQS6YAiZO6V+ohcMX4upfyp/Ydi6y862yQEbFt8t+fQ= -go.opentelemetry.io/collector/service v0.91.0/go.mod h1:8Pf8mPo3YqcTNwOzXa0Ok/o+g2+d8hSCaGxRqR/c6CY= -go.opentelemetry.io/contrib/config v0.1.1 h1:lIUTrMWkfDE0GvzBLhwv6ATDB1vntrnTsRvUMkZKnfQ= -go.opentelemetry.io/contrib/config v0.1.1/go.mod h1:rDrK4+PS6Cs+WIphU/GO5Sk4TGV36lEQqk/Z1vZkaLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/contrib/propagators/b3 v1.21.1 h1:WPYiUgmw3+b7b3sQ1bFBFAf0q+Di9dvNc3AtYfnT4RQ= -go.opentelemetry.io/contrib/propagators/b3 v1.21.1/go.mod h1:EmzokPoSqsYMBVK4nRnhsfm5mbn8J1eDuz/U1UaQaWg= -go.opentelemetry.io/contrib/zpages v0.46.1 h1:U8Hh84dc+vJTVgRnL+QKWtWD2iqTSKibrQ85EeQqsNg= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/bridge/opencensus v0.44.0 h1:/inELPJztkn6Xx3ap9qw8i8XdeWF0B/OjGHOdRTePZ8= -go.opentelemetry.io/otel/bridge/opencensus v0.44.0/go.mod h1:dQTBJVBx1xahrXEFBV1BGPAnGuXC92LCj55fxIrtj7I= 
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2 h1:TnhkxGJ5qPHAMIMI4r+HPT/BbpoHxqn4xONJrok054o= -go.opentelemetry.io/otel/exporters/prometheus v0.44.1-0.20231201153405-6027c1ae76f2/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 h1:dEZWPjVN22urgYCza3PXRUGEyCB++y1sAqm6guWFesk= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0/go.mod h1:kB3ufRbfU+CQ4MlUcqtW8Z7YEOBeK2DJ6CmR5rYYF3E= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= -go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 h1:FqrVOBQxQ8r/UwwXibI0KMolVhvFiGobSfdE33deHJM= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= -gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools 
v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/exporter/tanzuobservabilityexporter/internal/metadata/generated_status.go b/exporter/tanzuobservabilityexporter/internal/metadata/generated_status.go deleted file mode 100644 index 16776d5cb790..000000000000 --- a/exporter/tanzuobservabilityexporter/internal/metadata/generated_status.go +++ /dev/null @@ -1,23 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -const ( - Type = "tanzuobservability" - TracesStability = component.StabilityLevelDeprecated - MetricsStability = component.StabilityLevelDeprecated -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/tanzuobservability") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/tanzuobservability") -} diff --git a/exporter/tanzuobservabilityexporter/metadata.yaml b/exporter/tanzuobservabilityexporter/metadata.yaml deleted file mode 100644 index c934fb5585b1..000000000000 --- a/exporter/tanzuobservabilityexporter/metadata.yaml +++ /dev/null @@ -1,9 +0,0 @@ -type: tanzuobservability - -status: - class: exporter - stability: - deprecated: [traces, metrics] - distributions: [contrib] - codeowners: - active: [oppegard, thepeterstone, keep94] diff --git a/exporter/tanzuobservabilityexporter/metrics.go b/exporter/tanzuobservabilityexporter/metrics.go deleted file mode 100644 index f406f9fbb0be..000000000000 --- a/exporter/tanzuobservabilityexporter/metrics.go +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" - -import ( - "context" - "errors" - "fmt" - "math" - "strconv" - "sync/atomic" - - "github.com/wavefronthq/wavefront-sdk-go/histogram" - "github.com/wavefronthq/wavefront-sdk-go/senders" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.uber.org/multierr" - "go.uber.org/zap" -) - -const ( - missingValueMetricName = "~sdk.otel.collector.missing_values" - metricNameString = "metric name" - metricTypeString = "metric type" - malformedHistogramMetricName = "~sdk.otel.collector.malformed_histogram" - noAggregationTemporalityMetricName = "~sdk.otel.collector.no_aggregation_temporality" -) - -const ( - histogramDataPointInvalid = "Histogram data point invalid" -) - -var ( - typeIsGaugeTags = map[string]string{"type": "gauge"} - typeIsSumTags = map[string]string{"type": "sum"} - typeIsHistogramTags = map[string]string{"type": "histogram"} -) - -var ( - allGranularity = map[histogram.Granularity]bool{histogram.DAY: true, histogram.HOUR: true, histogram.MINUTE: true} -) - -var ( - regularHistogram histogramSpecification = regularHistogramSpecification{} - exponentialHistogram histogramSpecification = exponentialHistogramSpecification{} -) - -// metricsConsumer instances consume OTEL metrics -type metricsConsumer struct { - consumerMap 
map[pmetric.MetricType]typedMetricConsumer - sender flushCloser - reportInternalMetrics bool - config MetricsConfig -} - -type metricInfo struct { - pmetric.Metric - Source string - SourceKey string - ResourceAttrs map[string]string -} - -// newMetricsConsumer returns a new metricsConsumer. consumers are the -// consumers responsible for consuming each type of metric. The Consume method -// of returned consumer calls the Flush method on sender after consuming -// all the metrics. Calling Close on the returned metricsConsumer calls Close -// on sender. sender can be nil. reportInternalMetrics controls whether -// returned metricsConsumer reports internal metrics. -func newMetricsConsumer( - consumers []typedMetricConsumer, - sender flushCloser, - reportInternalMetrics bool, - config MetricsConfig, -) *metricsConsumer { - consumerMap := make(map[pmetric.MetricType]typedMetricConsumer, len(consumers)) - for _, consumer := range consumers { - if consumerMap[consumer.Type()] != nil { - panic("duplicate consumer type detected: " + consumer.Type().String()) - } - consumerMap[consumer.Type()] = consumer - } - return &metricsConsumer{ - consumerMap: consumerMap, - sender: sender, - reportInternalMetrics: reportInternalMetrics, - config: config, - } -} - -// Consume consumes OTEL metrics. For each metric in md, it delegates to the -// typedMetricConsumer that consumes that type of metric. Once Consume consumes -// all the metrics, it calls Flush() on the sender passed to -// newMetricsConsumer. -func (c *metricsConsumer) Consume(ctx context.Context, md pmetric.Metrics) error { - var errs []error - rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { - resAttrs := rms.At(i).Resource().Attributes() - source, sourceKey := getSourceAndKey(resAttrs) - ilms := rms.At(i).ScopeMetrics() - for j := 0; j < ilms.Len(); j++ { - ms := ilms.At(j).Metrics() - for k := 0; k < ms.Len(); k++ { - m := ms.At(k) - var resAttrsMap map[string]string - if c.config.ResourceAttrsIncluded { - resAttrsMap = attributesToTags(resAttrs) - } else if !c.config.AppTagsExcluded { - resAttrsMap = appAttributesToTags(resAttrs) - } - mi := metricInfo{Metric: m, Source: source, SourceKey: sourceKey, ResourceAttrs: resAttrsMap} - select { - case <-ctx.Done(): - return multierr.Combine(append(errs, errors.New("context canceled"))...) - default: - c.pushSingleMetric(mi, &errs) - } - } - } - } - if c.reportInternalMetrics { - c.pushInternalMetrics(&errs) - } - if c.sender != nil { - if err := c.sender.Flush(); err != nil { - errs = append(errs, err) - } - } - return multierr.Combine(errs...) -} - -// Close closes this metricsConsumer by calling Close on the sender passed -// to newMetricsConsumer. -func (c *metricsConsumer) Close() { - if c.sender != nil { - c.sender.Close() - } -} - -func (c *metricsConsumer) pushInternalMetrics(errs *[]error) { - for _, consumer := range c.consumerMap { - consumer.PushInternalMetrics(errs) - } -} - -func (c *metricsConsumer) pushSingleMetric(mi metricInfo, errs *[]error) { - dataType := mi.Type() - consumer := c.consumerMap[dataType] - if consumer == nil { - *errs = append( - *errs, fmt.Errorf("no support for metric type %v", dataType)) - - } else { - consumer.Consume(mi, errs) - } -} - -// typedMetricConsumer consumes one specific type of OTEL metric -type typedMetricConsumer interface { - - // Type returns the type of metric this consumer consumes. 
For example - // Gauge, Sum, or Histogram - Type() pmetric.MetricType - - // Consume consumes the metric from the metricInfo and appends any errors encountered to errs - Consume(mi metricInfo, errs *[]error) - - // PushInternalMetrics sends internal metrics for this consumer to tanzu observability - // and appends any errors encountered to errs. The Consume method of metricsConsumer calls - // PushInternalMetrics on each registered typedMetricConsumer after it has consumed all the - // metrics but before it calls Flush on the sender. - PushInternalMetrics(errs *[]error) -} - -// flushCloser is the interface for the Flush and Close method -type flushCloser interface { - Flush() error - Close() -} - -// report the counter to tanzu observability. name is the name of -// the metric to be reported. tags is the tags for the metric. sender is what -// sends the metric to tanzu observability. Any errors get added to errs. -func report(count *atomic.Int64, name string, tags map[string]string, sender gaugeSender, errs *[]error) { - err := sender.SendMetric(name, float64(count.Load()), 0, "", tags) - if err != nil { - *errs = append(*errs, err) - } -} - -// logMissingValue keeps track of metrics with missing values. metric is the -// metric with the missing value. settings logs the missing value. count counts -// metrics with missing values. -func logMissingValue(metric pmetric.Metric, settings component.TelemetrySettings, count *atomic.Int64) { - namef := zap.String(metricNameString, metric.Name()) - typef := zap.String(metricTypeString, metric.Type().String()) - settings.Logger.Debug("Metric missing value", namef, typef) - count.Add(1) -} - -// getValue gets the floating point value out of a NumberDataPoint -func getValue(numberDataPoint pmetric.NumberDataPoint) (float64, error) { - switch numberDataPoint.ValueType() { - case pmetric.NumberDataPointValueTypeInt: - return float64(numberDataPoint.IntValue()), nil - case pmetric.NumberDataPointValueTypeDouble: - return numberDataPoint.DoubleValue(), nil - default: - return 0.0, errors.New("unsupported metric value type") - } -} - -// pushGaugeNumberDataPoint sends a metric as a gauge metric to tanzu -// observability. metric is the metric to send. numberDataPoint is the value -// of the metric. Any errors get appended to errs. sender is what sends the -// gauge metric to tanzu observability. settings logs problems. missingValues -// keeps track of metrics with missing values. -func pushGaugeNumberDataPoint( - mi metricInfo, - numberDataPoint pmetric.NumberDataPoint, - errs *[]error, - sender gaugeSender, - settings component.TelemetrySettings, - missingValues *atomic.Int64, -) { - tags := pointAndResAttrsToTagsAndFixSource(mi.SourceKey, numberDataPoint.Attributes(), newMap(mi.ResourceAttrs)) - ts := numberDataPoint.Timestamp().AsTime().Unix() - value, err := getValue(numberDataPoint) - if err != nil { - logMissingValue(mi.Metric, settings, missingValues) - return - } - err = sender.SendMetric(mi.Name(), value, ts, mi.Source, tags) - if err != nil { - *errs = append(*errs, err) - } -} - -// gaugeSender sends gauge metrics to tanzu observability -type gaugeSender interface { - SendMetric(name string, value float64, ts int64, source string, tags map[string]string) error -} - -type gaugeConsumer struct { - sender gaugeSender - settings component.TelemetrySettings - missingValues *atomic.Int64 -} - -// newGaugeConsumer returns a typedMetricConsumer that consumes gauge metrics -// by sending them to tanzu observability. 
-func newGaugeConsumer( - sender gaugeSender, settings component.TelemetrySettings) typedMetricConsumer { - return &gaugeConsumer{ - sender: sender, - settings: settings, - missingValues: &atomic.Int64{}, - } -} - -func (g *gaugeConsumer) Type() pmetric.MetricType { - return pmetric.MetricTypeGauge -} - -func (g *gaugeConsumer) Consume(mi metricInfo, errs *[]error) { - gauge := mi.Gauge() - numberDataPoints := gauge.DataPoints() - for i := 0; i < numberDataPoints.Len(); i++ { - pushGaugeNumberDataPoint( - mi, - numberDataPoints.At(i), - errs, - g.sender, - g.settings, - g.missingValues) - } -} - -func (g *gaugeConsumer) PushInternalMetrics(errs *[]error) { - report(g.missingValues, missingValueMetricName, typeIsGaugeTags, g.sender, errs) -} - -type sumConsumer struct { - sender senders.MetricSender - settings component.TelemetrySettings - missingValues *atomic.Int64 -} - -// newSumConsumer returns a typedMetricConsumer that consumes sum metrics -// by sending them to tanzu observability. -func newSumConsumer( - sender senders.MetricSender, settings component.TelemetrySettings) typedMetricConsumer { - return &sumConsumer{ - sender: sender, - settings: settings, - missingValues: &atomic.Int64{}, - } -} - -func (s *sumConsumer) Type() pmetric.MetricType { - return pmetric.MetricTypeSum -} - -func (s *sumConsumer) Consume(mi metricInfo, errs *[]error) { - sum := mi.Sum() - isDelta := sum.AggregationTemporality() == pmetric.AggregationTemporalityDelta - numberDataPoints := sum.DataPoints() - for i := 0; i < numberDataPoints.Len(); i++ { - // If sum is a delta type, send it to tanzu observability as a - // delta counter. Otherwise, send it to tanzu observability as a gauge - // metric. - if isDelta { - s.pushNumberDataPoint(mi, numberDataPoints.At(i), errs) - } else { - pushGaugeNumberDataPoint( - mi, numberDataPoints.At(i), errs, s.sender, s.settings, s.missingValues) - } - } -} - -func (s *sumConsumer) PushInternalMetrics(errs *[]error) { - report(s.missingValues, missingValueMetricName, typeIsSumTags, s.sender, errs) -} - -func (s *sumConsumer) pushNumberDataPoint(mi metricInfo, numberDataPoint pmetric.NumberDataPoint, errs *[]error) { - tags := pointAndResAttrsToTagsAndFixSource(mi.SourceKey, numberDataPoint.Attributes(), newMap(mi.ResourceAttrs)) - value, err := getValue(numberDataPoint) - if err != nil { - logMissingValue(mi.Metric, s.settings, s.missingValues) - return - } - err = s.sender.SendDeltaCounter(mi.Name(), value, mi.Source, tags) - if err != nil { - *errs = append(*errs, err) - } -} - -// histogramReporting takes care of logging and internal metrics for histograms -type histogramReporting struct { - settings component.TelemetrySettings - malformedHistograms *atomic.Int64 - noAggregationTemporality *atomic.Int64 -} - -// newHistogramReporting returns a new histogramReporting instance. -func newHistogramReporting(settings component.TelemetrySettings) *histogramReporting { - return &histogramReporting{ - settings: settings, - malformedHistograms: &atomic.Int64{}, - noAggregationTemporality: &atomic.Int64{}, - } -} - -// Malformed returns the number of malformed histogram data points. -func (r *histogramReporting) Malformed() int64 { - return r.malformedHistograms.Load() -} - -// NoAggregationTemporality returns the number of histogram metrics that have no -// aggregation temporality. -func (r *histogramReporting) NoAggregationTemporality() int64 { - return r.noAggregationTemporality.Load() -} - -// LogMalformed logs seeing one malformed data point. 
-func (r *histogramReporting) LogMalformed(metric pmetric.Metric) { - namef := zap.String(metricNameString, metric.Name()) - r.settings.Logger.Debug("Malformed histogram", namef) - r.malformedHistograms.Add(1) -} - -// LogNoAggregationTemporality logs seeing a histogram metric with no aggregation temporality -func (r *histogramReporting) LogNoAggregationTemporality(metric pmetric.Metric) { - namef := zap.String(metricNameString, metric.Name()) - r.settings.Logger.Debug("histogram metric missing aggregation temporality", namef) - r.noAggregationTemporality.Add(1) -} - -// Report sends the counts in this instance to wavefront. -// sender is what sends to wavefront. Any errors sending get added to errs. -func (r *histogramReporting) Report(sender gaugeSender, errs *[]error) { - report(r.malformedHistograms, malformedHistogramMetricName, nil, sender, errs) - report(r.noAggregationTemporality, noAggregationTemporalityMetricName, typeIsHistogramTags, sender, errs) -} - -type histogramConsumer struct { - cumulative histogramDataPointConsumer - delta histogramDataPointConsumer - sender gaugeSender - reporting *histogramReporting - spec histogramSpecification -} - -// newHistogramConsumer returns a metricConsumer that consumes histograms. -// cumulative and delta handle cumulative and delta histograms respectively. -// sender sends internal metrics to wavefront. -func newHistogramConsumer( - cumulative, delta histogramDataPointConsumer, - sender gaugeSender, - spec histogramSpecification, - settings component.TelemetrySettings, -) typedMetricConsumer { - return &histogramConsumer{ - cumulative: cumulative, - delta: delta, - sender: sender, - reporting: newHistogramReporting(settings), - spec: spec, - } -} - -func (h *histogramConsumer) Type() pmetric.MetricType { - return h.spec.Type() -} - -func (h *histogramConsumer) Consume(mi metricInfo, errs *[]error) { - aggregationTemporality := h.spec.AggregationTemporality(mi.Metric) - var consumer histogramDataPointConsumer - switch aggregationTemporality { - case pmetric.AggregationTemporalityDelta: - consumer = h.delta - case pmetric.AggregationTemporalityCumulative: - consumer = h.cumulative - default: - h.reporting.LogNoAggregationTemporality(mi.Metric) - return - } - points := h.spec.DataPoints(mi.Metric) - for _, point := range points { - consumer.Consume(mi, point, errs, h.reporting) - } -} - -func (h *histogramConsumer) PushInternalMetrics(errs *[]error) { - h.reporting.Report(h.sender, errs) -} - -// histogramDataPointConsumer consumes one histogram data point. There is one -// implementation for delta histograms and one for cumulative histograms. -type histogramDataPointConsumer interface { - - // Consume consumes a BucketHistogramDataPoint. - // mi is the metricInfo which encloses metric; point is the BucketHistogramDataPoint; - // errors get appended to errs; reporting keeps track of special situations - Consume( - mi metricInfo, - point bucketHistogramDataPoint, - errs *[]error, - reporting *histogramReporting, - ) -} - -type cumulativeHistogramDataPointConsumer struct { - sender gaugeSender -} - -// newCumulativeHistogramDataPointConsumer returns a consumer for cumulative -// histogram data points. 
-func newCumulativeHistogramDataPointConsumer(sender gaugeSender) histogramDataPointConsumer { - return &cumulativeHistogramDataPointConsumer{sender: sender} -} - -func (c *cumulativeHistogramDataPointConsumer) Consume( - mi metricInfo, - point bucketHistogramDataPoint, - errs *[]error, - reporting *histogramReporting, -) { - if !point.Valid() { - reporting.LogMalformed(mi.Metric) - return - } - name := mi.Name() - tags := pointAndResAttrsToTagsAndFixSource(mi.SourceKey, point.Attributes, newMap(mi.ResourceAttrs)) - if leTag, ok := tags["le"]; ok { - tags["_le"] = leTag - } - buckets := point.AsCumulative() - for _, bucket := range buckets { - tags["le"] = bucket.Tag - err := c.sender.SendMetric( - name, float64(bucket.Count), point.SecondsSinceEpoch, mi.Source, tags) - if err != nil { - *errs = append(*errs, err) - } - } -} - -type deltaHistogramDataPointConsumer struct { - sender senders.DistributionSender -} - -// newDeltaHistogramDataPointConsumer returns a consumer for delta -// histogram data points. -func newDeltaHistogramDataPointConsumer( - sender senders.DistributionSender) histogramDataPointConsumer { - return &deltaHistogramDataPointConsumer{sender: sender} -} - -func (d *deltaHistogramDataPointConsumer) Consume( - mi metricInfo, - point bucketHistogramDataPoint, - errs *[]error, - reporting *histogramReporting) { - if !point.Valid() { - reporting.LogMalformed(mi.Metric) - return - } - name := mi.Name() - tags := pointAndResAttrsToTagsAndFixSource(mi.SourceKey, point.Attributes, newMap(mi.ResourceAttrs)) - err := d.sender.SendDistribution( - name, point.AsDelta(), allGranularity, point.SecondsSinceEpoch, mi.Source, tags) - if err != nil { - *errs = append(*errs, err) - } -} - -type summaryConsumer struct { - sender gaugeSender - settings component.TelemetrySettings -} - -// newSummaryConsumer returns a typedMetricConsumer that consumes summary metrics -// by sending them to tanzu observability. 
-func newSummaryConsumer( - sender gaugeSender, settings component.TelemetrySettings, -) typedMetricConsumer { - return &summaryConsumer{sender: sender, settings: settings} -} - -func (s *summaryConsumer) Type() pmetric.MetricType { - return pmetric.MetricTypeSummary -} - -func (s *summaryConsumer) Consume(mi metricInfo, errs *[]error) { - summary := mi.Summary() - summaryDataPoints := summary.DataPoints() - for i := 0; i < summaryDataPoints.Len(); i++ { - s.sendSummaryDataPoint(mi, summaryDataPoints.At(i), errs) - } -} - -// PushInternalMetrics is here so that summaryConsumer implements typedMetricConsumer -func (*summaryConsumer) PushInternalMetrics(*[]error) { - // Do nothing -} - -func (s *summaryConsumer) sendSummaryDataPoint( - mi metricInfo, summaryDataPoint pmetric.SummaryDataPoint, errs *[]error, -) { - name := mi.Name() - ts := summaryDataPoint.Timestamp().AsTime().Unix() - tags := pointAndResAttrsToTagsAndFixSource(mi.SourceKey, summaryDataPoint.Attributes(), newMap(mi.ResourceAttrs)) - count := summaryDataPoint.Count() - sum := summaryDataPoint.Sum() - - if quantileTag, ok := tags["quantile"]; ok { - tags["_quantile"] = quantileTag - delete(tags, "quantile") - } - s.sendMetric(name+"_count", float64(count), ts, tags, errs, mi.Source) - s.sendMetric(name+"_sum", sum, ts, tags, errs, mi.Source) - quantileValues := summaryDataPoint.QuantileValues() - for i := 0; i < quantileValues.Len(); i++ { - quantileValue := quantileValues.At(i) - tags["quantile"] = quantileTagValue(quantileValue.Quantile()) - s.sendMetric(name, quantileValue.Value(), ts, tags, errs, mi.Source) - } -} - -func (s *summaryConsumer) sendMetric( - name string, - value float64, - ts int64, - tags map[string]string, - errs *[]error, - source string) { - err := s.sender.SendMetric(name, value, ts, source, tags) - if err != nil { - *errs = append(*errs, err) - } -} - -func quantileTagValue(quantile float64) string { - return strconv.FormatFloat(quantile, 'f', -1, 64) -} - -// cumulativeBucket represents a cumulative histogram bucket -type cumulativeBucket struct { - - // The value of the "le" tag - Tag string - - // The count of values less than or equal to the "le" tag - Count uint64 -} - -// bucketHistogramDataPoint represents a single histogram data point -type bucketHistogramDataPoint struct { - Attributes pcommon.Map - SecondsSinceEpoch int64 - - // The bucket counts. For exponential histograms, the first and last element of bucketCounts - // are always 0. - bucketCounts []uint64 - - // The explicit bounds len(explicitBounds) + 1 == len(bucketCounts) - // If explicitBounds = {10, 20} and bucketCounts = {1, 2, 3} it means that 1 value is <= 10; - // 2 values are between 10 and 20; and 3 values are > 20 - explicitBounds []float64 - - // true if data point came from an exponential histogram. - exponential bool -} - -// Valid returns true if this is a valid data point. -func (b *bucketHistogramDataPoint) Valid() bool { - return len(b.bucketCounts) == len(b.explicitBounds)+1 -} - -// AsCumulative returns the buckets for a cumulative histogram -func (b *bucketHistogramDataPoint) AsCumulative() []cumulativeBucket { - if !b.Valid() { - panic(histogramDataPointInvalid) - } - - // For exponential histograms, we ignore the first bucket which always has count 0 - // but include the last bucket for +Inf. 
- if b.exponential { - return b.asCumulative(1, len(b.bucketCounts)) - } - return b.asCumulative(0, len(b.bucketCounts)) -} - -// AsDelta returns the centroids for a delta histogram -func (b *bucketHistogramDataPoint) AsDelta() []histogram.Centroid { - if !b.Valid() { - panic(histogramDataPointInvalid) - } - - // For exponential histograms, we ignore the first and last centroids which always have a - // count of 0. - if b.exponential { - return b.asDelta(1, len(b.bucketCounts)-1) - } - return b.asDelta(0, len(b.bucketCounts)) -} - -func (b *bucketHistogramDataPoint) asCumulative( - startBucketIndex, endBucketIndex int) []cumulativeBucket { - result := make([]cumulativeBucket, 0, endBucketIndex-startBucketIndex) - var leCount uint64 - for i := startBucketIndex; i < endBucketIndex; i++ { - leCount += b.bucketCounts[i] - result = append(result, cumulativeBucket{Tag: b.leTagValue(i), Count: leCount}) - } - return result -} - -func (b *bucketHistogramDataPoint) asDelta( - startBucketIndex, endBucketIndex int) []histogram.Centroid { - result := make([]histogram.Centroid, 0, endBucketIndex-startBucketIndex) - for i := startBucketIndex; i < endBucketIndex; i++ { - result = append( - result, - histogram.Centroid{Value: b.centroidValue(i), Count: int(b.bucketCounts[i])}) - } - return result -} - -func (b *bucketHistogramDataPoint) leTagValue(bucketIndex int) string { - if bucketIndex == len(b.explicitBounds) { - return "+Inf" - } - return strconv.FormatFloat(b.explicitBounds[bucketIndex], 'f', -1, 64) -} - -func (b *bucketHistogramDataPoint) centroidValue(index int) float64 { - length := len(b.explicitBounds) - if length == 0 { - // This is the best we can do. - return 0.0 - } - if index == 0 { - return b.explicitBounds[0] - } - if index == length { - return b.explicitBounds[length-1] - } - return (b.explicitBounds[index-1] + b.explicitBounds[index]) / 2.0 -} - -type histogramSpecification interface { - Type() pmetric.MetricType - AggregationTemporality(metric pmetric.Metric) pmetric.AggregationTemporality - DataPoints(metric pmetric.Metric) []bucketHistogramDataPoint -} - -type regularHistogramSpecification struct { -} - -func (regularHistogramSpecification) Type() pmetric.MetricType { - return pmetric.MetricTypeHistogram -} - -func (regularHistogramSpecification) AggregationTemporality( - metric pmetric.Metric) pmetric.AggregationTemporality { - return metric.Histogram().AggregationTemporality() -} - -func (regularHistogramSpecification) DataPoints(metric pmetric.Metric) []bucketHistogramDataPoint { - return fromOtelHistogram(metric.Histogram().DataPoints()) -} - -type exponentialHistogramSpecification struct { -} - -func (exponentialHistogramSpecification) Type() pmetric.MetricType { - return pmetric.MetricTypeExponentialHistogram -} - -func (exponentialHistogramSpecification) AggregationTemporality( - metric pmetric.Metric) pmetric.AggregationTemporality { - return metric.ExponentialHistogram().AggregationTemporality() -} - -func (exponentialHistogramSpecification) DataPoints( - metric pmetric.Metric) []bucketHistogramDataPoint { - return fromOtelExponentialHistogram(metric.ExponentialHistogram().DataPoints()) -} - -// fromOtelHistogram converts a regular histogram metric into a slice of data points. 
-func fromOtelHistogram(points pmetric.HistogramDataPointSlice) []bucketHistogramDataPoint { - result := make([]bucketHistogramDataPoint, points.Len()) - for i := 0; i < points.Len(); i++ { - result[i] = fromOtelHistogramDataPoint(points.At(i)) - } - return result -} - -// fromOtelExponentialHistogram converts an exponential histogram into a slice of data points. -func fromOtelExponentialHistogram( - points pmetric.ExponentialHistogramDataPointSlice) []bucketHistogramDataPoint { - result := make([]bucketHistogramDataPoint, points.Len()) - for i := 0; i < points.Len(); i++ { - result[i] = fromOtelExponentialHistogramDataPoint(points.At(i)) - } - return result -} - -func fromOtelHistogramDataPoint(point pmetric.HistogramDataPoint) bucketHistogramDataPoint { - return bucketHistogramDataPoint{ - Attributes: point.Attributes(), - SecondsSinceEpoch: point.Timestamp().AsTime().Unix(), - bucketCounts: point.BucketCounts().AsRaw(), - explicitBounds: point.ExplicitBounds().AsRaw(), - } -} - -func fromOtelExponentialHistogramDataPoint( - point pmetric.ExponentialHistogramDataPoint) bucketHistogramDataPoint { - - // Base is the factor by which the explicit bounds increase from bucket to bucket. - // This formula comes from the documentation here: - // https://github.com/open-telemetry/opentelemetry-proto/blob/8ba33cceb4a6704af68a4022d17868a7ac1d94f4/opentelemetry/proto/metrics/v1/metrics.proto#L487 - base := math.Pow(2.0, math.Pow(2.0, -float64(point.Scale()))) - - // ExponentialHistogramDataPoints have buckets with negative explicit bounds, buckets with - // positive explicit bounds, and a "zero" bucket. Our job is to merge these bucket groups into - // a single list of buckets and explicit bounds. - negativeBucketCounts := point.Negative().BucketCounts().AsRaw() - positiveBucketCounts := point.Positive().BucketCounts().AsRaw() - - // The total number of buckets is the number of negative buckets + the number of positive - // buckets + 1 for the zero bucket + 1 bucket for negative infinity up to the smallest negative explicit bound - // + 1 bucket for the largest positive explicit bound up to positive infinity. - numBucketCounts := 1 + len(negativeBucketCounts) + 1 + len(positiveBucketCounts) + 1 - - // We pre-allocate the slice setting its length to 0 so that GO doesn't have to keep - // re-allocating the slice as it grows. - bucketCounts := make([]uint64, 0, numBucketCounts) - - // The number of explicit bounds is always 1 less than the number of buckets. This is how - // explicit bounds work. If you have 2 explicit bounds say {2.0, 5.0} then you have 3 buckets: - // one for values less than 2.0; one for values between 2.0 and 5.0; and one for values greater - // than 5.0. - explicitBounds := make([]float64, 0, numBucketCounts-1) - - appendNegativeBucketsAndExplicitBounds( - point.Negative().Offset(), base, negativeBucketCounts, &bucketCounts, &explicitBounds) - appendZeroBucketAndExplicitBound( - point.Positive().Offset(), base, point.ZeroCount(), &bucketCounts, &explicitBounds) - appendPositiveBucketsAndExplicitBounds( - point.Positive().Offset(), base, positiveBucketCounts, &bucketCounts, &explicitBounds) - return bucketHistogramDataPoint{ - Attributes: point.Attributes(), - SecondsSinceEpoch: point.Timestamp().AsTime().Unix(), - bucketCounts: bucketCounts, - explicitBounds: explicitBounds, - exponential: true, - } -} - -// appendNegativeBucketsAndExplicitBounds appends negative buckets and explicit bounds to -// bucketCounts and explicitBounds respectively. 
The largest negative explicit bound (the one -// with the smallest magnitude) is -1*base^negativeOffset -func appendNegativeBucketsAndExplicitBounds( - negativeOffset int32, - base float64, - negativeBucketCounts []uint64, - bucketCounts *[]uint64, - explicitBounds *[]float64, -) { - // The count in the first bucket which includes negative infinity is always 0. - *bucketCounts = append(*bucketCounts, 0) - - // The smallest negative explicit bound. - le := -math.Pow(base, float64(negativeOffset)+float64(len(negativeBucketCounts))) - *explicitBounds = append(*explicitBounds, le) - - // The first negativeBucketCount has a negative explicit bound with the smallest magnitude; - // the last negativeBucketCount has a negative explicit bound with the largest magnitude. - // Therefore, to go in order from smallest to largest explicit bound, we have to start with - // the last element in the negativeBucketCounts array. - for i := len(negativeBucketCounts) - 1; i >= 0; i-- { - *bucketCounts = append(*bucketCounts, negativeBucketCounts[i]) - le /= base // We divide by base because our explicit bounds are getting larger as we go - *explicitBounds = append(*explicitBounds, le) - } -} - -// appendZeroBucketAndExplicitBound appends the "zero" bucket and explicit bound to bucketCounts -// and explicitBounds respectively. The smallest positive explicit bound is base^positiveOffset. -func appendZeroBucketAndExplicitBound( - positiveOffset int32, - base float64, - zeroBucketCount uint64, - bucketCounts *[]uint64, - explicitBounds *[]float64, -) { - *bucketCounts = append(*bucketCounts, zeroBucketCount) - - // The explicit bound of the zeroBucketCount is the smallest positive explicit bound - *explicitBounds = append(*explicitBounds, math.Pow(base, float64(positiveOffset))) -} - -// appendPositiveBucketsAndExplicitBounds appends positive buckets and explicit bounds to -// bucketCounts and explicitBounds respectively. The smallest positive explicit bound is -// base^positiveOffset. -func appendPositiveBucketsAndExplicitBounds( - positiveOffset int32, - base float64, - positiveBucketCounts []uint64, - bucketCounts *[]uint64, - explicitBounds *[]float64, -) { - le := math.Pow(base, float64(positiveOffset)) - for _, bucketCount := range positiveBucketCounts { - *bucketCounts = append(*bucketCounts, bucketCount) - le *= base - *explicitBounds = append(*explicitBounds, le) - } - // Last bucket count for positive infinity is always 0. 
- *bucketCounts = append(*bucketCounts, 0) -} diff --git a/exporter/tanzuobservabilityexporter/metrics_exporter.go b/exporter/tanzuobservabilityexporter/metrics_exporter.go deleted file mode 100644 index ba2f248f5f20..000000000000 --- a/exporter/tanzuobservabilityexporter/metrics_exporter.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" - -import ( - "context" - "fmt" - - "github.com/wavefronthq/wavefront-sdk-go/senders" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -type metricsExporter struct { - consumer *metricsConsumer -} - -func createMetricsConsumer(config MetricsConfig, settings component.TelemetrySettings, otelVersion string) (*metricsConsumer, error) { - s, err := senders.NewSender(config.Endpoint, - senders.FlushIntervalSeconds(60), - senders.SDKMetricsTags(map[string]string{"otel.metrics.collector_version": otelVersion}), - ) - if err != nil { - return nil, fmt.Errorf("failed to create proxy sender: %w", err) - } - cumulative := newCumulativeHistogramDataPointConsumer(s) - delta := newDeltaHistogramDataPointConsumer(s) - return newMetricsConsumer( - []typedMetricConsumer{ - newGaugeConsumer(s, settings), - newSumConsumer(s, settings), - newHistogramConsumer(cumulative, delta, s, regularHistogram, settings), - newHistogramConsumer(cumulative, delta, s, exponentialHistogram, settings), - newSummaryConsumer(s, settings), - }, - s, - true, config), nil -} - -type metricsConsumerCreator func(config MetricsConfig, settings component.TelemetrySettings, otelVersion string) ( - *metricsConsumer, error) - -func newMetricsExporter(settings exporter.CreateSettings, c component.Config, creator metricsConsumerCreator) (*metricsExporter, error) { - cfg, ok := c.(*Config) - if !ok { - return nil, fmt.Errorf("invalid config: %#v", c) - } - if !cfg.hasMetricsEndpoint() { - return nil, fmt.Errorf("metrics.endpoint required") - } - if _, _, err := cfg.parseMetricsEndpoint(); err != nil { - return nil, fmt.Errorf("failed to parse metrics.endpoint: %w", err) - } - consumer, err := creator(cfg.Metrics, settings.TelemetrySettings, settings.BuildInfo.Version) - if err != nil { - return nil, err - } - return &metricsExporter{ - consumer: consumer, - }, nil -} - -func (e *metricsExporter) pushMetricsData(ctx context.Context, md pmetric.Metrics) error { - return e.consumer.Consume(ctx, md) -} - -func (e *metricsExporter) shutdown(_ context.Context) error { - e.consumer.Close() - return nil -} diff --git a/exporter/tanzuobservabilityexporter/metrics_exporter_test.go b/exporter/tanzuobservabilityexporter/metrics_exporter_test.go deleted file mode 100644 index 77eca0a5c020..000000000000 --- a/exporter/tanzuobservabilityexporter/metrics_exporter_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter - -import ( - "context" - "errors" - "log" - "testing" - - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/exporter/exportertest" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -func TestPushMetricsData(t *testing.T) { - assert.NoError(t, 
verifyPushMetricsData(t, false)) -} - -func TestPushMetricsDataErrorOnSend(t *testing.T) { - assert.Error(t, verifyPushMetricsData(t, true)) -} - -func verifyPushMetricsData(t *testing.T, errorOnSend bool) error { - metric := newMetric("test.metric", pmetric.MetricTypeGauge) - dataPoints := metric.Gauge().DataPoints() - dataPoints.EnsureCapacity(1) - addDataPoint( - 7, - 1631205001, - map[string]any{ - "env": "prod", - "bucket": 73, - }, - dataPoints, - ) - metrics := constructMetrics(metric) - sender := &mockMetricSender{errorOnSend: errorOnSend} - result := consumeMetrics(metrics, sender) - assert.Equal(t, 1, sender.numFlushCalls) - assert.Equal(t, 1, sender.numCloseCalls) - assert.Equal(t, 1, sender.numSendMetricCalls) - return result -} - -func createMockMetricsExporter( - sender *mockMetricSender) (exporter.Metrics, error) { - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - tobsConfig.Metrics.Endpoint = "http://localhost:2878" - creator := func( - metricsConfig MetricsConfig, settings component.TelemetrySettings, otelVersion string) (*metricsConsumer, error) { - return newMetricsConsumer( - []typedMetricConsumer{ - newGaugeConsumer(sender, settings), - }, - sender, - false, - tobsConfig.Metrics, - ), nil - } - - exp, err := newMetricsExporter(exportertest.NewNopCreateSettings(), exporterConfig, creator) - if err != nil { - return nil, err - } - return exporterhelper.NewMetricsExporter( - context.Background(), - exportertest.NewNopCreateSettings(), - exporterConfig, - exp.pushMetricsData, - exporterhelper.WithShutdown(exp.shutdown), - ) -} - -func consumeMetrics(metrics pmetric.Metrics, sender *mockMetricSender) error { - ctx := context.Background() - mockOTelMetricsExporter, err := createMockMetricsExporter(sender) - if err != nil { - return err - } - defer func() { - if err := mockOTelMetricsExporter.Shutdown(ctx); err != nil { - log.Fatalln(err) - } - }() - return mockOTelMetricsExporter.ConsumeMetrics(ctx, metrics) -} - -type mockMetricSender struct { - errorOnSend bool - numFlushCalls int - numCloseCalls int - numSendMetricCalls int -} - -func (m *mockMetricSender) SendMetric( - _ string, _ float64, _ int64, _ string, _ map[string]string) error { - m.numSendMetricCalls++ - if m.errorOnSend { - return errors.New("error sending") - } - return nil -} - -func (m *mockMetricSender) Flush() error { - m.numFlushCalls++ - return nil -} - -func (m *mockMetricSender) Close() { m.numCloseCalls++ } diff --git a/exporter/tanzuobservabilityexporter/metrics_test.go b/exporter/tanzuobservabilityexporter/metrics_test.go deleted file mode 100644 index d37f85942894..000000000000 --- a/exporter/tanzuobservabilityexporter/metrics_test.go +++ /dev/null @@ -1,1688 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter - -import ( - "context" - "errors" - "math" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/wavefronthq/wavefront-sdk-go/histogram" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.uber.org/zap" - "go.uber.org/zap/zaptest/observer" -) - -func TestEndToEndGaugeConsumer(t *testing.T) { - gauge := newMetric("gauge", pmetric.MetricTypeGauge) - dataPoints := gauge.Gauge().DataPoints() - dataPoints.EnsureCapacity(1) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - // Here we test what happens with default 
config. ResourceAttrsIncluded = false AppTagsExcluded = false - addDataPoint( - 432.25, - 1640123456, - map[string]any{"source": "renamed", "host.name": "my_source", "env": "prod"}, - dataPoints, - ) - // test if service.name gets converted to service - resourceAttributes := map[string]string{"host.name": "my_source", "res_attr_key": "res_attr_value", "application": "test_app", "service.name": "test_service", "shard": "test_shard", "cluster": "test_cluster"} - metrics := constructMetricsWithTags(resourceAttributes, gauge) - sender := &mockGaugeSender{} - gaugeConsumer := newGaugeConsumer(sender, componenttest.NewNopTelemetrySettings()) - consumer := newMetricsConsumer( - []typedMetricConsumer{gaugeConsumer}, &mockFlushCloser{}, true, tobsConfig.Metrics) - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - - // The "host.name" tag gets filtered out as it contains our source, and the "source" - // tag gets renamed to "_source" - assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: "gauge", - Ts: 1640123456, - Value: 432.25, - Tags: map[string]string{"_source": "renamed", "env": "prod", "application": "test_app", "service": "test_service", "shard": "test_shard", "cluster": "test_cluster"}, - Source: "my_source", - }, - ) - - // Since internal metrics are coming from the exporter itself, we send - // them with an empty source which defaults to the host name of the - // exporter. - assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: missingValueMetricName, - Ts: 0, - Value: 0.0, - Tags: typeIsGaugeTags, - Source: "", - }, - ) -} - -func TestEndToEndGaugeConsumerWithResAttrsIncluded(t *testing.T) { - gauge := newMetric("gauge", pmetric.MetricTypeGauge) - dataPoints := gauge.Gauge().DataPoints() - dataPoints.EnsureCapacity(1) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - tobsConfig.Metrics.ResourceAttrsIncluded = true - // Here we test what happens if ResourceAttrsIncluded = true - addDataPoint( - 432.25, - 1640123456, - map[string]any{"source": "renamed", "host.name": "my_source", "env": "prod"}, - dataPoints, - ) - // test if service.name gets converted to service - resourceAttributes := map[string]string{"host.name": "my_source", "res_attr_key": "res_attr_value", "service.name": "test_service"} - metrics := constructMetricsWithTags(resourceAttributes, gauge) - sender := &mockGaugeSender{} - gaugeConsumer := newGaugeConsumer(sender, componenttest.NewNopTelemetrySettings()) - consumer := newMetricsConsumer( - []typedMetricConsumer{gaugeConsumer}, &mockFlushCloser{}, true, tobsConfig.Metrics) - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - - // The "host.name" tag gets filtered out as it contains our source, and the "source" - // tag gets renamed to "_source" - assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: "gauge", - Ts: 1640123456, - Value: 432.25, - Tags: map[string]string{"_source": "renamed", "env": "prod", "res_attr_key": "res_attr_value", "service": "test_service"}, - Source: "my_source", - }, - ) - - // Since internal metrics are coming from the exporter itself, we send - // them with an empty source which defaults to the host name of the - // exporter. 
- assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: missingValueMetricName, - Ts: 0, - Value: 0.0, - Tags: typeIsGaugeTags, - Source: "", - }, - ) -} - -func TestEndToEndGaugeConsumerWithAppResAttrsExcluded(t *testing.T) { - gauge := newMetric("gauge", pmetric.MetricTypeGauge) - dataPoints := gauge.Gauge().DataPoints() - dataPoints.EnsureCapacity(1) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - tobsConfig.Metrics.AppTagsExcluded = true - // Here we test what happens if ResourceAttrsIncluded = false AppTagsExcluded = true - addDataPoint( - 432.25, - 1640123456, - map[string]any{"source": "renamed", "host.name": "my_source", "env": "prod"}, - dataPoints, - ) - // test if service.name gets converted to service - resourceAttributes := map[string]string{"host.name": "my_source", "res_attr_key": "res_attr_value", "application": "test_app", "service": "test_service", "service.name": "test_service.name", "shard": "test_shard", "cluster": "test_cluster"} - metrics := constructMetricsWithTags(resourceAttributes, gauge) - sender := &mockGaugeSender{} - gaugeConsumer := newGaugeConsumer(sender, componenttest.NewNopTelemetrySettings()) - consumer := newMetricsConsumer( - []typedMetricConsumer{gaugeConsumer}, &mockFlushCloser{}, true, tobsConfig.Metrics) - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - - // The "host.name" tag gets filtered out as it contains our source, and the "source" - // tag gets renamed to "_source" - assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: "gauge", - Ts: 1640123456, - Value: 432.25, - Tags: map[string]string{"_source": "renamed", "env": "prod"}, - Source: "my_source", - }, - ) - - // Since internal metrics are coming from the exporter itself, we send - // them with an empty source which defaults to the host name of the - // exporter. 
- assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: missingValueMetricName, - Ts: 0, - Value: 0.0, - Tags: typeIsGaugeTags, - Source: "", - }, - ) -} - -func TestMetricsConsumerNormal(t *testing.T) { - gauge1 := newMetric("gauge1", pmetric.MetricTypeGauge) - sum1 := newMetric("sum1", pmetric.MetricTypeSum) - gauge2 := newMetric("gauge2", pmetric.MetricTypeGauge) - sum2 := newMetric("sum2", pmetric.MetricTypeSum) - mockGaugeConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricTypeGauge} - mockSumConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricTypeSum} - sender := &mockFlushCloser{} - metrics := constructMetrics(gauge1, sum1, gauge2, sum2) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer( - []typedMetricConsumer{mockGaugeConsumer, mockSumConsumer}, sender, true, tobsConfig.Metrics) - - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - - assert.ElementsMatch(t, []string{"gauge1", "gauge2"}, mockGaugeConsumer.names) - assert.ElementsMatch(t, []string{"sum1", "sum2"}, mockSumConsumer.names) - assert.Equal(t, 1, mockGaugeConsumer.pushInternalMetricsCallCount) - assert.Equal(t, 1, mockSumConsumer.pushInternalMetricsCallCount) - assert.Equal(t, 1, sender.numFlushCalls) - assert.Equal(t, 0, sender.numCloseCalls) - - consumer.Close() - assert.Equal(t, 1, sender.numCloseCalls) -} - -func TestMetricsConsumerNormalWithSourceTag(t *testing.T) { - sum := newMetric("sum", pmetric.MetricTypeSum) - mockSumConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricTypeSum} - sender := &mockFlushCloser{} - tags := map[string]string{"source": "test_source", "test_key": "test_value"} - metrics := constructMetricsWithTags(tags, sum) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer( - []typedMetricConsumer{mockSumConsumer}, sender, true, tobsConfig.Metrics) - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - - assert.ElementsMatch(t, []string{"sum"}, mockSumConsumer.names) - assert.ElementsMatch(t, []string{"test_source"}, mockSumConsumer.sources) - assert.ElementsMatch(t, []string{"source"}, mockSumConsumer.sourceKeys) - - assert.Equal(t, 1, mockSumConsumer.pushInternalMetricsCallCount) - assert.Equal(t, 1, sender.numFlushCalls) - assert.Equal(t, 0, sender.numCloseCalls) - - consumer.Close() - assert.Equal(t, 1, sender.numCloseCalls) -} - -func TestMetricsConsumerNormalWithHostnameTag(t *testing.T) { - sum := newMetric("sum", pmetric.MetricTypeSum) - mockSumConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricTypeSum} - sender := &mockFlushCloser{} - tags := map[string]string{"host.name": "test_host.name", "hostname": "test_hostname"} - metrics := constructMetricsWithTags(tags, sum) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer( - []typedMetricConsumer{mockSumConsumer}, sender, true, tobsConfig.Metrics) - - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - - assert.ElementsMatch(t, []string{"sum"}, mockSumConsumer.names) - assert.ElementsMatch(t, []string{"test_host.name"}, mockSumConsumer.sources) - assert.ElementsMatch(t, []string{"host.name"}, mockSumConsumer.sourceKeys) - - assert.Equal(t, 1, mockSumConsumer.pushInternalMetricsCallCount) - assert.Equal(t, 1, sender.numFlushCalls) - assert.Equal(t, 0, sender.numCloseCalls) - - consumer.Close() - assert.Equal(t, 1, sender.numCloseCalls) -} - -func TestMetricsConsumerNone(t 
*testing.T) { - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer(nil, nil, true, tobsConfig.Metrics) - metrics := constructMetrics() - - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - - consumer.Close() -} - -func TestNewMetricsConsumerPanicsWithDuplicateMetricType(t *testing.T) { - mockGaugeConsumer1 := &mockTypedMetricConsumer{typ: pmetric.MetricTypeGauge} - mockGaugeConsumer2 := &mockTypedMetricConsumer{typ: pmetric.MetricTypeGauge} - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - assert.Panics(t, func() { - newMetricsConsumer( - []typedMetricConsumer{mockGaugeConsumer1, mockGaugeConsumer2}, - nil, - true, tobsConfig.Metrics) - }) -} - -func TestMetricsConsumerPropagatesErrorsOnFlush(t *testing.T) { - sender := &mockFlushCloser{errorOnFlush: true} - metrics := constructMetrics() - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer(nil, sender, true, tobsConfig.Metrics) - - assert.Error(t, consumer.Consume(context.Background(), metrics)) - assert.Equal(t, 1, sender.numFlushCalls) -} - -func TestMetricsConsumerErrorsWithUnregisteredMetricType(t *testing.T) { - gauge1 := newMetric("gauge1", pmetric.MetricTypeGauge) - metrics := constructMetrics(gauge1) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer(nil, nil, true, tobsConfig.Metrics) - - assert.Error(t, consumer.Consume(context.Background(), metrics)) -} - -func TestMetricsConsumerErrorConsuming(t *testing.T) { - gauge1 := newMetric("gauge1", pmetric.MetricTypeGauge) - mockGaugeConsumer := &mockTypedMetricConsumer{ - typ: pmetric.MetricTypeGauge, - errorOnConsume: true} - metrics := constructMetrics(gauge1) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer( - []typedMetricConsumer{mockGaugeConsumer}, nil, true, tobsConfig.Metrics) - - assert.Error(t, consumer.Consume(context.Background(), metrics)) - assert.Len(t, mockGaugeConsumer.names, 1) - assert.Equal(t, 1, mockGaugeConsumer.pushInternalMetricsCallCount) -} - -func TestMetricsConsumerNoReportingInternalMetrics(t *testing.T) { - gauge1 := newMetric("gauge1", pmetric.MetricTypeGauge) - mockGaugeConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricTypeGauge} - metrics := constructMetrics(gauge1) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer( - []typedMetricConsumer{mockGaugeConsumer}, nil, false, tobsConfig.Metrics) - assert.NoError(t, consumer.Consume(context.Background(), metrics)) - assert.Len(t, mockGaugeConsumer.names, 1) - assert.Equal(t, 0, mockGaugeConsumer.pushInternalMetricsCallCount) -} - -func TestMetricsConsumerErrorConsumingInternal(t *testing.T) { - gauge1 := newMetric("gauge1", pmetric.MetricTypeGauge) - mockGaugeConsumer := &mockTypedMetricConsumer{ - typ: pmetric.MetricTypeGauge, errorOnPushInternalMetrics: true} - metrics := constructMetrics(gauge1) - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer( - []typedMetricConsumer{mockGaugeConsumer}, nil, true, tobsConfig.Metrics) - - assert.Error(t, consumer.Consume(context.Background(), metrics)) - assert.Len(t, mockGaugeConsumer.names, 1) - assert.Equal(t, 1, mockGaugeConsumer.pushInternalMetricsCallCount) -} - -func TestMetricsConsumerRespectContext(t 
*testing.T) { - sender := &mockFlushCloser{} - gauge1 := newMetric("gauge1", pmetric.MetricTypeGauge) - mockGaugeConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricTypeGauge} - exporterConfig := createDefaultConfig() - tobsConfig := exporterConfig.(*Config) - - consumer := newMetricsConsumer( - []typedMetricConsumer{mockGaugeConsumer}, sender, true, tobsConfig.Metrics) - ctx, cancel := context.WithCancel(context.Background()) - - cancel() - assert.Error(t, consumer.Consume(ctx, constructMetrics(gauge1))) - - assert.Zero(t, sender.numFlushCalls) - assert.Empty(t, mockGaugeConsumer.names) - assert.Zero(t, mockGaugeConsumer.pushInternalMetricsCallCount) -} - -func TestGaugeConsumerNormal(t *testing.T) { - verifyGaugeConsumer(t, false) -} - -func TestGaugeConsumerErrorSending(t *testing.T) { - verifyGaugeConsumer(t, true) -} - -func TestGaugeConsumerMissingValue(t *testing.T) { - metric := newMetric("missing.value.metric", pmetric.MetricTypeGauge) - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} - dataPoints := metric.Gauge().DataPoints() - dataPoints.EnsureCapacity(1) - addDataPoint( - nil, - 1633123456, - nil, - dataPoints, - ) - // Sending to tanzu observability should fail - sender := &mockGaugeSender{errorOnSend: true} - observedZapCore, observedLogs := observer.New(zap.DebugLevel) - settings := componenttest.NewNopTelemetrySettings() - settings.Logger = zap.New(observedZapCore) - consumer := newGaugeConsumer(sender, settings) - var errs []error - expectedMissingValueCount := 2 - for i := 0; i < expectedMissingValueCount; i++ { - // This call to Consume does not emit any metrics to tanzuobservability - // because the metric is missing its value. - consumer.Consume(mi, &errs) - } - assert.Empty(t, errs) - - // This call adds one error to errs because it emits a metric to - // tanzu observability and emitting there is set up to fail. 
- consumer.PushInternalMetrics(&errs) - - // One error from emitting the internal metric - assert.Len(t, errs, 1) - assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: missingValueMetricName, - Value: float64(expectedMissingValueCount), - Tags: map[string]string{"type": "gauge"}, - }, - ) - allLogs := observedLogs.All() - assert.Len(t, allLogs, expectedMissingValueCount) -} - -func TestSumConsumerDelta(t *testing.T) { - deltaMetric := newMetric( - "test.delta.metric", pmetric.MetricTypeSum) - sum := deltaMetric.Sum() - mi := metricInfo{Metric: deltaMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - dataPoints := sum.DataPoints() - dataPoints.EnsureCapacity(2) - addDataPoint( - 35, - 1635205001, - map[string]any{ - "env": "dev", - }, - dataPoints, - ) - addDataPoint( - 52.375, - 1635205002, - map[string]any{ - "env": "prod", - }, - dataPoints, - ) - - sender := &mockSumSender{} - consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pmetric.MetricTypeSum, consumer.Type()) - var errs []error - - // delta sums get treated as delta counters - consumer.Consume(mi, &errs) - - expected := []tobsMetric{ - { - Name: "test.delta.metric", - Value: 35.0, - Source: "test_source", - Tags: map[string]string{"env": "dev", "res_attr_key": "res_attr_value"}, - }, - { - Name: "test.delta.metric", - Value: 52.375, - Source: "test_source", - Tags: map[string]string{"env": "prod", "res_attr_key": "res_attr_value"}, - }, - } - assert.ElementsMatch(t, expected, sender.deltaMetrics) - assert.Empty(t, sender.metrics) - assert.Empty(t, errs) -} - -func TestSumConsumerErrorOnSend(t *testing.T) { - deltaMetric := newMetric( - "test.delta.metric", pmetric.MetricTypeSum) - sum := deltaMetric.Sum() - mi := metricInfo{Metric: deltaMetric, Source: "test_source", SourceKey: "host.name"} - sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - dataPoints := sum.DataPoints() - dataPoints.EnsureCapacity(2) - addDataPoint( - 35, - 1635205001, - map[string]any{ - "env": "dev", - }, - dataPoints, - ) - addDataPoint( - 52.375, - 1635205002, - map[string]any{ - "env": "prod", - }, - dataPoints, - ) - - sender := &mockSumSender{errorOnSend: true} - consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pmetric.MetricTypeSum, consumer.Type()) - var errs []error - - // delta sums get treated as delta counters - consumer.Consume(mi, &errs) - - assert.Len(t, errs, 2) -} - -func TestSumConsumerCumulative(t *testing.T) { - cumulativeMetric := newMetric( - "test.cumulative.metric", pmetric.MetricTypeSum) - sum := cumulativeMetric.Sum() - mi := metricInfo{Metric: cumulativeMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - dataPoints := sum.DataPoints() - dataPoints.EnsureCapacity(1) - addDataPoint( - 62.25, - 1634205001, - map[string]any{ - "env": "dev", - }, - dataPoints, - ) - sender := &mockSumSender{} - consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pmetric.MetricTypeSum, consumer.Type()) - var errs []error - - // cumulative sums get treated as regular wavefront metrics - consumer.Consume(mi, &errs) - - expected := []tobsMetric{ - { - Name: "test.cumulative.metric", - Value: 62.25, - Ts: 1634205001, - 
Source: "test_source", - Tags: map[string]string{"env": "dev", "res_attr_key": "res_attr_value"}, - }, - } - assert.ElementsMatch(t, expected, sender.metrics) - assert.Empty(t, sender.deltaMetrics) - assert.Empty(t, errs) -} - -func TestSumConsumerUnspecified(t *testing.T) { - cumulativeMetric := newMetric( - "test.unspecified.metric", pmetric.MetricTypeSum) - sum := cumulativeMetric.Sum() - mi := metricInfo{Metric: cumulativeMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - sum.SetAggregationTemporality(pmetric.AggregationTemporalityUnspecified) - dataPoints := sum.DataPoints() - dataPoints.EnsureCapacity(1) - addDataPoint( - 72.25, - 1634206001, - map[string]any{ - "env": "qa", - }, - dataPoints, - ) - sender := &mockSumSender{} - consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pmetric.MetricTypeSum, consumer.Type()) - var errs []error - - // unspecified sums get treated as regular wavefront metrics - consumer.Consume(mi, &errs) - - expected := []tobsMetric{ - { - Name: "test.unspecified.metric", - Value: 72.25, - Ts: 1634206001, - Source: "test_source", - Tags: map[string]string{"env": "qa", "res_attr_key": "res_attr_value"}, - }, - } - assert.ElementsMatch(t, expected, sender.metrics) - assert.Empty(t, sender.deltaMetrics) - assert.Empty(t, errs) -} - -func TestSumConsumerMissingValue(t *testing.T) { - metric := newMetric("missing.value.metric", pmetric.MetricTypeSum) - sum := metric.Sum() - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - dataPoints := sum.DataPoints() - dataPoints.EnsureCapacity(1) - addDataPoint( - nil, - 1633123456, - nil, - dataPoints, - ) - sender := &mockSumSender{} - observedZapCore, observedLogs := observer.New(zap.DebugLevel) - settings := componenttest.NewNopTelemetrySettings() - settings.Logger = zap.New(observedZapCore) - consumer := newSumConsumer(sender, settings) - var errs []error - - expectedMissingValueCount := 2 - for i := 0; i < expectedMissingValueCount; i++ { - consumer.Consume(mi, &errs) - } - consumer.PushInternalMetrics(&errs) - - assert.Len(t, errs, 0) - assert.Empty(t, sender.deltaMetrics) - assert.Contains(t, sender.metrics, tobsMetric{ - Name: missingValueMetricName, - Value: float64(expectedMissingValueCount), - Tags: map[string]string{"type": "sum"}, - }) - allLogs := observedLogs.All() - assert.Len(t, allLogs, expectedMissingValueCount) -} - -// Tests that the histogramConsumer correctly delegates to its -// histogramDataPointConsumers. 
This tests delta histograms -func TestHistogramConsumerDeltaAggregation(t *testing.T) { - numBucketCountsForEachDataPoint := []int{2, 5, 10} - deltaMetric := newHistogramMetricWithDataPoints( - "delta.metric", - pmetric.AggregationTemporalityDelta, - numBucketCountsForEachDataPoint) - mi := metricInfo{Metric: deltaMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - sender := &mockGaugeSender{} - cumulativeConsumer := &mockHistogramDataPointConsumer{} - deltaConsumer := &mockHistogramDataPointConsumer{} - consumer := newHistogramConsumer( - cumulativeConsumer, - deltaConsumer, - sender, - regularHistogram, - componenttest.NewNopTelemetrySettings()) - var errs []error - consumer.Consume(mi, &errs) - - assert.Empty(t, errs) - - // We had three datapoints. Our mock just captures the metric name of - // each data point consumed. - assert.Equal( - t, []string{"delta.metric", "delta.metric", "delta.metric"}, deltaConsumer.names) - assert.Equal(t, numBucketCountsForEachDataPoint, deltaConsumer.counts) - assert.Empty(t, cumulativeConsumer.names) - assert.Empty(t, cumulativeConsumer.counts) -} - -// Tests that the histogramConsumer correctly delegates to its -// histogramDataPointConsumers. This tests cumulative histograms -func TestHistogramConsumerCumulativeAggregation(t *testing.T) { - numBucketCountsForEachDataPoint := []int{2, 5, 10} - cumulativeMetric := newHistogramMetricWithDataPoints( - "cumulative.metric", - pmetric.AggregationTemporalityCumulative, - numBucketCountsForEachDataPoint) - mi := metricInfo{Metric: cumulativeMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - sender := &mockGaugeSender{} - cumulativeConsumer := &mockHistogramDataPointConsumer{} - deltaConsumer := &mockHistogramDataPointConsumer{} - consumer := newHistogramConsumer( - cumulativeConsumer, - deltaConsumer, - sender, - regularHistogram, - componenttest.NewNopTelemetrySettings()) - var errs []error - - consumer.Consume(mi, &errs) - - assert.Empty(t, errs) - - // We had three datapoints. Our mock just captures the metric name of - // each data point consumed. - assert.Equal( - t, - []string{"cumulative.metric", "cumulative.metric", "cumulative.metric"}, - cumulativeConsumer.names) - assert.Equal(t, numBucketCountsForEachDataPoint, cumulativeConsumer.counts) - assert.Empty(t, deltaConsumer.names) - assert.Empty(t, deltaConsumer.counts) -} - -// This tests that the histogram consumer correctly counts and logs -// histogram metrics with missing aggregation attribute. 
-func TestHistogramConsumerNoAggregation(t *testing.T) { - - // Create a histogram metric with missing aggregation attribute - metric := newHistogramMetricWithDataPoints( - "missing.aggregation.metric", - pmetric.AggregationTemporalityUnspecified, - nil) - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - sender := &mockGaugeSender{} - observedZapCore, observedLogs := observer.New(zap.DebugLevel) - settings := componenttest.NewNopTelemetrySettings() - settings.Logger = zap.New(observedZapCore) - consumer := newHistogramConsumer( - &mockHistogramDataPointConsumer{}, - &mockHistogramDataPointConsumer{}, - sender, - regularHistogram, - settings, - ) - assert.Equal(t, pmetric.MetricTypeHistogram, consumer.Type()) - var errs []error - expectedNoAggregationCount := 3 - for i := 0; i < expectedNoAggregationCount; i++ { - consumer.Consume(mi, &errs) - } - consumer.PushInternalMetrics(&errs) - - assert.Len(t, errs, 0) - assert.Contains(t, sender.metrics, tobsMetric{ - Name: noAggregationTemporalityMetricName, - Value: float64(expectedNoAggregationCount), - Tags: map[string]string{"type": "histogram"}, - }) - allLogs := observedLogs.All() - assert.Len(t, allLogs, expectedNoAggregationCount) -} - -func TestHistogramReporting(t *testing.T) { - observedZapCore, observedLogs := observer.New(zap.DebugLevel) - settings := componenttest.NewNopTelemetrySettings() - settings.Logger = zap.New(observedZapCore) - report := newHistogramReporting(settings) - metric := newMetric("a.metric", pmetric.MetricTypeHistogram) - malformedCount := 3 - for i := 0; i < malformedCount; i++ { - report.LogMalformed(metric) - } - noAggregationTemporalityCount := 5 - for i := 0; i < noAggregationTemporalityCount; i++ { - report.LogNoAggregationTemporality(metric) - } - - assert.Equal(t, int64(malformedCount), report.Malformed()) - assert.Equal(t, int64(noAggregationTemporalityCount), report.NoAggregationTemporality()) - assert.Equal( - t, - malformedCount+noAggregationTemporalityCount, - observedLogs.Len()) - - sender := &mockGaugeSender{} - var errs []error - - report.Report(sender, &errs) - - assert.Empty(t, errs) - assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: malformedHistogramMetricName, - Value: float64(malformedCount), - }) - assert.Contains( - t, - sender.metrics, - tobsMetric{ - Name: noAggregationTemporalityMetricName, - Value: float64(noAggregationTemporalityCount), - Tags: typeIsHistogramTags, - }) -} - -func TestHistogramReportingError(t *testing.T) { - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - sender := &mockGaugeSender{errorOnSend: true} - var errs []error - - report.Report(sender, &errs) - - assert.NotEmpty(t, errs) -} - -func TestCumulativeHistogramDataPointConsumer(t *testing.T) { - metric := newMetric("a.metric", pmetric.MetricTypeHistogram) - histogramDataPoint := pmetric.NewHistogramDataPoint() - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf - histogramDataPoint.ExplicitBounds().FromRaw([]float64{2.0, 5.0, 10.0}) - histogramDataPoint.BucketCounts().FromRaw([]uint64{5, 1, 3, 2}) - histogramDataPoint.Attributes().PutStr("foo", "bar") - sender := &mockGaugeSender{} - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := 
newCumulativeHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - assert.Empty(t, errs) - assert.Equal( - t, - []tobsMetric{ - { - Name: "a.metric", - Value: 5.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "le": "2", "res_attr_key": "res_attr_value"}, - }, - { - Name: "a.metric", - Value: 6.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "le": "5", "res_attr_key": "res_attr_value"}, - }, - { - Name: "a.metric", - Value: 9.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "le": "10", "res_attr_key": "res_attr_value"}, - }, - { - Name: "a.metric", - Value: 11.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "le": "+Inf", "res_attr_key": "res_attr_value"}, - }, - }, - sender.metrics, - ) -} - -func TestCumulativeHistogramDataPointConsumerError(t *testing.T) { - metric := newMetric("a.metric", pmetric.MetricTypeHistogram) - histogramDataPoint := pmetric.NewHistogramDataPoint() - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf - histogramDataPoint.ExplicitBounds().FromRaw([]float64{2.0, 5.0, 10.0}) - histogramDataPoint.BucketCounts().FromRaw([]uint64{5, 1, 3, 2}) - sender := &mockGaugeSender{errorOnSend: true} - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := newCumulativeHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - // We tried to send 4 metrics. We get 4 errors. - assert.Len(t, errs, 4) -} - -func TestCumulativeHistogramDataPointConsumerLeInUse(t *testing.T) { - metric := newMetric("a.metric", pmetric.MetricTypeHistogram) - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - histogramDataPoint := pmetric.NewHistogramDataPoint() - histogramDataPoint.ExplicitBounds().FromRaw([]float64{10.0}) - histogramDataPoint.BucketCounts().FromRaw([]uint64{4, 12}) - histogramDataPoint.Attributes().PutInt("le", 8) - sender := &mockGaugeSender{} - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := newCumulativeHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - assert.Empty(t, errs) - assert.Equal( - t, - []tobsMetric{ - { - Name: "a.metric", - Value: 4.0, - Source: "test_source", - Tags: map[string]string{"_le": "8", "le": "10", "res_attr_key": "res_attr_value"}, - }, - { - Name: "a.metric", - Value: 16.0, - Source: "test_source", - Tags: map[string]string{"_le": "8", "le": "+Inf", "res_attr_key": "res_attr_value"}, - }, - }, - sender.metrics, - ) -} - -func TestCumulativeHistogramDataPointConsumerMissingBuckets(t *testing.T) { - metric := newMetric("a.metric", pmetric.MetricTypeHistogram) - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - histogramDataPoint := pmetric.NewHistogramDataPoint() - sender := &mockGaugeSender{} - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := newCumulativeHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, 
fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - assert.Empty(t, errs) - assert.Empty(t, sender.metrics) - assert.Equal(t, int64(1), report.Malformed()) -} - -func TestDeltaHistogramDataPointConsumer(t *testing.T) { - metric := newMetric("a.delta.histogram", pmetric.MetricTypeHistogram) - histogramDataPoint := pmetric.NewHistogramDataPoint() - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf - histogramDataPoint.ExplicitBounds().FromRaw([]float64{2.0, 5.0, 10.0}) - histogramDataPoint.BucketCounts().FromRaw([]uint64{5, 1, 3, 2}) - setDataPointTimestamp(1631234567, histogramDataPoint) - histogramDataPoint.Attributes().PutStr("bar", "baz") - sender := &mockDistributionSender{} - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := newDeltaHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - assert.Empty(t, errs) - - assert.Equal( - t, - []tobsDistribution{ - { - Name: "a.delta.histogram", - Centroids: []histogram.Centroid{ - {Value: 2.0, Count: 5}, - {Value: 3.5, Count: 1}, - {Value: 7.5, Count: 3}, - {Value: 10.0, Count: 2}}, - Granularity: allGranularity, - Ts: 1631234567, - Source: "test_source", - Tags: map[string]string{"bar": "baz", "res_attr_key": "res_attr_value"}, - }, - }, - sender.distributions, - ) - assert.Equal(t, int64(0), report.Malformed()) -} - -func TestDeltaHistogramDataPointConsumer_OneBucket(t *testing.T) { - metric := newMetric("one.bucket.delta.histogram", pmetric.MetricTypeHistogram) - histogramDataPoint := pmetric.NewHistogramDataPoint() - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf - histogramDataPoint.ExplicitBounds().FromRaw([]float64{}) - histogramDataPoint.BucketCounts().FromRaw([]uint64{17}) - setDataPointTimestamp(1641234567, histogramDataPoint) - sender := &mockDistributionSender{} - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := newDeltaHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - assert.Empty(t, errs) - - assert.Equal( - t, - []tobsDistribution{ - { - Name: "one.bucket.delta.histogram", - Centroids: []histogram.Centroid{{Value: 0.0, Count: 17}}, - Granularity: allGranularity, - Ts: 1641234567, - Source: "test_source", - Tags: map[string]string{"res_attr_key": "res_attr_value"}, - }, - }, - sender.distributions, - ) - assert.Equal(t, int64(0), report.Malformed()) -} - -func TestDeltaHistogramDataPointConsumerError(t *testing.T) { - metric := newMetric("a.delta.histogram", pmetric.MetricTypeHistogram) - histogramDataPoint := pmetric.NewHistogramDataPoint() - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf - histogramDataPoint.ExplicitBounds().FromRaw([]float64{2.0, 5.0, 10.0}) - histogramDataPoint.BucketCounts().FromRaw([]uint64{5, 1, 3, 2}) - sender := &mockDistributionSender{errorOnSend: true} - report := 
newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := newDeltaHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - assert.Len(t, errs, 1) -} - -func TestDeltaHistogramDataPointConsumerMissingBuckets(t *testing.T) { - metric := newMetric("a.metric", pmetric.MetricTypeHistogram) - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - histogramDataPoint := pmetric.NewHistogramDataPoint() - sender := &mockDistributionSender{} - report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) - consumer := newDeltaHistogramDataPointConsumer(sender) - var errs []error - - consumer.Consume(mi, fromOtelHistogramDataPoint(histogramDataPoint), &errs, report) - - assert.Empty(t, errs) - assert.Empty(t, sender.distributions) - assert.Equal(t, int64(1), report.Malformed()) -} - -func TestSummaries(t *testing.T) { - summaryMetric := newMetric("test.summary", pmetric.MetricTypeSummary) - summary := summaryMetric.Summary() - dataPoints := summary.DataPoints() - dataPoints.EnsureCapacity(2) - - mi := metricInfo{Metric: summaryMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - dataPoint := dataPoints.AppendEmpty() - setQuantileValues(dataPoint, 0.1, 100.0, 0.5, 200.0, 0.9, 300.0, 0.99, 400.0) - dataPoint.Attributes().PutStr("foo", "bar") - dataPoint.SetCount(10) - dataPoint.SetSum(5000.0) - setDataPointTimestamp(1645123456, dataPoint) - - dataPoint = dataPoints.AppendEmpty() - setQuantileValues(dataPoint, 0.2, 75.0, 0.5, 125.0, 0.8, 175.0, 0.95, 225.0) - dataPoint.Attributes().PutStr("bar", "baz") - dataPoint.SetCount(15) - dataPoint.SetSum(3000.0) - setDataPointTimestamp(1645123556, dataPoint) - - sender := &mockGaugeSender{} - consumer := newSummaryConsumer(sender, componenttest.NewNopTelemetrySettings()) - - assert.Equal(t, pmetric.MetricTypeSummary, consumer.Type()) - - var errs []error - consumer.Consume(mi, &errs) - - assert.Empty(t, errs) - - expected := []tobsMetric{ - { - Name: "test.summary", - Value: 100.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "quantile": "0.1", "res_attr_key": "res_attr_value"}, - Ts: 1645123456, - }, - { - Name: "test.summary", - Value: 200.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "quantile": "0.5", "res_attr_key": "res_attr_value"}, - Ts: 1645123456, - }, - { - Name: "test.summary", - Value: 300.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "quantile": "0.9", "res_attr_key": "res_attr_value"}, - Ts: 1645123456, - }, - { - Name: "test.summary", - Value: 400.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "quantile": "0.99", "res_attr_key": "res_attr_value"}, - Ts: 1645123456, - }, - { - Name: "test.summary_count", - Value: 10.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "res_attr_key": "res_attr_value"}, - Ts: 1645123456, - }, - { - Name: "test.summary_sum", - Value: 5000.0, - Source: "test_source", - Tags: map[string]string{"foo": "bar", "res_attr_key": "res_attr_value"}, - Ts: 1645123456, - }, - { - Name: "test.summary", - Value: 75.0, - Source: "test_source", - Tags: map[string]string{"bar": "baz", "quantile": "0.2", "res_attr_key": "res_attr_value"}, - Ts: 1645123556, - }, - { - Name: "test.summary", - Value: 125.0, - Source: "test_source", - Tags: 
map[string]string{"bar": "baz", "quantile": "0.5", "res_attr_key": "res_attr_value"}, - Ts: 1645123556, - }, - { - Name: "test.summary", - Value: 175.0, - Source: "test_source", - Tags: map[string]string{"bar": "baz", "quantile": "0.8", "res_attr_key": "res_attr_value"}, - Ts: 1645123556, - }, - { - Name: "test.summary", - Value: 225.0, - Source: "test_source", - Tags: map[string]string{"bar": "baz", "quantile": "0.95", "res_attr_key": "res_attr_value"}, - Ts: 1645123556, - }, - { - Name: "test.summary_count", - Value: 15.0, - Source: "test_source", - Tags: map[string]string{"bar": "baz", "res_attr_key": "res_attr_value"}, - Ts: 1645123556, - }, - { - Name: "test.summary_sum", - Value: 3000.0, - Source: "test_source", - Tags: map[string]string{"bar": "baz", "res_attr_key": "res_attr_value"}, - Ts: 1645123556, - }, - } - assert.ElementsMatch(t, expected, sender.metrics) -} - -func TestSummaries_QuantileTagExists(t *testing.T) { - summaryMetric := newMetric("test.summary.quantile.tag", pmetric.MetricTypeSummary) - summary := summaryMetric.Summary() - dataPoints := summary.DataPoints() - dataPoints.EnsureCapacity(1) - - mi := metricInfo{Metric: summaryMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - dataPoint := dataPoints.AppendEmpty() - setQuantileValues(dataPoint, 0.5, 300.0) - dataPoint.Attributes().PutStr("quantile", "exists") - dataPoint.SetCount(12) - dataPoint.SetSum(4000.0) - setDataPointTimestamp(1650123456, dataPoint) - - sender := &mockGaugeSender{} - consumer := newSummaryConsumer(sender, componenttest.NewNopTelemetrySettings()) - var errs []error - consumer.Consume(mi, &errs) - assert.Empty(t, errs) - - expected := []tobsMetric{ - { - Name: "test.summary.quantile.tag", - Value: 300.0, - Source: "test_source", - Tags: map[string]string{"_quantile": "exists", "quantile": "0.5", "res_attr_key": "res_attr_value"}, - Ts: 1650123456, - }, - { - Name: "test.summary.quantile.tag_count", - Value: 12.0, - Source: "test_source", - Tags: map[string]string{"_quantile": "exists", "res_attr_key": "res_attr_value"}, - Ts: 1650123456, - }, - { - Name: "test.summary.quantile.tag_sum", - Value: 4000.0, - Source: "test_source", - Tags: map[string]string{"_quantile": "exists", "res_attr_key": "res_attr_value"}, - Ts: 1650123456, - }, - } - assert.ElementsMatch(t, expected, sender.metrics) -} - -func TestSummariesConsumer_ErrorSending(t *testing.T) { - summaryMetric := newMetric("test.summary.error", pmetric.MetricTypeSummary) - summary := summaryMetric.Summary() - mi := metricInfo{Metric: summaryMetric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - dataPoints := summary.DataPoints() - dataPoints.EnsureCapacity(1) - - dataPoint := dataPoints.AppendEmpty() - dataPoint.SetCount(13) - dataPoint.SetSum(3900.0) - - sender := &mockGaugeSender{errorOnSend: true} - consumer := newSummaryConsumer(sender, componenttest.NewNopTelemetrySettings()) - var errs []error - consumer.Consume(mi, &errs) - assert.NotEmpty(t, errs) -} - -// Sets quantile values for a summary data point -func setQuantileValues(dataPoint pmetric.SummaryDataPoint, quantileValues ...float64) { - if len(quantileValues)%2 != 0 { - panic("quantileValues must be quantile, value, quantile, value, ...") - } - length := len(quantileValues) / 2 - quantileValuesSlice := dataPoint.QuantileValues() - quantileValuesSlice.EnsureCapacity(length) - for i := 0; i < length; i++ { - quantileValueObj := 
quantileValuesSlice.AppendEmpty() - quantileValueObj.SetQuantile(quantileValues[2*i]) - quantileValueObj.SetValue(quantileValues[2*i+1]) - } -} - -func TestExponentialHistogramConsumerSpec(t *testing.T) { - metric := newExponentialHistogramMetricWithDataPoints( - "a.metric", pmetric.AggregationTemporalityDelta, []uint64{4, 7, 11}) - assert.Equal(t, pmetric.MetricTypeExponentialHistogram, exponentialHistogram.Type()) - assert.Equal( - t, - pmetric.AggregationTemporalityDelta, - exponentialHistogram.AggregationTemporality(metric)) - points := exponentialHistogram.DataPoints(metric) - assert.Len(t, points, 3) - - // 4 + 4 + 2 - assert.Len(t, points[0].AsCumulative(), 10) - - // 7 + 7 + 2 - assert.Len(t, points[1].AsCumulative(), 16) - - // 11 + 11 + 2 - assert.Len(t, points[2].AsCumulative(), 24) -} - -func TestExponentialHistogramDataPoint(t *testing.T) { - dataPoint := pmetric.NewExponentialHistogramDataPoint() - dataPoint.SetScale(1) - dataPoint.Negative().SetOffset(6) - dataPoint.Negative().BucketCounts().FromRaw([]uint64{15, 16, 17}) - dataPoint.Positive().SetOffset(3) - dataPoint.Positive().BucketCounts().FromRaw([]uint64{5, 6, 7, 8}) - dataPoint.SetZeroCount(2) - dataPoint.Attributes().PutStr("foo", "bar") - dataPoint.Attributes().PutStr("baz", "7") - setDataPointTimestamp(1640198765, dataPoint) - point := fromOtelExponentialHistogramDataPoint(dataPoint) - assertBuckets( - t, - []cumulativeBucket{ - {Tag: "-16", Count: 17}, - {Tag: "-11.3137", Count: 33}, - {Tag: "-8", Count: 48}, - {Tag: "2.8284", Count: 50}, - {Tag: "4", Count: 55}, - {Tag: "5.6569", Count: 61}, - {Tag: "8", Count: 68}, - {Tag: "11.3137", Count: 76}, - {Tag: "+Inf", Count: 76}, - }, - point.AsCumulative()) - assertCentroids( - t, - []histogram.Centroid{ - {Value: -19.3137, Count: 17}, - {Value: -13.6569, Count: 16}, - {Value: -9.6569, Count: 15}, - {Value: -2.5858, Count: 2}, - {Value: 3.4142, Count: 5}, - {Value: 4.8284, Count: 6}, - {Value: 6.8284, Count: 7}, - {Value: 9.6569, Count: 8}, - }, - point.AsDelta()) - assert.Equal(t, map[string]string{"foo": "bar", "baz": "7"}, attributesToTags(point.Attributes)) - assert.Equal(t, int64(1640198765), point.SecondsSinceEpoch) -} - -func TestExponentialHistogramDataPoint_ZeroOnly(t *testing.T) { - dataPoint := pmetric.NewExponentialHistogramDataPoint() - dataPoint.SetScale(0) - dataPoint.Negative().SetOffset(2) - dataPoint.Positive().SetOffset(1) - dataPoint.SetZeroCount(5) - point := fromOtelExponentialHistogramDataPoint(dataPoint) - assertBuckets( - t, - []cumulativeBucket{ - {Tag: "2.0", Count: 5}, - {Tag: "+Inf", Count: 5}, - }, - point.AsCumulative()) - assertCentroids( - t, - []histogram.Centroid{ - {Value: -1.0, Count: 5}, - }, - point.AsDelta()) -} - -// Creates a histogram metric with len(numBucketCountsForEachDataPoint) -// datapoints. name is the name of the histogram metric; temporality -// is the temporality of the histogram metric; -// numBucketCountsForEachDataPoint contains the number of buckets for each -// data point. 
-func newHistogramMetricWithDataPoints( - name string, - temporality pmetric.AggregationTemporality, - numBucketCountsForEachDataPoint []int, -) pmetric.Metric { - result := newMetric(name, pmetric.MetricTypeHistogram) - aHistogram := result.Histogram() - aHistogram.SetAggregationTemporality(temporality) - aHistogram.DataPoints().EnsureCapacity(len(numBucketCountsForEachDataPoint)) - for _, count := range numBucketCountsForEachDataPoint { - point := aHistogram.DataPoints().AppendEmpty() - point.BucketCounts().FromRaw(make([]uint64, count)) - point.ExplicitBounds().FromRaw(make([]float64, count-1)) - } - return result -} - -// Works like newHistogramMetricWithDataPoints but creates an exponential histogram metric -func newExponentialHistogramMetricWithDataPoints( - name string, - temporality pmetric.AggregationTemporality, - positiveAndNegativeBucketCountsForEachDataPoint []uint64, -) pmetric.Metric { - result := newMetric(name, pmetric.MetricTypeExponentialHistogram) - aHistogram := result.ExponentialHistogram() - aHistogram.SetAggregationTemporality(temporality) - aHistogram.DataPoints().EnsureCapacity(len(positiveAndNegativeBucketCountsForEachDataPoint)) - for _, count := range positiveAndNegativeBucketCountsForEachDataPoint { - point := aHistogram.DataPoints().AppendEmpty() - point.Negative().BucketCounts().FromRaw(make([]uint64, count)) - point.Positive().BucketCounts().FromRaw(make([]uint64, count)) - } - return result -} - -func verifyGaugeConsumer(t *testing.T, errorOnSend bool) { - metric := newMetric("test.metric", pmetric.MetricTypeGauge) - mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name", ResourceAttrs: map[string]string{"res_attr_key": "res_attr_value"}} - dataPoints := metric.Gauge().DataPoints() - dataPoints.EnsureCapacity(2) - addDataPoint( - 7, - 1631205001, - map[string]any{"env": "prod", "bucket": 73}, - dataPoints, - ) - addDataPoint( - 7.5, - 1631205002, - map[string]any{"env": "prod", "bucket": 73}, - dataPoints, - ) - expected := []tobsMetric{ - { - Name: "test.metric", - Value: 7.0, - Ts: 1631205001, - Source: "test_source", - Tags: map[string]string{"env": "prod", "bucket": "73", "res_attr_key": "res_attr_value"}, - }, - { - Name: "test.metric", - Value: 7.5, - Ts: 1631205002, - Source: "test_source", - Tags: map[string]string{"env": "prod", "bucket": "73", "res_attr_key": "res_attr_value"}, - }, - } - sender := &mockGaugeSender{errorOnSend: errorOnSend} - consumer := newGaugeConsumer(sender, componenttest.NewNopTelemetrySettings()) - - assert.Equal(t, pmetric.MetricTypeGauge, consumer.Type()) - var errs []error - consumer.Consume(mi, &errs) - assert.ElementsMatch(t, expected, sender.metrics) - if errorOnSend { - assert.Len(t, errs, len(expected)) - } else { - assert.Empty(t, errs) - } -} - -func constructMetrics(metricList ...pmetric.Metric) pmetric.Metrics { - result := pmetric.NewMetrics() - result.ResourceMetrics().EnsureCapacity(1) - rm := result.ResourceMetrics().AppendEmpty() - rm.ScopeMetrics().EnsureCapacity(1) - ilm := rm.ScopeMetrics().AppendEmpty() - ilm.Metrics().EnsureCapacity(len(metricList)) - for _, metric := range metricList { - metric.CopyTo(ilm.Metrics().AppendEmpty()) - } - return result -} - -func constructMetricsWithTags(tags map[string]string, metricList ...pmetric.Metric) pmetric.Metrics { - result := pmetric.NewMetrics() - result.ResourceMetrics().EnsureCapacity(1) - rm := result.ResourceMetrics().AppendEmpty() - for key, val := range tags { - rm.Resource().Attributes().PutStr(key, val) - } - 
rm.ScopeMetrics().EnsureCapacity(1) - ilm := rm.ScopeMetrics().AppendEmpty() - ilm.Metrics().EnsureCapacity(len(metricList)) - for _, metric := range metricList { - metric.CopyTo(ilm.Metrics().AppendEmpty()) - } - return result -} - -func newMetric(name string, typ pmetric.MetricType) pmetric.Metric { - result := pmetric.NewMetric() - result.SetName(name) - switch typ { - case pmetric.MetricTypeGauge: - result.SetEmptyGauge() - case pmetric.MetricTypeSum: - result.SetEmptySum() - case pmetric.MetricTypeHistogram: - result.SetEmptyHistogram() - case pmetric.MetricTypeExponentialHistogram: - result.SetEmptyExponentialHistogram() - case pmetric.MetricTypeSummary: - result.SetEmptySummary() - } - return result -} - -func addDataPoint( - value any, - ts int64, - tags map[string]any, - slice pmetric.NumberDataPointSlice, -) { - dataPoint := slice.AppendEmpty() - if value != nil { - setDataPointValue(value, dataPoint) - } - setDataPointTimestamp(ts, dataPoint) - //nolint:errcheck - dataPoint.Attributes().FromRaw(tags) -} - -type dataPointWithTimestamp interface { - SetTimestamp(v pcommon.Timestamp) -} - -func setDataPointTimestamp(ts int64, dataPoint dataPointWithTimestamp) { - dataPoint.SetTimestamp( - pcommon.NewTimestampFromTime(time.Unix(ts, 0))) -} - -func setDataPointValue(value any, dataPoint pmetric.NumberDataPoint) { - switch v := value.(type) { - case int: - dataPoint.SetIntValue(int64(v)) - case int64: - dataPoint.SetIntValue(v) - case float64: - dataPoint.SetDoubleValue(v) - default: - panic("Unsupported value type") - } -} - -type tobsMetric struct { - Name string - Value float64 - Ts int64 - Source string - Tags map[string]string -} - -type mockGaugeSender struct { - errorOnSend bool - metrics []tobsMetric -} - -func (m *mockGaugeSender) SendMetric( - name string, value float64, ts int64, source string, tags map[string]string, -) error { - m.metrics = append(m.metrics, tobsMetric{ - Name: name, - Value: value, - Ts: ts, - Source: source, - Tags: copyTags(tags), - }) - if m.errorOnSend { - return errors.New("error sending") - } - return nil -} - -type tobsDistribution struct { - Name string - Centroids []histogram.Centroid - Granularity map[histogram.Granularity]bool - Ts int64 - Source string - Tags map[string]string -} - -type mockDistributionSender struct { - errorOnSend bool - distributions []tobsDistribution -} - -func (m *mockDistributionSender) SendDistribution( - name string, - centroids []histogram.Centroid, - granularity map[histogram.Granularity]bool, - ts int64, - source string, - tags map[string]string, -) error { - m.distributions = append(m.distributions, tobsDistribution{ - Name: name, - Centroids: copyCentroids(centroids), - Granularity: copyGranularity(granularity), - Ts: ts, - Source: source, - Tags: copyTags(tags), - }) - if m.errorOnSend { - return errors.New("error sending") - } - return nil -} - -type mockTypedMetricConsumer struct { - typ pmetric.MetricType - errorOnConsume bool - errorOnPushInternalMetrics bool - names []string - sources []string - sourceKeys []string - pushInternalMetricsCallCount int -} - -func (m *mockTypedMetricConsumer) Type() pmetric.MetricType { - return m.typ -} - -func (m *mockTypedMetricConsumer) Consume(mi metricInfo, errs *[]error) { - m.names = append(m.names, mi.Name()) - m.sources = append(m.sources, mi.Source) - m.sourceKeys = append(m.sourceKeys, mi.SourceKey) - if m.errorOnConsume { - *errs = append(*errs, errors.New("error in consume")) - } -} - -func (m *mockTypedMetricConsumer) PushInternalMetrics(errs *[]error) { - 
m.pushInternalMetricsCallCount++ - if m.errorOnPushInternalMetrics { - *errs = append(*errs, errors.New("error in consume")) - } -} - -type mockFlushCloser struct { - errorOnFlush bool - numFlushCalls int - numCloseCalls int -} - -func (m *mockFlushCloser) Flush() error { - m.numFlushCalls++ - if m.errorOnFlush { - return errors.New("error flushing") - } - return nil -} - -func (m *mockFlushCloser) Close() { - m.numCloseCalls++ -} - -type mockHistogramDataPointConsumer struct { - names []string - counts []int -} - -func (m *mockHistogramDataPointConsumer) Consume( - mi metricInfo, point bucketHistogramDataPoint, _ *[]error, _ *histogramReporting) { - m.names = append(m.names, mi.Name()) - m.counts = append(m.counts, len(point.AsCumulative())) -} - -func copyTags(tags map[string]string) map[string]string { - if len(tags) == 0 { - return nil - } - tagsCopy := make(map[string]string, len(tags)) - for k, v := range tags { - tagsCopy[k] = v - } - return tagsCopy -} - -type mockSumSender struct { - errorOnSend bool - metrics []tobsMetric - deltaMetrics []tobsMetric -} - -func (m *mockSumSender) SendMetric( - name string, value float64, ts int64, source string, tags map[string]string, -) error { - m.metrics = append(m.metrics, tobsMetric{ - Name: name, - Value: value, - Ts: ts, - Source: source, - Tags: copyTags(tags), - }) - if m.errorOnSend { - return errors.New("error sending") - } - return nil -} - -func (m *mockSumSender) SendDeltaCounter( - name string, value float64, source string, tags map[string]string, -) error { - m.deltaMetrics = append(m.deltaMetrics, tobsMetric{ - Name: name, - Value: value, - Source: source, - Tags: copyTags(tags), - }) - if m.errorOnSend { - return errors.New("error sending") - } - return nil -} - -func copyCentroids(centroids []histogram.Centroid) []histogram.Centroid { - if centroids == nil { - return nil - } - result := make([]histogram.Centroid, len(centroids)) - copy(result, centroids) - return result -} - -func copyGranularity( - granularity map[histogram.Granularity]bool) map[histogram.Granularity]bool { - if granularity == nil { - return nil - } - result := make(map[histogram.Granularity]bool, len(granularity)) - for k, v := range granularity { - result[k] = v - } - return result -} - -func assertBuckets(t *testing.T, expected, actual []cumulativeBucket) { - assert.Equal(t, len(expected), len(actual), "len") - for i := range expected { - assert.Equal(t, expected[i].Count, actual[i].Count, "count") - assert.True(t, tagsEqual(expected[i].Tag, actual[i].Tag), "tag") - } -} - -func tagsEqual(expected, actual string) bool { - if expected == actual { - return true - } - expectedF, errE := strconv.ParseFloat(expected, 64) - actualF, errF := strconv.ParseFloat(actual, 64) - if errE != nil || errF != nil { - return false - } - return math.Abs(expectedF-actualF) < 0.0001 -} - -func assertCentroids(t *testing.T, expected, actual []histogram.Centroid) { - assert.Equal(t, len(expected), len(actual), "len") - for i := range expected { - assert.Equal(t, expected[i].Count, actual[i].Count, "count") - assert.InDelta(t, expected[i].Value, actual[i].Value, 0.0001, "value") - } -} diff --git a/exporter/tanzuobservabilityexporter/testdata/config.yaml b/exporter/tanzuobservabilityexporter/testdata/config.yaml deleted file mode 100644 index d2893dd98b9e..000000000000 --- a/exporter/tanzuobservabilityexporter/testdata/config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -receivers: - nop: - -processors: - nop: - -exporters: - tanzuobservability: - traces: - endpoint: 
"http://localhost:40001" - metrics: - endpoint: "http://localhost:2916" - resource_attrs_included: true - app_tags_excluded: true - retry_on_failure: - enabled: true - initial_interval: 10s - max_interval: 60s - max_elapsed_time: 10m - sending_queue: - enabled: true - num_consumers: 2 - queue_size: 10 - -service: - pipelines: - traces: - receivers: [ nop ] - processors: [ nop ] - exporters: [ tanzuobservability ] diff --git a/exporter/tanzuobservabilityexporter/trace_exporter.go b/exporter/tanzuobservabilityexporter/trace_exporter.go deleted file mode 100644 index a8989b62374f..000000000000 --- a/exporter/tanzuobservabilityexporter/trace_exporter.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" - -import ( - "context" - "errors" - "fmt" - - "github.com/google/uuid" - "github.com/wavefronthq/wavefront-sdk-go/senders" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/pdata/ptrace" - "go.uber.org/multierr" - "go.uber.org/zap" -) - -const ( - defaultApplicationName = "defaultApp" - defaultServiceName = "defaultService" - defaultMetricsPort = 2878 - labelApplication = "application" - labelCluster = "cluster" - labelShard = "shard" - labelError = "error" - labelEventName = "name" - labelService = "service" - labelSpanKind = "span.kind" - labelSource = "source" - labelDroppedEventsCount = "otel.dropped_events_count" - labelDroppedLinksCount = "otel.dropped_links_count" - labelDroppedAttrsCount = "otel.dropped_attributes_count" - labelOtelScopeName = "otel.scope.name" - labelOtelScopeVersion = "otel.scope.version" -) - -// spanSender Interface for sending tracing spans to Tanzu Observability -type spanSender interface { - // SendSpan mirrors sender.SpanSender from wavefront-sdk-go. - // traceId, spanId, parentIds and preceding spanIds are expected to be UUID strings. - // parents and preceding spans can be empty for a root span. 
- // span tag keys can be repeated (example: "user"="foo" and "user"="bar") - SendSpan(name string, startMillis, durationMillis int64, source, traceID, spanID string, parents, followsFrom []string, tags []senders.SpanTag, spanLogs []senders.SpanLog) error - Flush() error - Close() -} - -type tracesExporter struct { - cfg *Config - sender spanSender - logger *zap.Logger -} - -func newTracesExporter(settings exporter.CreateSettings, c component.Config) (*tracesExporter, error) { - cfg, ok := c.(*Config) - if !ok { - return nil, fmt.Errorf("invalid config: %#v", c) - } - if !cfg.hasTracesEndpoint() { - return nil, fmt.Errorf("traces.endpoint required") - } - _, _, err := cfg.parseTracesEndpoint() - if err != nil { - return nil, fmt.Errorf("failed to parse traces.endpoint: %w", err) - } - metricsPort := defaultMetricsPort - if cfg.hasMetricsEndpoint() { - _, metricsPort, err = cfg.parseMetricsEndpoint() - if err != nil { - return nil, fmt.Errorf("failed to parse metrics.endpoint: %w", err) - } - } - - // we specify a MetricsPort so the SDK can report its internal metrics - // but don't currently export any metrics from the pipeline - s, err := senders.NewSender(cfg.Traces.Endpoint, - senders.MetricsPort(metricsPort), - senders.FlushIntervalSeconds(60), - senders.SDKMetricsTags(map[string]string{"otel.traces.collector_version": settings.BuildInfo.Version}), - ) - if err != nil { - return nil, fmt.Errorf("failed to create proxy sender: %w", err) - } - - return &tracesExporter{ - cfg: cfg, - sender: s, - logger: settings.Logger, - }, nil -} - -func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error { - var errs error - - for i := 0; i < td.ResourceSpans().Len(); i++ { - rspans := td.ResourceSpans().At(i) - resource := rspans.Resource() - for j := 0; j < rspans.ScopeSpans().Len(); j++ { - ispans := rspans.ScopeSpans().At(j) - transform := newTraceTransformer(resource) - - libraryName := ispans.Scope().Name() - libraryVersion := ispans.Scope().Version() - - for k := 0; k < ispans.Spans().Len(); k++ { - select { - case <-ctx.Done(): - return multierr.Append(errs, errors.New("context canceled")) - default: - transformedSpan, err := transform.Span(ispans.Spans().At(k)) - if err != nil { - errs = multierr.Append(errs, err) - continue - } - - if libraryName != "" { - transformedSpan.Tags[labelOtelScopeName] = libraryName - } - - if libraryVersion != "" { - transformedSpan.Tags[labelOtelScopeVersion] = libraryVersion - } - - if err := e.recordSpan(transformedSpan); err != nil { - errs = multierr.Append(errs, err) - continue - } - } - } - } - } - - errs = multierr.Append(errs, e.sender.Flush()) - return errs -} - -func (e *tracesExporter) recordSpan(span span) error { - var parents []string - if span.ParentSpanID != uuid.Nil { - parents = []string{span.ParentSpanID.String()} - } - - return e.sender.SendSpan( - span.Name, - span.StartMillis, - span.DurationMillis, - span.Source, - span.TraceID.String(), - span.SpanID.String(), - parents, - nil, - mapToSpanTags(span.Tags), - span.SpanLogs, - ) -} - -func (e *tracesExporter) shutdown(_ context.Context) error { - e.sender.Close() - return nil -} - -func mapToSpanTags(tags map[string]string) []senders.SpanTag { - spanTags := make([]senders.SpanTag, 0, len(tags)) - for k, v := range tags { - spanTags = append(spanTags, senders.SpanTag{ - Key: k, - Value: v, - }) - } - return spanTags -} diff --git a/exporter/tanzuobservabilityexporter/trace_exporter_test.go b/exporter/tanzuobservabilityexporter/trace_exporter_test.go deleted file 
mode 100644 index ec85f5328b0e..000000000000 --- a/exporter/tanzuobservabilityexporter/trace_exporter_test.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter - -import ( - "context" - "strings" - "testing" - - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/wavefronthq/wavefront-sdk-go/senders" - "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/exporter/exportertest" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" - "go.uber.org/zap" -) - -func TestSpansRequireTraceAndSpanIDs(t *testing.T) { - spanWithNoTraceID := ptrace.NewSpan() - spanWithNoTraceID.SetSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}) - spanWithNoSpanID := ptrace.NewSpan() - spanWithNoSpanID.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - traces := constructTraces([]ptrace.Span{spanWithNoTraceID, spanWithNoSpanID}) - - _, err := consumeTraces(traces) - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), errInvalidSpanID.Error())) - assert.True(t, strings.Contains(err.Error(), errInvalidTraceID.Error())) -} - -func TestExportTraceDataMinimum(t *testing.T) { - // source= - // getAllUsers source=localhost traceId=7b3bf470-9456-11e8-9eb6-529269fb1459 spanId=0313bafe-9457-11e8-9eb6-529269fb1459 parent=2f64e538-9457-11e8-9eb6-529269fb1459 application=Wavefront service=auth cluster=us-west-2 shard=secondary http.method=GET 1552949776000 343 - minSpan := createSpan( - "root", - [16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, - [8]byte{9, 9, 9, 9, 9, 9, 9, 9}, - pcommon.SpanID{}, - ) - traces := constructTraces([]ptrace.Span{minSpan}) - - expected := []*span{{ - Name: "root", - TraceID: uuid.MustParse("01010101-0101-0101-0101-010101010101"), - SpanID: uuid.MustParse("00000000-0000-0000-0909-090909090909"), - Tags: map[string]string{ - labelApplication: "defaultApp", - labelService: "defaultService", - }, - }} - - validateTraces(t, expected, traces) -} - -func TestExportTraceDataFullTrace(t *testing.T) { - traceID := pcommon.TraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - - rootSpan := createSpan( - "root", - traceID, - [8]byte{0, 0, 0, 0, 0, 0, 0, 1}, - pcommon.SpanID{}, - ) - - clientSpan := createSpan( - "client", - traceID, - [8]byte{0, 0, 0, 0, 0, 0, 0, 2}, - rootSpan.SpanID(), - ) - - clientSpan.SetKind(ptrace.SpanKindClient) - event := ptrace.NewSpanEvent() - event.SetName("client-event") - event.CopyTo(clientSpan.Events().AppendEmpty()) - - status := ptrace.NewStatus() - status.SetCode(ptrace.StatusCodeError) - status.SetMessage("an error event occurred") - status.CopyTo(clientSpan.Status()) - - clientSpan.Attributes().PutStr(labelApplication, "test-app") - - serverSpan := createSpan( - "server", - traceID, - pcommon.SpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3}), - clientSpan.SpanID(), - ) - serverSpan.SetKind(ptrace.SpanKindServer) - serverSpan.TraceState().FromRaw("key=val") - serverAttrs := serverSpan.Attributes() - serverAttrs.PutStr(conventions.AttributeServiceName, "the-server") - serverAttrs.PutStr(conventions.AttributeHTTPMethod, "POST") - serverAttrs.PutInt(conventions.AttributeHTTPStatusCode, 403) - serverAttrs.PutStr(labelSource, "test_source") - - traces := constructTraces([]ptrace.Span{rootSpan, clientSpan, serverSpan}) - resourceAttrs := 
traces.ResourceSpans().At(0).Resource().Attributes() - resourceAttrs.PutStr("resource", "R1") - resourceAttrs.PutStr(conventions.AttributeServiceName, "test-service") - resourceAttrs.PutStr(labelSource, "test-source") - - expected := []*span{ - { - Name: "root", - SpanID: uuid.MustParse("00000000000000000000000000000001"), - TraceID: uuid.MustParse("01010101010101010101010101010101"), - Source: "test-source", - Tags: map[string]string{ - "resource": "R1", - labelApplication: "defaultApp", - labelService: "test-service", - }, - }, - { - Name: "client", - SpanID: uuid.MustParse("00000000000000000000000000000002"), - TraceID: uuid.MustParse("01010101010101010101010101010101"), - ParentSpanID: uuid.MustParse("00000000000000000000000000000001"), - Source: "test-source", - Tags: map[string]string{ - "resource": "R1", - labelApplication: "test-app", - labelService: "test-service", - "otel.status_description": "an error event occurred", - "error": "true", - labelSpanKind: "client", - }, - SpanLogs: []senders.SpanLog{{ - Fields: map[string]string{labelEventName: "client-event"}, - }}, - }, - { - Name: "server", - SpanID: uuid.MustParse("00000000000000000000000000000003"), - TraceID: uuid.MustParse("01010101010101010101010101010101"), - ParentSpanID: uuid.MustParse("00000000000000000000000000000002"), - Source: "test-source", - Tags: map[string]string{ - "resource": "R1", - labelApplication: "defaultApp", - labelService: "the-server", - labelSpanKind: "server", - conventions.AttributeHTTPStatusCode: "403", - conventions.AttributeHTTPMethod: "POST", - "w3c.tracestate": "key=val", - }, - }, - } - - validateTraces(t, expected, traces) -} - -func validateTraces(t *testing.T, expected []*span, traces ptrace.Traces) { - actual, err := consumeTraces(traces) - require.NoError(t, err) - require.Equal(t, len(expected), len(actual)) - for i := 0; i < len(expected); i++ { - assert.Equal(t, expected[i].Name, actual[i].Name) - assert.Equal(t, expected[i].TraceID, actual[i].TraceID) - assert.Equal(t, expected[i].SpanID, actual[i].SpanID) - assert.Equal(t, expected[i].ParentSpanID, actual[i].ParentSpanID) - for k, v := range expected[i].Tags { - a, ok := actual[i].Tags[k] - assert.True(t, ok, "tag '"+k+"' not found") - assert.Equal(t, v, a) - } - assert.Equal(t, expected[i].StartMillis, actual[i].StartMillis) - assert.Equal(t, expected[i].DurationMillis, actual[i].DurationMillis) - assert.Equal(t, expected[i].SpanLogs, actual[i].SpanLogs) - assert.Equal(t, expected[i].Source, actual[i].Source) - } -} - -func TestExportTraceDataWithInstrumentationDetails(t *testing.T) { - minSpan := createSpan( - "root", - pcommon.TraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - pcommon.SpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), - pcommon.SpanID{}, - ) - traces := constructTraces([]ptrace.Span{minSpan}) - - scope := traces.ResourceSpans().At(0).ScopeSpans().At(0).Scope() - scope.SetName("instrumentation_name") - scope.SetVersion("v0.0.1") - - expected := []*span{{ - Name: "root", - TraceID: uuid.MustParse("01010101-0101-0101-0101-010101010101"), - SpanID: uuid.MustParse("00000000-0000-0000-0909-090909090909"), - Tags: map[string]string{ - labelApplication: "defaultApp", - labelService: "defaultService", - labelOtelScopeName: "instrumentation_name", - labelOtelScopeVersion: "v0.0.1", - }, - }} - - validateTraces(t, expected, traces) -} - -func TestExportTraceDataRespectsContext(t *testing.T) { - traces := constructTraces([]ptrace.Span{createSpan( - "root", - pcommon.TraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1}), - pcommon.SpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), - pcommon.SpanID{}, - )}) - - sender := &mockSender{} - cfg := createDefaultConfig() - exp := tracesExporter{ - cfg: cfg.(*Config), - sender: sender, - logger: zap.NewNop(), - } - mockOTelTracesExporter, err := exporterhelper.NewTracesExporter( - context.Background(), - exportertest.NewNopCreateSettings(), - cfg, - exp.pushTraceData, - exporterhelper.WithShutdown(exp.shutdown), - ) - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - require.Error(t, mockOTelTracesExporter.ConsumeTraces(ctx, traces)) -} - -func createSpan( - name string, - traceID pcommon.TraceID, - spanID pcommon.SpanID, - parentSpanID pcommon.SpanID, -) ptrace.Span { - span := ptrace.NewSpan() - span.SetName(name) - span.SetTraceID(traceID) - span.SetSpanID(spanID) - span.SetParentSpanID(parentSpanID) - return span -} - -func constructTraces(spans []ptrace.Span) ptrace.Traces { - traces := ptrace.NewTraces() - traces.ResourceSpans().EnsureCapacity(1) - rs := traces.ResourceSpans().AppendEmpty() - rs.ScopeSpans().EnsureCapacity(1) - ils := rs.ScopeSpans().AppendEmpty() - ils.Spans().EnsureCapacity(len(spans)) - for _, span := range spans { - span.CopyTo(ils.Spans().AppendEmpty()) - } - return traces -} - -func consumeTraces(ptrace ptrace.Traces) ([]*span, error) { - ctx := context.Background() - sender := &mockSender{} - - cfg := createDefaultConfig() - exp := tracesExporter{ - cfg: cfg.(*Config), - sender: sender, - logger: zap.NewNop(), - } - mockOTelTracesExporter, err := exporterhelper.NewTracesExporter( - context.Background(), - exportertest.NewNopCreateSettings(), - cfg, - exp.pushTraceData, - exporterhelper.WithShutdown(exp.shutdown), - ) - - if err != nil { - return nil, err - } - if err := mockOTelTracesExporter.ConsumeTraces(ctx, ptrace); err != nil { - return nil, err - } - if err := mockOTelTracesExporter.Shutdown(ctx); err != nil { - return nil, err - } - return sender.spans, nil -} - -// implements the spanSender interface -type mockSender struct { - spans []*span -} - -func (m *mockSender) SendSpan( - name string, - startMillis, durationMillis int64, - source, traceID, spanID string, - parents, _ []string, - spanTags []senders.SpanTag, - spanLogs []senders.SpanLog, -) error { - var parentSpanID uuid.UUID - if len(parents) == 1 { - parentSpanID = uuid.MustParse(parents[0]) - } - tags := map[string]string{} - for _, pair := range spanTags { - tags[pair.Key] = pair.Value - } - span := &span{ - Name: name, - TraceID: uuid.MustParse(traceID), - SpanID: uuid.MustParse(spanID), - ParentSpanID: parentSpanID, - Tags: tags, - StartMillis: startMillis, - DurationMillis: durationMillis, - SpanLogs: spanLogs, - Source: source, - } - m.spans = append(m.spans, span) - return nil -} -func (m *mockSender) Flush() error { return nil } -func (m *mockSender) Close() {} diff --git a/exporter/tanzuobservabilityexporter/transformer.go b/exporter/tanzuobservabilityexporter/transformer.go deleted file mode 100644 index 9e5d0613bd04..000000000000 --- a/exporter/tanzuobservabilityexporter/transformer.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" - -import ( - "errors" - "strconv" - "time" - - "github.com/google/uuid" - "github.com/wavefronthq/wavefront-sdk-go/senders" - 
"go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" - - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" -) - -type traceTransformer struct { - resAttrs pcommon.Map -} - -func newTraceTransformer(resource pcommon.Resource) *traceTransformer { - t := &traceTransformer{ - resAttrs: resource.Attributes(), - } - return t -} - -var ( - errInvalidSpanID = errors.New("SpanID is invalid") - errInvalidTraceID = errors.New("TraceID is invalid") -) - -var appResAttrsKeys = []string{labelApplication, conventions.AttributeServiceName, labelService, labelShard, labelCluster} - -type span struct { - Name string - TraceID uuid.UUID - SpanID uuid.UUID - ParentSpanID uuid.UUID - Tags map[string]string - StartMillis int64 - DurationMillis int64 - SpanLogs []senders.SpanLog - Source string -} - -func (t *traceTransformer) Span(orig ptrace.Span) (span, error) { - traceID, err := traceIDtoUUID(orig.TraceID()) - if err != nil { - return span{}, errInvalidTraceID - } - - spanID, err := spanIDtoUUID(orig.SpanID()) - if err != nil { - return span{}, errInvalidSpanID - } - - startMillis, durationMillis := calculateTimes(orig) - - source, attributesWithoutSource := getSourceAndResourceTags(t.resAttrs) - tags := attributesToTagsReplaceSource( - newMap(attributesWithoutSource), orig.Attributes()) - fixServiceTag(tags) - t.setRequiredTags(tags) - - tags[labelSpanKind] = spanKind(orig) - - if droppedEventsCount := orig.DroppedEventsCount(); droppedEventsCount > 0 { - tags[labelDroppedEventsCount] = strconv.FormatUint(uint64(droppedEventsCount), 10) - } - - if droppedLinksCount := orig.DroppedLinksCount(); droppedLinksCount > 0 { - tags[labelDroppedLinksCount] = strconv.FormatUint(uint64(droppedLinksCount), 10) - } - - if droppedAttrsCount := orig.DroppedAttributesCount(); droppedAttrsCount > 0 { - tags[labelDroppedAttrsCount] = strconv.FormatUint(uint64(droppedAttrsCount), 10) - } - - errorTags := errorTagsFromStatus(orig.Status()) - for k, v := range errorTags { - tags[k] = v - } - - traceState := orig.TraceState().AsRaw() - if orig.TraceState().AsRaw() != "" { - tags[tracetranslator.TagW3CTraceState] = traceState - } - - return span{ - Name: orig.Name(), - TraceID: traceID, - SpanID: spanID, - ParentSpanID: parentSpanIDtoUUID(orig.ParentSpanID()), - Tags: tags, - Source: source, - StartMillis: startMillis, - DurationMillis: durationMillis, - SpanLogs: eventsToLogs(orig.Events()), - }, nil -} - -func getSourceAndResourceTagsAndSourceKey(attributes pcommon.Map) ( - string, map[string]string, string) { - attributesWithoutSource := map[string]string{} - attributes.Range(func(k string, v pcommon.Value) bool { - attributesWithoutSource[k] = v.AsString() - return true - }) - candidateKeys := []string{labelSource, conventions.AttributeHostName, "hostname", conventions.AttributeHostID} - var source string - var sourceKey string - for _, key := range candidateKeys { - if value, isFound := attributesWithoutSource[key]; isFound { - source = value - sourceKey = key - delete(attributesWithoutSource, key) - break - } - } - - // returning an empty source is fine as wavefront.go.sdk will set it up to a default value(os.hostname()) - return source, attributesWithoutSource, sourceKey -} - -func getSourceAndResourceTags(attributes pcommon.Map) (string, map[string]string) { - source, attributesWithoutSource, _ := getSourceAndResourceTagsAndSourceKey(attributes) - return source, 
attributesWithoutSource -} - -func getSourceAndKey(attributes pcommon.Map) (string, string) { - source, _, sourceKey := getSourceAndResourceTagsAndSourceKey(attributes) - return source, sourceKey -} - -func spanKind(span ptrace.Span) string { - switch span.Kind() { - case ptrace.SpanKindClient: - return "client" - case ptrace.SpanKindServer: - return "server" - case ptrace.SpanKindProducer: - return "producer" - case ptrace.SpanKindConsumer: - return "consumer" - case ptrace.SpanKindInternal: - return "internal" - case ptrace.SpanKindUnspecified: - return "unspecified" - default: - return "unknown" - } -} - -func (t *traceTransformer) setRequiredTags(tags map[string]string) { - if _, ok := tags[labelService]; !ok { - tags[labelService] = defaultServiceName - } - - if _, ok := tags[labelApplication]; !ok { - tags[labelApplication] = defaultApplicationName - } -} - -func eventsToLogs(events ptrace.SpanEventSlice) []senders.SpanLog { - var result []senders.SpanLog - for i := 0; i < events.Len(); i++ { - e := events.At(i) - fields := attributesToTagsReplaceSource(e.Attributes()) - fields[labelEventName] = e.Name() - result = append(result, senders.SpanLog{ - Timestamp: int64(e.Timestamp()) / time.Microsecond.Nanoseconds(), // Timestamp is in microseconds - Fields: fields, - }) - } - - return result -} - -func calculateTimes(span ptrace.Span) (int64, int64) { - startMillis := int64(span.StartTimestamp()) / time.Millisecond.Nanoseconds() - endMillis := int64(span.EndTimestamp()) / time.Millisecond.Nanoseconds() - durationMillis := endMillis - startMillis - // it's possible end time is unset, so default to 0 rather than using a negative number - if span.EndTimestamp() == 0 { - durationMillis = 0 - } - return startMillis, durationMillis -} - -func fixServiceTag(tags map[string]string) { - // tag `service` will take preference over `service.name` if both are provided - if _, ok := tags[labelService]; !ok { - if svcName, svcNameOk := tags[conventions.AttributeServiceName]; svcNameOk { - tags[labelService] = svcName - delete(tags, conventions.AttributeServiceName) - } - } -} - -func fixSourceKey(sourceKey string, tags map[string]string) { - delete(tags, sourceKey) - replaceSource(tags) -} - -func attributesToTags(attributes ...pcommon.Map) map[string]string { - tags := map[string]string{} - for _, att := range attributes { - att.Range(func(k string, v pcommon.Value) bool { - tags[k] = v.AsString() - return true - }) - } - return tags -} - -func appAttributesToTags(attributes pcommon.Map) map[string]string { - tags := map[string]string{} - for _, resAttrsKey := range appResAttrsKeys { - if resAttrVal, ok := attributes.Get(resAttrsKey); ok { - tags[resAttrsKey] = resAttrVal.AsString() - } - } - - return tags -} - -func replaceSource(tags map[string]string) { - if value, isFound := tags[labelSource]; isFound { - delete(tags, labelSource) - tags["_source"] = value - } -} - -func attributesToTagsReplaceSource(attributes ...pcommon.Map) map[string]string { - tags := attributesToTags(attributes...) - replaceSource(tags) - return tags -} - -func pointAndResAttrsToTagsAndFixSource(sourceKey string, attributes ...pcommon.Map) map[string]string { - tags := attributesToTags(attributes...) 
- fixServiceTag(tags) - fixSourceKey(sourceKey, tags) - return tags -} - -func newMap(tags map[string]string) pcommon.Map { - m := pcommon.NewMap() - for key, value := range tags { - m.PutStr(key, value) - } - return m -} - -func errorTagsFromStatus(status ptrace.Status) map[string]string { - tags := make(map[string]string) - - if status.Code() != ptrace.StatusCodeError { - return tags - } - - tags[labelError] = "true" - - if status.Message() != "" { - msg := status.Message() - const maxLength = 255 - len(conventions.OtelStatusDescription+"=") - if len(msg) > maxLength { - msg = msg[:maxLength] - } - tags[conventions.OtelStatusDescription] = msg - } - return tags -} - -func traceIDtoUUID(id pcommon.TraceID) (uuid.UUID, error) { - formatted, err := uuid.FromBytes(id[:]) - if err != nil || id.IsEmpty() { - return uuid.Nil, errInvalidTraceID - } - return formatted, nil -} - -func spanIDtoUUID(id pcommon.SpanID) (uuid.UUID, error) { - formatted, err := uuid.FromBytes(padTo16Bytes(id)) - if err != nil || id.IsEmpty() { - return uuid.Nil, errInvalidSpanID - } - return formatted, nil -} - -func parentSpanIDtoUUID(id pcommon.SpanID) uuid.UUID { - if id.IsEmpty() { - return uuid.Nil - } - // FromBytes only returns an error if the length is not 16 bytes, so the error case is unreachable - formatted, _ := uuid.FromBytes(padTo16Bytes(id)) - return formatted -} - -func padTo16Bytes(b [8]byte) []byte { - as16bytes := make([]byte, 16) - copy(as16bytes[16-len(b):], b[:]) - return as16bytes -} diff --git a/exporter/tanzuobservabilityexporter/transformer_test.go b/exporter/tanzuobservabilityexporter/transformer_test.go deleted file mode 100644 index d171336b9565..000000000000 --- a/exporter/tanzuobservabilityexporter/transformer_test.go +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package tanzuobservabilityexporter - -import ( - "testing" - "time" - - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" -) - -func TestSpanStartTimeIsConvertedToMilliseconds(t *testing.T) { - inNanos := int64(50000000) - att := pcommon.NewMap() - transform := transformerFromAttributes(att) - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetStartTimestamp(pcommon.Timestamp(inNanos)) - - actual, err := transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - - assert.Equal(t, inNanos/time.Millisecond.Nanoseconds(), actual.StartMillis) -} - -func TestSpanDurationIsCalculatedFromStartAndEndTimes(t *testing.T) { - startNanos := int64(50000000) - endNanos := int64(60000000) - att := pcommon.NewMap() - transform := transformerFromAttributes(att) - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetStartTimestamp(pcommon.Timestamp(startNanos)) - span.SetEndTimestamp(pcommon.Timestamp(endNanos)) - - actual, err := transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - - assert.Equal(t, int64(10), actual.DurationMillis) -} - -func TestSpanDurationIsZeroIfEndTimeIsUnset(t *testing.T) { - startNanos := int64(50000000) - att := pcommon.NewMap() - transform := 
transformerFromAttributes(att) - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetStartTimestamp(pcommon.Timestamp(startNanos)) - - actual, err := transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - - assert.Equal(t, int64(0), actual.DurationMillis) -} - -func TestSpanStatusCodeErrorAddsErrorTag(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeError, "")) - require.NoError(t, err, "transforming span to wavefront format") - - errorTag, ok := actual.Tags["error"] - assert.True(t, ok) - assert.Equal(t, "true", errorTag) -} - -func TestSpanStatusCodeOkDoesNotAddErrorTag(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeOk, "")) - require.NoError(t, err, "transforming span to wavefront format") - - _, ok := actual.Tags["error"] - assert.False(t, ok) -} - -func TestSpanStatusCodeUnsetDoesNotAddErrorTag(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeUnset, "")) - require.NoError(t, err, "transforming span to wavefront format") - - _, ok := actual.Tags["error"] - assert.False(t, ok) -} - -func TestSpanStatusMessageIsConvertedToTag(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - message := "some error message" - actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeError, message)) - - require.NoError(t, err, "transforming span to wavefront format") - - msgVal, ok := actual.Tags["otel.status_description"] - assert.True(t, ok) - assert.Equal(t, message, msgVal) -} - -func TestSpanStatusMessageIsIgnoredIfStatusIsNotError(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeOk, "not a real error message")) - - require.NoError(t, err, "transforming span to wavefront format") - - _, ok := actual.Tags["status.message"] - assert.False(t, ok) -} - -func TestSpanStatusMessageIsTruncatedToValidLength(t *testing.T) { - /* - * Maximum allowed length for a combination of a point tag key and value is 254 characters - * (255 including the "=" separating key and value). If the value is longer, the point is rejected and logged. - * Keep the number of distinct time series per metric and host to under 1000. 
- * -- https://docs.wavefront.com/wavefront_data_format.html - */ - transform := transformerFromAttributes(pcommon.NewMap()) - message := "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" - message += "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" - message += "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" - actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeError, message)) - - require.NoError(t, err, "transforming span to wavefront format") - - msgVal, ok := actual.Tags["otel.status_description"] - assert.True(t, ok) - assert.Equal(t, 255-1-len("otel.status_description"), len(msgVal), "message value truncated") -} - -func TestSpanEventsAreTranslatedToSpanLogs(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - now := time.Now() - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - event := span.Events().AppendEmpty() - event.SetName("eventName") - event.SetTimestamp(pcommon.NewTimestampFromTime(now)) - event.Attributes().PutStr("attrKey", "attrVal") - - result, err := transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - - require.Equal(t, 1, len(result.SpanLogs)) - actual := result.SpanLogs[0] - assert.Equal(t, now.UnixNano()/time.Microsecond.Nanoseconds(), actual.Timestamp) - name, ok := actual.Fields[labelEventName] - assert.True(t, ok) - assert.Equal(t, "eventName", name) - attrVal, ok := actual.Fields["attrKey"] - assert.True(t, ok) - assert.Equal(t, "attrVal", attrVal) -} - -func TestSpanKindIsTranslatedToTag(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - - internalSpan, err := transform.Span(spanWithKind(ptrace.SpanKindInternal)) - require.NoError(t, err, "transforming span to wavefront format") - kind, ok := internalSpan.Tags["span.kind"] - assert.True(t, ok) - assert.Equal(t, "internal", kind) - - serverSpan, err := transform.Span(spanWithKind(ptrace.SpanKindServer)) - require.NoError(t, err, "transforming span to wavefront format") - kind, ok = serverSpan.Tags["span.kind"] - assert.True(t, ok) - assert.Equal(t, "server", kind) - - clientSpan, err := transform.Span(spanWithKind(ptrace.SpanKindClient)) - require.NoError(t, err, "transforming span to wavefront format") - kind, ok = clientSpan.Tags["span.kind"] - assert.True(t, ok) - assert.Equal(t, "client", kind) - - consumerSpan, err := transform.Span(spanWithKind(ptrace.SpanKindConsumer)) - require.NoError(t, err, "transforming span to wavefront format") - kind, ok = consumerSpan.Tags["span.kind"] - assert.True(t, ok) - assert.Equal(t, "consumer", kind) - - producerSpan, err := transform.Span(spanWithKind(ptrace.SpanKindProducer)) - require.NoError(t, err, "transforming span to wavefront format") - kind, ok = producerSpan.Tags["span.kind"] - assert.True(t, ok) - assert.Equal(t, "producer", kind) - - unspecifiedSpan, err := transform.Span(spanWithKind(ptrace.SpanKindUnspecified)) - require.NoError(t, err, "transforming span to wavefront format") - kind, ok = unspecifiedSpan.Tags["span.kind"] - assert.True(t, ok) - assert.Equal(t, "unspecified", kind) -} - -func TestTraceStateTranslatedToTag(t *testing.T) { - transform := transformerFromAttributes(pcommon.NewMap()) - - spanWithState, err := transform.Span(spanWithTraceState("key=val")) - require.NoError(t, err, 
"transforming span to wavefront format") - stateVal, ok := spanWithState.Tags["w3c.tracestate"] - assert.True(t, ok) - assert.Equal(t, "key=val", stateVal) - - spanWithEmptyState, err := transform.Span(spanWithTraceState("")) - require.NoError(t, err, "transforming span to wavefront format") - _, ok = spanWithEmptyState.Tags["w3c.tracestate"] - assert.False(t, ok) -} - -func TestSpanForSourceTag(t *testing.T) { - inNanos := int64(50000000) - - //TestCase1: default value for source - resAttrs := pcommon.NewMap() - transform := transformerFromAttributes(resAttrs) - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetStartTimestamp(pcommon.Timestamp(inNanos)) - - actual, err := transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - assert.Equal(t, "", actual.Source) - - //TestCase2: source value from resAttrs.source - resAttrs = pcommon.NewMap() - resAttrs.PutStr(labelSource, "test_source") - resAttrs.PutStr(conventions.AttributeHostName, "test_host.name") - transform = transformerFromAttributes(resAttrs) - span = ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetStartTimestamp(pcommon.Timestamp(inNanos)) - - actual, err = transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - assert.Equal(t, "test_source", actual.Source) - assert.Equal(t, "test_host.name", actual.Tags[conventions.AttributeHostName]) - require.NotContains(t, actual.Tags, labelSource) - - //TestCase2: source value from resAttrs.host.name when source is not present - resAttrs = pcommon.NewMap() - resAttrs.PutStr("hostname", "test_hostname") - resAttrs.PutStr(conventions.AttributeHostName, "test_host.name") - transform = transformerFromAttributes(resAttrs) - span = ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetStartTimestamp(pcommon.Timestamp(inNanos)) - - actual, err = transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - assert.Equal(t, "test_host.name", actual.Source) - assert.Equal(t, "test_hostname", actual.Tags["hostname"]) - require.NotContains(t, actual.Tags, conventions.AttributeHostName) - - //TestCase4: source value from resAttrs.source when spanAttrs.source is present - resAttrs = pcommon.NewMap() - span.Attributes().PutStr(labelSource, "source_from_span_attribute") - resAttrs.PutStr(labelSource, "test_source") - resAttrs.PutStr(conventions.AttributeHostName, "test_host.name") - transform = transformerFromAttributes(resAttrs) - actual, err = transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - assert.Equal(t, "test_source", actual.Source) - assert.Equal(t, "test_host.name", actual.Tags[conventions.AttributeHostName]) - require.NotContains(t, actual.Tags, labelSource) - assert.Equal(t, "source_from_span_attribute", actual.Tags["_source"]) -} - -func TestSpanForDroppedCount(t *testing.T) { - inNanos := int64(50000000) - - //TestCase: 1 count tags are not set - resAttrs := pcommon.NewMap() - transform := transformerFromAttributes(resAttrs) - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetStartTimestamp(pcommon.Timestamp(inNanos)) - - actual, err := 
transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - assert.NotContains(t, actual.Tags, "otel.dropped_events_count") - assert.NotContains(t, actual.Tags, "otel.dropped_links_count") - assert.NotContains(t, actual.Tags, "otel.dropped_attributes_count") - - //TestCase2: count tags are set - span.SetDroppedEventsCount(123) - span.SetDroppedLinksCount(456) - span.SetDroppedAttributesCount(789) - - actual, err = transform.Span(span) - require.NoError(t, err, "transforming span to wavefront format") - assert.Equal(t, "123", actual.Tags["otel.dropped_events_count"]) - assert.Equal(t, "456", actual.Tags["otel.dropped_links_count"]) - assert.Equal(t, "789", actual.Tags["otel.dropped_attributes_count"]) -} - -func TestGetSourceAndResourceTags(t *testing.T) { - resAttrs := pcommon.NewMap() - resAttrs.PutStr(labelSource, "test_source") - resAttrs.PutStr(conventions.AttributeHostName, "test_host.name") - - actualSource, actualAttrsWithoutSource := getSourceAndResourceTags(resAttrs) - assert.Equal(t, "test_source", actualSource) - require.NotContains(t, actualAttrsWithoutSource, labelSource) -} - -func TestGetSourceAndKey(t *testing.T) { - resAttrs := pcommon.NewMap() - resAttrs.PutStr(labelSource, "some_source") - resAttrs.PutStr(conventions.AttributeHostName, "test_host.name") - - source, sourceKey := getSourceAndKey(resAttrs) - assert.Equal(t, "some_source", source) - assert.Equal(t, labelSource, sourceKey) -} - -func TestGetSourceAndKeyNotFound(t *testing.T) { - resAttrs := pcommon.NewMap() - resAttrs.PutStr("foo", "some_source") - resAttrs.PutStr("bar", "test_host.name") - - source, sourceKey := getSourceAndKey(resAttrs) - assert.Equal(t, "", source) - assert.Equal(t, "", sourceKey) -} - -func TestAttributesToTagsReplaceSource(t *testing.T) { - attrMap1 := newMap(map[string]string{"customer": "aws", "env": "dev"}) - attrMap2 := newMap(map[string]string{"env": "prod", "source": "ethernet"}) - result := attributesToTagsReplaceSource(attrMap1, attrMap2) - - // attrMap2 takes precedence because it is last, so "env"->"prod" not "dev" - assert.Equal( - t, - map[string]string{"env": "prod", "customer": "aws", "_source": "ethernet"}, - result) -} - -func spanWithKind(kind ptrace.SpanKind) ptrace.Span { - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.SetKind(kind) - return span -} - -func spanWithTraceState(state string) ptrace.Span { - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - span.TraceState().FromRaw(state) - return span -} - -func transformerFromAttributes(attrs pcommon.Map) *traceTransformer { - return &traceTransformer{ - resAttrs: attrs, - } -} - -func spanWithStatus(statusCode ptrace.StatusCode, message string) ptrace.Span { - span := ptrace.NewSpan() - span.SetSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) - span.SetTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - status := ptrace.NewStatus() - status.SetCode(statusCode) - if message != "" { - status.SetMessage(message) - } - status.CopyTo(span.Status()) - return span -} - -func TestAppAttributesToTags(t *testing.T) { - // 1. other attributes provided - attrMap := newMap(map[string]string{"k": "v"}) - tags := appAttributesToTags(attrMap) - assert.Equal(t, map[string]string{}, tags) - - // 2. 
service.name provided - attrMap = newMap(map[string]string{"k": "v", "application": "test_app", "service.name": "test_service.name", "shard": "test_shard", "cluster": "test_cluster"}) - tags1 := appAttributesToTags(attrMap) - assert.Equal(t, map[string]string{"application": "test_app", "service.name": "test_service.name", "shard": "test_shard", "cluster": "test_cluster"}, tags1) - - // 3. service and service.name both provided - attrMap = newMap(map[string]string{"k": "v", "application": "test_app", "service.name": "test_service.name", "shard": "test_shard", "cluster": "test_cluster", "service": "test_service"}) - tags2 := appAttributesToTags(attrMap) - assert.Equal(t, map[string]string{"application": "test_app", "service": "test_service", "shard": "test_shard", "cluster": "test_cluster", "service.name": "test_service.name"}, tags2) -} - -func TestFixServiceTag(t *testing.T) { - // service get picked up when both the tags are provided - attrMap := map[string]string{"application": "test_app", "shard": "test_shard", "cluster": "test_cluster", "service.name": "test_service"} - fixServiceTag(attrMap) - assert.Equal(t, map[string]string{"application": "test_app", "service": "test_service", "shard": "test_shard", "cluster": "test_cluster"}, attrMap) -} - -func TestPointAndResAttrsToTagsAndFixSource(t *testing.T) { - // 1. service.name provided - attrMap := newMap(map[string]string{"application": "test_app", "service.name": "test_service.name", "source": "test_source"}) - tags := pointAndResAttrsToTagsAndFixSource("source", attrMap) - assert.Equal(t, map[string]string{"application": "test_app", "service": "test_service.name"}, tags) - - // 2. service and service.name both provided - attrMap = newMap(map[string]string{"application": "test_app", "service.name": "test_service.name", "source": "test_source", "service": "test_service"}) - tags = pointAndResAttrsToTagsAndFixSource("source", attrMap) - assert.Equal(t, map[string]string{"application": "test_app", "service": "test_service", "service.name": "test_service.name"}, tags) - - // 3. service.name provided sourceKey other than "source" - attrMap = newMap(map[string]string{"application": "test_app", "service.name": "test_service.name", "source": "test_source", "other_source": "test_other_source"}) - tags = pointAndResAttrsToTagsAndFixSource("other_source", attrMap) - assert.Equal(t, map[string]string{"application": "test_app", "service": "test_service.name", "_source": "test_source"}, tags) - - // 2. 
service and service.name both provided - attrMap = newMap(map[string]string{"application": "test_app", "service.name": "test_service.name", "source": "test_source", "service": "test_service", "other_source": "test_other_source"}) - tags = pointAndResAttrsToTagsAndFixSource("other_source", attrMap) - assert.Equal(t, map[string]string{"application": "test_app", "service": "test_service", "service.name": "test_service.name", "_source": "test_source"}, tags) -} - -func TestTraceIDtoUUID(t *testing.T) { - tests := []struct { - name string - in pcommon.TraceID - out uuid.UUID - error bool - }{ - { - name: "empty", - in: pcommon.NewTraceIDEmpty(), - out: uuid.UUID{}, - error: true, - }, - { - name: "one", - in: pcommon.TraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}), - out: uuid.UUID{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, - }, - { - name: "all_bytes", - in: pcommon.TraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), - out: uuid.UUID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := traceIDtoUUID(tt.in) - if tt.error { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.out, got) - }) - } -} - -func BenchmarkTraceIDtoUUID(b *testing.B) { - for n := 0; n < b.N; n++ { - _, err := traceIDtoUUID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) - assert.NoError(b, err) - } -} diff --git a/go.mod b/go.mod index 7974dede7cab..b05712c7cd3a 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/skywalkingexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter v0.91.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/asapauthextension v0.91.0 @@ -307,7 +306,6 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/caio/go-tdigest/v4 v4.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -327,7 +325,6 @@ require ( github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/denisenkom/go-mssqldb v0.12.3 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/devigned/tab v0.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -337,7 +334,7 @@ require ( github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 // indirect github.com/eapache/go-resiliency v1.4.0 // indirect 
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect @@ -381,7 +378,7 @@ require ( github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect @@ -446,6 +443,7 @@ require ( github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -480,6 +478,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.19 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 // indirect + github.com/microsoft/go-mssqldb v1.6.0 // indirect github.com/miekg/dns v1.1.56 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect @@ -618,7 +617,6 @@ require ( github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e // indirect github.com/vmware/govmomi v0.34.1 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect - github.com/wavefronthq/wavefront-sdk-go v0.15.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect @@ -706,7 +704,7 @@ require ( k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/kubelet v0.28.4 // indirect - k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect sigs.k8s.io/controller-runtime v0.16.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect @@ -831,8 +829,6 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splun replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter => ./exporter/sumologicexporter -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter => ./exporter/tanzuobservabilityexporter - replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter => ./exporter/tencentcloudlogserviceexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter => ./exporter/elasticsearchexporter diff --git a/go.sum b/go.sum index ea76e166a325..b9511b2b4ce2 100644 --- a/go.sum +++ b/go.sum @@ -92,13 +92,10 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod 
h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= @@ -114,6 +111,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 h1:yfJe15aSwEQ6Oo6J+gdfdulPNoZ3TEhmbhLIoxZcA+U= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= github.com/Azure/azure-storage-queue-go v0.0.0-20230531184854-c06a8eff66fe h1:HGuouUM1533rBXmMtR7qh5pYNSSjUZG90b/MgJAnb/A= @@ -413,8 +412,6 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/caio/go-tdigest/v4 v4.0.1 h1:sx4ZxjmIEcLROUPs2j1BGe2WhOtHD6VSe6NNbBdKYh4= -github.com/caio/go-tdigest/v4 v4.0.1/go.mod h1:Wsa+f0EZnV2gShdj1adgl0tQSoXRxtM0QioTgukFw8U= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -499,8 +496,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= -github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= 
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= @@ -517,7 +512,6 @@ github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/ github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= @@ -532,8 +526,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 h1:wHGPJSXvwKQVf/XfhjUPyrhpcPKWNy8F3ikH+eiwoBg= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0/go.mod h1:PseHFo8Leko7J4A/TfZ6kkHdkzKBLUta6hRZR/OEbbc= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -697,8 +691,8 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -818,7 +812,9 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod 
h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -1076,7 +1072,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/leoluk/perflib_exporter v0.2.1 h1:/3/ut1k/jFt5p4ypjLZKDHDqlXAK6ERZPVWtwdI389I= @@ -1143,6 +1138,8 @@ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQth github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -1189,7 +1186,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mongodb-forks/digest v1.0.5 h1:EJu3wtLZcA0HCvsZpX5yuD193/sW9tHiNvrEM5apXMk= github.com/mongodb-forks/digest v1.0.5/go.mod h1:rb+EX8zotClD5Dj4NdgxnJXG9nwrlx3NWKJ8xttz1Dg= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -1307,7 +1303,6 @@ github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1557,8 +1552,6 @@ github.com/vmware/govmomi v0.34.1 h1:Hqu2Uke2itC+cNoIcFQBLEZvX9wBRTTOP04J7V1fqRw github.com/vmware/govmomi v0.34.1/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= -github.com/wavefronthq/wavefront-sdk-go v0.15.0 h1:po9E3vh/0y7kOx8D9EtFp7kbSLLLKbmu/w/s1xGJAQU= -github.com/wavefronthq/wavefront-sdk-go v0.15.0/go.mod h1:V72c8e+bXuLK8HpA6ioW0ll5mK9IPD+4IHNNDY75ksA= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= @@ -1757,7 +1750,6 @@ golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1867,7 +1859,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -2346,8 +2337,8 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4Va k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw= k8s.io/kubelet v0.28.4/go.mod h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/components/components.go b/internal/components/components.go index b9ebb86a56fc..7ef8ab67785a 100644 --- a/internal/components/components.go +++ b/internal/components/components.go @@ -66,7 +66,6 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/skywalkingexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter" - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tanzuobservabilityexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/asapauthextension" @@ -358,7 +357,6 @@ func Components() (otelcol.Factories, error) { skywalkingexporter.NewFactory(), splunkhecexporter.NewFactory(), sumologicexporter.NewFactory(), - tanzuobservabilityexporter.NewFactory(), tencentcloudlogserviceexporter.NewFactory(), zipkinexporter.NewFactory(), } diff --git a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go index 22e8d5d12faf..29c5207418db 100644 --- a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go +++ b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go @@ -44,7 +44,7 @@ var ctimeSubstitutes = map[string]string{ "%f": "999999", "%s": "99999999", "%Z": "MST", - "%z": "-0700", + "%z": "Z0700", "%w": "-070000", "%i": "-07", "%j": "-07:00", diff --git a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go index 0d4969afd7ef..182b1dd4a9e1 100644 --- a/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go +++ b/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt_test.go @@ -54,3 +54,25 @@ func TestParse(t *testing.T) { t.Errorf("Given: %v, expected: %v", dt, dt2) } } + +func TestZulu(t *testing.T) { + format := "%Y-%m-%dT%H:%M:%S.%L%z" + // These time should all parse as UTC. + for _, input := range []string{ + "2019-01-02T15:04:05.666666Z", + "2019-01-02T15:04:05.666666-0000", + "2019-01-02T15:04:05.666666+0000", + } { + t.Run(input, func(t *testing.T) { + dt, err := Parse(format, input) + if err != nil { + t.Error(err) + } else if dt.UnixNano() != dt1.UnixNano() { + // We compare the unix nanoseconds because Go has a subtle parsing difference between "Z" and "+0000". + // The former returns a Time with the UTC timezone, the latter returns a Time with a 0000 time zone offset. + // (See Go's documentation for `time.Parse`.) 
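+				// This equivalence relies on the ctimefmt change above mapping %z to Go's "Z0700"
+				// layout token, which accepts either a literal "Z" or a numeric offset when parsing;
+				// the previous "-0700" mapping rejected the trailing "Z".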
+ t.Errorf("Given: %v, expected: %v", dt, dt1) + } + }) + } +} diff --git a/internal/datadog/agent.go b/internal/datadog/agent.go index 43656a6a7528..6399081a60d9 100644 --- a/internal/datadog/agent.go +++ b/internal/datadog/agent.go @@ -17,6 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/stats" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -39,6 +40,13 @@ type TraceAgent struct { exit chan struct{} } +// ConnectorPerformanceFeatureGate uses optimized code paths for the Datadog Connector. +var ConnectorPerformanceFeatureGate = featuregate.GlobalRegistry().MustRegister( + "connector.datadogconnector.performance", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("Datadog Connector will use optimized code"), +) + // newAgent creates a new unstarted traceagent using the given context. Call Start to start the traceagent. // The out channel will receive outoing stats payloads resulting from spans ingested using the Ingest method. func NewAgent(ctx context.Context, out chan *pb.StatsPayload) *TraceAgent { diff --git a/internal/datadog/go.mod b/internal/datadog/go.mod index eb48338891a4..d1cb5da8e0c6 100644 --- a/internal/datadog/go.mod +++ b/internal/datadog/go.mod @@ -7,6 +7,7 @@ require ( github.com/DataDog/datadog-agent/pkg/trace v0.50.1 github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.11.0 github.com/stretchr/testify v1.8.4 + go.opentelemetry.io/collector/featuregate v1.0.0 go.opentelemetry.io/collector/pdata v1.0.0 ) @@ -66,7 +67,6 @@ require ( go.opentelemetry.io/collector/component v0.91.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.91.0 // indirect go.opentelemetry.io/collector/confmap v0.91.0 // indirect - go.opentelemetry.io/collector/featuregate v1.0.0 // indirect go.opentelemetry.io/collector/semconv v0.91.0 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect diff --git a/internal/filter/filterset/regexp/regexpfilterset.go b/internal/filter/filterset/regexp/regexpfilterset.go index f8ff39c85591..0ab598076830 100644 --- a/internal/filter/filterset/regexp/regexpfilterset.go +++ b/internal/filter/filterset/regexp/regexpfilterset.go @@ -73,7 +73,6 @@ func (rfs *FilterSet) Matches(toMatch string) bool { } // addFilters compiles all the given filters and stores them as regexes. -// All regexes are automatically anchored to enforce full string matches. func (rfs *FilterSet) addFilters(filters []string) error { dedup := make(map[string]struct{}, len(filters)) for _, f := range filters { diff --git a/pkg/golden/README.md b/pkg/golden/README.md index aa5376e0b3d6..6f3880ec2f53 100644 --- a/pkg/golden/README.md +++ b/pkg/golden/README.md @@ -14,16 +14,16 @@ The package is expected to be used with pkg/pdatatest module. ## Generating an expected result file -The easiest way to capture the expected result in a file is `golden.WriteMetrics` or `golden.WriteLogs`. +The easiest way to capture the expected result in a file is `golden.WriteMetrics`, `golden.WriteTraces` or `golden.WriteLogs`. When writing a new test: 1. Write the test as if the expected file exists. 2. Follow the steps below for updating an existing test. When updating an existing test: -1. Add a call to `golden.WriteMetrics` or `golden.WriteLogs` or in the appropriate place. +1. 
Add a call to `golden.WriteMetrics`, `golden.WriteTraces` or `golden.WriteLogs` or in the appropriate place. 2. Run the test once. -3. Remove the call to `golden.WriteMetrics` or `golden.WriteLogs`. +3. Remove the call to `golden.WriteMetrics`, `golden.WriteTraces` or `golden.WriteLogs`. NOTE: `golden.WriteMetrics` will always mark the test as failed. This behavior is necessary to ensure the function is removed after the golden file is written. diff --git a/pkg/golden/golden.go b/pkg/golden/golden.go index 2c7bc70c04ae..ecf001a2497b 100644 --- a/pkg/golden/golden.go +++ b/pkg/golden/golden.go @@ -37,7 +37,7 @@ func ReadMetrics(filePath string) (pmetric.Metrics, error) { } // WriteMetrics writes a pmetric.Metrics to the specified file in YAML format. -func WriteMetrics(t *testing.T, filePath string, metrics pmetric.Metrics) error { +func WriteMetrics(t testing.TB, filePath string, metrics pmetric.Metrics) error { if err := writeMetrics(filePath, metrics); err != nil { return err } @@ -99,7 +99,7 @@ func ReadLogs(filePath string) (plog.Logs, error) { } // WriteLogs writes a plog.Logs to the specified file in YAML format. -func WriteLogs(t *testing.T, filePath string, logs plog.Logs) error { +func WriteLogs(t testing.TB, filePath string, logs plog.Logs) error { if err := writeLogs(filePath, logs); err != nil { return err } @@ -150,7 +150,7 @@ func ReadTraces(filePath string) (ptrace.Traces, error) { } // WriteTraces writes a ptrace.Traces to the specified file in YAML format. -func WriteTraces(t *testing.T, filePath string, traces ptrace.Traces) error { +func WriteTraces(t testing.TB, filePath string, traces ptrace.Traces) error { if err := writeTraces(filePath, traces); err != nil { return err } diff --git a/pkg/ottl/README.md b/pkg/ottl/README.md index 390dd97383c3..2c25f746009a 100644 --- a/pkg/ottl/README.md +++ b/pkg/ottl/README.md @@ -24,7 +24,7 @@ OTTL Contexts define how you access the fields on a piece of telemetry. See the | Telemetry | OTTL Context | |-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------| | `Resource` | [Resource](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlresource/README.md) | -| `Instrumentation Scope` | [Instrumentation Scode](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlscope/README.md) | +| `Instrumentation Scope` | [Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlscope/README.md) | | `Span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspan/README.md) | | `Span Event` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspanevent/README.md) | | `Metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlmetric/README.md) | diff --git a/pkg/stanza/docs/types/expression.md b/pkg/stanza/docs/types/expression.md index d56d03732daa..9ae6bf078f7f 100644 --- a/pkg/stanza/docs/types/expression.md +++ b/pkg/stanza/docs/types/expression.md @@ -4,7 +4,7 @@ Expressions give the config flexibility by allowing dynamic business logic rules Most notably, expressions can be used to route log records and add new fields based on the contents of the log entry being processed. 
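The pkg/golden README steps above amount to a short, repeatable workflow, and the switch from *testing.T to testing.TB in golden.go lets benchmarks reuse the same helpers. A minimal sketch of that workflow, assuming a hypothetical buildMetrics helper and testdata path (only the golden and pmetrictest calls are taken from this repository):

```go
package goldenexample

import (
	"path/filepath"
	"testing"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildMetrics stands in for whatever scraper or component produces the metrics under test.
func buildMetrics() pmetric.Metrics { return pmetric.NewMetrics() }

func TestGoldenWorkflow(t *testing.T) {
	actual := buildMetrics()
	expectedFile := filepath.Join("testdata", "expected.yaml")

	// Steps 1-2: add this call, run the test once to capture the golden file, then delete it.
	// golden.WriteMetrics deliberately fails the test so the call cannot be committed by accident.
	// require.NoError(t, golden.WriteMetrics(t, expectedFile, actual))

	// Step 3: with the write removed, the test only compares against the stored golden file.
	expected, err := golden.ReadMetrics(expectedFile)
	require.NoError(t, err)
	require.NoError(t, pmetrictest.CompareMetrics(expected, actual,
		pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp()))
}
```

The same pattern applies to the golden.WriteTraces/ReadTraces and golden.WriteLogs/ReadLogs pairs.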
-For reference documentation of the expression language, see [here](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md). +For reference documentation of the expression language, see [here](https://github.com/expr-lang/expr/blob/master/docs/language-definition.md). Available to the expressions are a few special variables: - `body` contains the entry's body diff --git a/processor/redactionprocessor/README.md b/processor/redactionprocessor/README.md index 12e5696b2e10..9561c2c8d800 100644 --- a/processor/redactionprocessor/README.md +++ b/processor/redactionprocessor/README.md @@ -6,7 +6,8 @@ | Stability | [beta]: traces | | Distributions | [contrib], [sumo] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Fredaction%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fredaction) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Fredaction%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fredaction) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@leonsp-ai](https://www.github.com/leonsp-ai), [@dmitryax](https://www.github.com/dmitryax), [@mx-psi](https://www.github.com/mx-psi), [@TylerHelmuth](https://www.github.com/TylerHelmuth) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@dmitryax](https://www.github.com/dmitryax), [@mx-psi](https://www.github.com/mx-psi), [@TylerHelmuth](https://www.github.com/TylerHelmuth) | +| Emeritus | [@leonsp-ai](https://www.github.com/leonsp-ai) | [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/processor/redactionprocessor/metadata.yaml b/processor/redactionprocessor/metadata.yaml index 880f1430c39f..ecf3063c6a27 100644 --- a/processor/redactionprocessor/metadata.yaml +++ b/processor/redactionprocessor/metadata.yaml @@ -6,7 +6,8 @@ status: beta: [traces] distributions: [contrib, sumo] codeowners: - active: [leonsp-ai, dmitryax, mx-psi, TylerHelmuth] + active: [dmitryax, mx-psi, TylerHelmuth] + emeritus: [leonsp-ai] tests: config: diff --git a/processor/spanprocessor/span_test.go b/processor/spanprocessor/span_test.go index 93252b7a4dc3..55fb691fd0e9 100644 --- a/processor/spanprocessor/span_test.go +++ b/processor/spanprocessor/span_test.go @@ -610,7 +610,7 @@ func TestSpanProcessor_setStatusCodeConditionally(t *testing.T) { Code: "Error", Description: "custom error message", } - // This test numer two include rule for applying rule only for status code 400 + // This test number two include rule for applying rule only for status code 400 oCfg.Include = &filterconfig.MatchProperties{ Config: filterset.Config{ MatchType: filterset.Strict, @@ -630,7 +630,7 @@ func TestSpanProcessor_setStatusCodeConditionally(t *testing.T) { outputStatusDescription string }{ { - // without attribiutes - should not apply rule and leave status code as it is + // without attributes - 
should not apply rule and leave status code as it is inputStatusCode: ptrace.StatusCodeOk, outputStatusCode: ptrace.StatusCodeOk, }, diff --git a/receiver/apachesparkreceiver/scraper.go b/receiver/apachesparkreceiver/scraper.go index 33ccfd0662ce..c57b9fd81b66 100644 --- a/receiver/apachesparkreceiver/scraper.go +++ b/receiver/apachesparkreceiver/scraper.go @@ -58,7 +58,7 @@ func (s *sparkScraper) scrape(_ context.Context) (pmetric.Metrics, error) { // Call applications endpoint to get ids and names for all apps in the cluster apps, err := s.client.Applications() if err != nil { - return pmetric.NewMetrics(), errFailedAppIDCollection + return pmetric.NewMetrics(), errors.Join(errFailedAppIDCollection, err) } // Check apps against allowed app names from config diff --git a/receiver/apachesparkreceiver/scraper_test.go b/receiver/apachesparkreceiver/scraper_test.go index 01b969256246..c028b254cd41 100644 --- a/receiver/apachesparkreceiver/scraper_test.go +++ b/receiver/apachesparkreceiver/scraper_test.go @@ -53,7 +53,7 @@ func TestScraper(t *testing.T) { return pmetric.NewMetrics() }, config: createDefaultConfig().(*Config), - expectedErr: errFailedAppIDCollection, + expectedErr: errors.Join(errFailedAppIDCollection, errors.New("could not retrieve app ids")), }, { desc: "No Matching Allowed Apps", diff --git a/receiver/awscontainerinsightreceiver/design.md b/receiver/awscontainerinsightreceiver/design.md index 1669740c36ba..e5ba08f44172 100644 --- a/receiver/awscontainerinsightreceiver/design.md +++ b/receiver/awscontainerinsightreceiver/design.md @@ -11,7 +11,7 @@ `awscontainerinsightreceiver` collects data from two main sources: * `cadvisor` - * An customized `cadvisor` lib is embedded inside the receiver. The `cadvisor` setting is tweaked for Container Insights use cases. For example, only certain metrics are collected and only certain `cgroup` is included. + * A customized `cadvisor` lib is embedded inside the receiver. The `cadvisor` setting is tweaked for Container Insights use cases. For example, only certain metrics are collected and only certain `cgroup` is included. * The receiver generates Container Insights specific metrics from the raw metrics provided by `cadvisor`. The metrics are categorized as different infrastructure layers like node, node filesystem, node disk io, node network, pod, pod network, container, and container filesystem. * Some pod/container related labels like podName, podId, namespace, containerName are extracted from the container spec provided by `cadvisor`. This labels will be added as resource attributes for the metrics and the AWS Container Insights processor needs those attributes to do further processing of the metrics. * `k8sapiserver` diff --git a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go index 8c77d4634831..b5e07447af78 100644 --- a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go +++ b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go @@ -24,9 +24,7 @@ type nodeCapacity struct { logger *zap.Logger // osLstat returns a FileInfo describing the named file. 
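For the apachesparkreceiver change above, errors.Join keeps the sentinel matchable with errors.Is while no longer hiding the client error, which is why the scraper test now expects the joined value. A small illustration, using stand-in error messages rather than the receiver's actual ones:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for the receiver's sentinel error.
var errFailedAppIDCollection = errors.New("failed to collect application ids")

func main() {
	// Stand-in for the error returned by the Spark client.
	clientErr := errors.New("could not retrieve app ids")

	// Joining preserves both: errors.Is still matches the sentinel,
	// and the message now also carries the underlying cause.
	err := errors.Join(errFailedAppIDCollection, clientErr)

	fmt.Println(errors.Is(err, errFailedAppIDCollection)) // true
	fmt.Println(err)                                      // prints both messages, newline-separated
}
```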
- osLstat func(name string) (os.FileInfo, error) - // osSetenv sets the value of the environment variable named by the key - osSetenv func(key string, value string) error + osLstat func(name string) (os.FileInfo, error) virtualMemory func(ctx context.Context) (*mem.VirtualMemoryStat, error) cpuInfo func(ctx context.Context) ([]cpu.InfoStat, error) } @@ -37,7 +35,6 @@ func newNodeCapacity(logger *zap.Logger, options ...nodeCapacityOption) (nodeCap nc := &nodeCapacity{ logger: logger, osLstat: os.Lstat, - osSetenv: os.Setenv, virtualMemory: mem.VirtualMemoryWithContext, cpuInfo: cpu.InfoWithContext, } diff --git a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go index fc903775f0f1..fd8c4dac10c3 100644 --- a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go +++ b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go @@ -33,12 +33,6 @@ func TestNodeCapacity(t *testing.T) { } } - // can't parse cpu and mem info - setEnvOption := func(nc *nodeCapacity) { - nc.osSetenv = func(key, value string) error { - return nil - } - } virtualMemOption := func(nc *nodeCapacity) { nc.virtualMemory = func(ctx context.Context) (*mem.VirtualMemoryStat, error) { return nil, errors.New("error") @@ -49,7 +43,7 @@ func TestNodeCapacity(t *testing.T) { return nil, errors.New("error") } } - nc, err = newNodeCapacity(zap.NewNop(), lstatOption, setEnvOption, virtualMemOption, cpuInfoOption) + nc, err = newNodeCapacity(zap.NewNop(), lstatOption, virtualMemOption, cpuInfoOption) assert.NotNil(t, nc) assert.Nil(t, err) assert.Equal(t, int64(0), nc.getMemoryCapacity()) @@ -71,7 +65,7 @@ func TestNodeCapacity(t *testing.T) { }, nil } } - nc, err = newNodeCapacity(zap.NewNop(), lstatOption, setEnvOption, virtualMemOption, cpuInfoOption) + nc, err = newNodeCapacity(zap.NewNop(), lstatOption, virtualMemOption, cpuInfoOption) assert.NotNil(t, nc) assert.Nil(t, err) assert.Equal(t, int64(1024), nc.getMemoryCapacity()) diff --git a/receiver/cloudfoundryreceiver/README.md b/receiver/cloudfoundryreceiver/README.md index c32a7f8a4c29..37a379ba83ae 100644 --- a/receiver/cloudfoundryreceiver/README.md +++ b/receiver/cloudfoundryreceiver/README.md @@ -6,7 +6,8 @@ | Stability | [beta]: metrics | | Distributions | [contrib], [observiq], [splunk], [sumo] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fcloudfoundry%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fcloudfoundry) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fcloudfoundry%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fcloudfoundry) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@agoallikmaa](https://www.github.com/agoallikmaa), [@pellared](https://www.github.com/pellared), [@crobert-1](https://www.github.com/crobert-1) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | 
[@pellared](https://www.github.com/pellared), [@crobert-1](https://www.github.com/crobert-1) | +| Emeritus | [@agoallikmaa](https://www.github.com/agoallikmaa) | [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/receiver/cloudfoundryreceiver/metadata.yaml b/receiver/cloudfoundryreceiver/metadata.yaml index 4992884ee6b5..698e64aab9c6 100644 --- a/receiver/cloudfoundryreceiver/metadata.yaml +++ b/receiver/cloudfoundryreceiver/metadata.yaml @@ -6,7 +6,8 @@ status: beta: [metrics] distributions: [contrib, splunk, observiq, sumo] codeowners: - active: [agoallikmaa, pellared, crobert-1] + active: [pellared, crobert-1] + emeritus: [agoallikmaa] tests: config: diff --git a/receiver/elasticsearchreceiver/client.go b/receiver/elasticsearchreceiver/client.go index 3d8f828b05ea..359a515c89d1 100644 --- a/receiver/elasticsearchreceiver/client.go +++ b/receiver/elasticsearchreceiver/client.go @@ -61,7 +61,7 @@ func newElasticsearchClient(settings component.TelemetrySettings, c Config, h co var authHeader string if c.Username != "" && c.Password != "" { - userPass := fmt.Sprintf("%s:%s", c.Username, c.Password) + userPass := fmt.Sprintf("%s:%s", c.Username, string(c.Password)) authb64 := base64.StdEncoding.EncodeToString([]byte(userPass)) authHeader = fmt.Sprintf("Basic %s", authb64) } diff --git a/receiver/haproxyreceiver/scraper.go b/receiver/haproxyreceiver/scraper.go index 70343ad03ad8..7e7c9d1bdb31 100644 --- a/receiver/haproxyreceiver/scraper.go +++ b/receiver/haproxyreceiver/scraper.go @@ -85,93 +85,83 @@ func (s *scraper) scrape(ctx context.Context) (pmetric.Metrics, error) { now := pcommon.NewTimestampFromTime(time.Now()) for _, record := range records { - err := s.mb.RecordHaproxySessionsCountDataPoint(now, record["scur"]) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["scur"] != "" { + if err := s.mb.RecordHaproxySessionsCountDataPoint(now, record["scur"]); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } if record["conn_rate"] != "" { - err = s.mb.RecordHaproxyConnectionsRateDataPoint(now, record["conn_rate"]) - if err != nil { + if err := s.mb.RecordHaproxyConnectionsRateDataPoint(now, record["conn_rate"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["conn_tot"] != "" { - err = s.mb.RecordHaproxyConnectionsTotalDataPoint(now, record["conn_tot"]) - if err != nil { + if err := s.mb.RecordHaproxyConnectionsTotalDataPoint(now, record["conn_tot"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["lbtot"] != "" { - err = s.mb.RecordHaproxyServerSelectedTotalDataPoint(now, record["lbtot"]) - if err != nil { + if err := s.mb.RecordHaproxyServerSelectedTotalDataPoint(now, record["lbtot"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } - err = s.mb.RecordHaproxyBytesInputDataPoint(now, record["bin"]) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["bin"] != "" { + if err := s.mb.RecordHaproxyBytesInputDataPoint(now, record["bin"]); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } - err = s.mb.RecordHaproxyBytesOutputDataPoint(now, record["bout"]) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["bout"] != "" { + if err := s.mb.RecordHaproxyBytesOutputDataPoint(now, record["bout"]); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } if record["cli_abrt"] != "" { - err = 
s.mb.RecordHaproxyClientsCanceledDataPoint(now, record["cli_abrt"]) - if err != nil { + if err := s.mb.RecordHaproxyClientsCanceledDataPoint(now, record["cli_abrt"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["comp_byp"] != "" { - err = s.mb.RecordHaproxyCompressionBypassDataPoint(now, record["comp_byp"]) - if err != nil { + if err := s.mb.RecordHaproxyCompressionBypassDataPoint(now, record["comp_byp"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["comp_in"] != "" { - err = s.mb.RecordHaproxyCompressionInputDataPoint(now, record["comp_in"]) - if err != nil { + if err := s.mb.RecordHaproxyCompressionInputDataPoint(now, record["comp_in"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["comp_out"] != "" { - err = s.mb.RecordHaproxyCompressionOutputDataPoint(now, record["comp_out"]) - if err != nil { + if err := s.mb.RecordHaproxyCompressionOutputDataPoint(now, record["comp_out"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["comp_rsp"] != "" { - err = s.mb.RecordHaproxyCompressionCountDataPoint(now, record["comp_rsp"]) - if err != nil { + if err := s.mb.RecordHaproxyCompressionCountDataPoint(now, record["comp_rsp"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["dreq"] != "" { - err = s.mb.RecordHaproxyRequestsDeniedDataPoint(now, record["dreq"]) - if err != nil { + if err := s.mb.RecordHaproxyRequestsDeniedDataPoint(now, record["dreq"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["dresp"] != "" { - err = s.mb.RecordHaproxyResponsesDeniedDataPoint(now, record["dresp"]) - if err != nil { + if err := s.mb.RecordHaproxyResponsesDeniedDataPoint(now, record["dresp"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["downtime"] != "" { - err = s.mb.RecordHaproxyDowntimeDataPoint(now, record["downtime"]) - if err != nil { + if err := s.mb.RecordHaproxyDowntimeDataPoint(now, record["downtime"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["econ"] != "" { - err = s.mb.RecordHaproxyConnectionsErrorsDataPoint(now, record["econ"]) - if err != nil { + if err := s.mb.RecordHaproxyConnectionsErrorsDataPoint(now, record["econ"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["ereq"] != "" { - err = s.mb.RecordHaproxyRequestsErrorsDataPoint(now, record["ereq"]) - if err != nil { + if err := s.mb.RecordHaproxyRequestsErrorsDataPoint(now, record["ereq"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } @@ -189,72 +179,74 @@ func (s *scraper) scrape(ctx context.Context) (pmetric.Metrics, error) { s.mb.RecordHaproxyResponsesErrorsDataPoint(now, abortsVal+erespVal) } if record["chkfail"] != "" { - err = s.mb.RecordHaproxyFailedChecksDataPoint(now, record["chkfail"]) - if err != nil { + if err := s.mb.RecordHaproxyFailedChecksDataPoint(now, record["chkfail"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["wredis"] != "" { - err = s.mb.RecordHaproxyRequestsRedispatchedDataPoint(now, record["wredis"]) - if err != nil { + if err := s.mb.RecordHaproxyRequestsRedispatchedDataPoint(now, record["wredis"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } - err = s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_1xx"], metadata.AttributeStatusCode1xx) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["hrsp_1xx"] != "" { + if err := s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_1xx"], metadata.AttributeStatusCode1xx); err != nil { + 
scrapeErrors = append(scrapeErrors, err) + } } - err = s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_2xx"], metadata.AttributeStatusCode2xx) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["hrsp_2xx"] != "" { + if err := s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_2xx"], metadata.AttributeStatusCode2xx); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } - err = s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_3xx"], metadata.AttributeStatusCode3xx) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["hrsp_3xx"] != "" { + if err := s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_3xx"], metadata.AttributeStatusCode3xx); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } - err = s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_4xx"], metadata.AttributeStatusCode4xx) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["hrsp_4xx"] != "" { + if err := s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_4xx"], metadata.AttributeStatusCode4xx); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } - err = s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_5xx"], metadata.AttributeStatusCode5xx) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["hrsp_5xx"] != "" { + if err := s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_5xx"], metadata.AttributeStatusCode5xx); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } - err = s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_other"], metadata.AttributeStatusCodeOther) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["hrsp_other"] != "" { + if err := s.mb.RecordHaproxyRequestsTotalDataPoint(now, record["hrsp_other"], metadata.AttributeStatusCodeOther); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } if record["wretr"] != "" { - err = s.mb.RecordHaproxyConnectionsRetriesDataPoint(now, record["wretr"]) - if err != nil { + if err := s.mb.RecordHaproxyConnectionsRetriesDataPoint(now, record["wretr"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } - err = s.mb.RecordHaproxySessionsTotalDataPoint(now, record["stot"]) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["stot"] != "" { + if err := s.mb.RecordHaproxySessionsTotalDataPoint(now, record["stot"]); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } if record["qcur"] != "" { - err = s.mb.RecordHaproxyRequestsQueuedDataPoint(now, record["qcur"]) - if err != nil { + if err := s.mb.RecordHaproxyRequestsQueuedDataPoint(now, record["qcur"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["req_rate"] != "" { - err = s.mb.RecordHaproxyRequestsRateDataPoint(now, record["req_rate"]) - if err != nil { + if err := s.mb.RecordHaproxyRequestsRateDataPoint(now, record["req_rate"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } if record["ttime"] != "" { - err = s.mb.RecordHaproxySessionsAverageDataPoint(now, record["ttime"]) - if err != nil { + if err := s.mb.RecordHaproxySessionsAverageDataPoint(now, record["ttime"]); err != nil { scrapeErrors = append(scrapeErrors, err) } } - err = s.mb.RecordHaproxySessionsRateDataPoint(now, record["rate"]) - if err != nil { - scrapeErrors = append(scrapeErrors, err) + if record["rate"] != "" { + if err := s.mb.RecordHaproxySessionsRateDataPoint(now, record["rate"]); err != nil { + scrapeErrors = append(scrapeErrors, err) + } } rb := 
s.mb.NewResourceBuilder() rb.SetHaproxyProxyName(record["pxname"]) diff --git a/receiver/haproxyreceiver/scraper_test.go b/receiver/haproxyreceiver/scraper_test.go index fd63bda111a4..9baaa61454e1 100644 --- a/receiver/haproxyreceiver/scraper_test.go +++ b/receiver/haproxyreceiver/scraper_test.go @@ -60,3 +60,46 @@ func Test_scraper_readStats(t *testing.T) { pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreResourceAttributeValue("haproxy.addr"), pmetrictest.IgnoreResourceMetricsOrder())) } + +func Test_scraper_readStatsWithIncompleteValues(t *testing.T) { + f, err := os.MkdirTemp("", "haproxytest") + require.NoError(t, err) + socketAddr := filepath.Join(f, "testhaproxy.sock") + l, err := net.Listen("unix", socketAddr) + require.NoError(t, err) + defer l.Close() + + go func() { + c, err2 := l.Accept() + require.NoError(t, err2) + + buf := make([]byte, 512) + nr, err2 := c.Read(buf) + require.NoError(t, err2) + + data := string(buf[0:nr]) + switch data { + case "show stats\n": + stats, err2 := os.ReadFile(filepath.Join("testdata", "30252_stats.txt")) + require.NoError(t, err2) + _, err2 = c.Write(stats) + require.NoError(t, err2) + default: + require.Fail(t, fmt.Sprintf("invalid message: %v", data)) + } + }() + + haProxyCfg := newDefaultConfig().(*Config) + haProxyCfg.Endpoint = socketAddr + s := newScraper(haProxyCfg, receivertest.NewNopCreateSettings()) + m, err := s.scrape(context.Background()) + require.NoError(t, err) + require.NotNil(t, m) + + expectedFile := filepath.Join("testdata", "scraper", "30252_expected.yaml") + expectedMetrics, err := golden.ReadMetrics(expectedFile) + require.NoError(t, err) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, m, pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreResourceAttributeValue("haproxy.addr"), + pmetrictest.IgnoreResourceMetricsOrder())) +} diff --git a/receiver/haproxyreceiver/testdata/30252_stats.txt b/receiver/haproxyreceiver/testdata/30252_stats.txt new file mode 100644 index 000000000000..44385e3dd43d --- /dev/null +++ b/receiver/haproxyreceiver/testdata/30252_stats.txt @@ -0,0 +1,7 @@ +# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,agent_status,agent_code,agent_duration,check_desc,agent_desc,check_rise,check_fall,check_health,agent_rise,agent_fall,agent_health,addr,cookie,mode,algo,conn_rate,conn_rate_max,conn_tot,intercepted,dcon,dses,wrew,connect,reuse,cache_lookups,cache_hits,srv_icur,src_ilim,qtime_max,ctime_max,rtime_max,ttime_max,eint,idle_conn_cur,safe_conn_cur,used_conn_cur,need_conn_est,uweight,agg_server_status,agg_server_check_status,agg_check_status,-,ssl_sess,ssl_reused_sess,ssl_failed_handshake,h2_headers_rcvd,h2_data_rcvd,h2_settings_rcvd,h2_rst_stream_rcvd,h2_goaway_rcvd,h2_detected_conn_protocol_errors,h2_detected_strm_protocol_errors,h2_rst_stream_resp,h2_goaway_resp,h2_open_connections,h2_backend_open_streams,h2_total_connections,h2_backend_total_streams,h1_open_connections,h1_open_streams,h1_total_connections,h1_total_streams,h1_bytes_in,h1_bytes_out,h1_spliced_bytes_in,h1_spliced_bytes_out, 
+stats,FRONTEND,,,0,1,524268,2,1444,47008,0,0,0,,,,,OPEN,,,,,,,,,1,2,0,,,,0,0,0,1,,,,0,2,0,0,0,0,,0,1,2,,,0,0,0,0,,,,,,,,,,,,,,,,,,,,,http,,0,1,2,2,0,0,0,,,0,0,,,,,,,0,,,,,,,,,-,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,1594,47052,0,0, +myfrontend,FRONTEND,,,1,1,524268,1,85470,107711,0,0,0,,,,,OPEN,,,,,,,,,1,3,0,,,,0,0,0,1,,,,0,134,0,0,0,0,,0,11,134,,,0,0,0,0,,,,,,,,,,,,,,,,,,,,,http,,0,1,1,0,0,0,0,,,0,0,,,,,,,0,,,,,,,,,-,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,134,94712,107309,0,0, +webservers,s1,0,0,0,1,,45,28734,36204,,0,,0,0,0,0,UP,1,1,0,0,0,159,0,,1,4,1,,45,,2,0,,4,L4OK,,0,0,45,0,0,0,0,,,,45,0,0,,,,,3,,,0,1,4,95,,,,Layer4 check passed,,2,3,4,,,,192.168.16.2:8080,,http,,,,,,,,0,1,44,,,1,,0,1,26,184,0,0,1,0,1,1,,,,-,0,0,0,,,,,,,,,,,,,,,,,,,,,, +webservers,s,,,,,,,,,,,,,,,,UP,,,,,,,,,,,,,,,,,,,LOK,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Layer check passed,,,,,,,,...,,http,,,,,,,,,,,,,,,,,,,,,,,,,,,,-,,,,,,,,,,,,,,,,,,,,,,,,, +webservers,s3,0,0,0,1,,44,28072,35376,,0,,0,0,0,0,UP,1,1,0,0,0,159,0,,1,4,3,,44,,2,0,,4,L4OK,,0,0,44,0,0,0,0,,,,44,0,0,,,,,4,,,0,1,4,121,,,,Layer4 check passed,,2,3,4,,,,192.168.16.4:8080,,http,,,,,,,,0,1,43,,,1,,0,3,25,1331,0,0,1,0,1,1,,,,-,0,0,0,,,,,,,,,,,,,,,,,,,,,, +webservers,BACKEND,0,0,0,1,52427,134,85470,107711,0,0,,0,0,0,0,UP,3,3,0,,0,159,0,,1,4,0,,134,,1,0,,11,,,,0,134,0,0,0,0,,,,134,0,0,0,0,0,0,3,,,0,1,4,105,,,,,,,,,,,,,,http,roundrobin,,,,,,,0,3,131,0,0,,,0,3,26,1331,0,,,,,3,0,0,0,-,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,3,134,107309,91496,0,0, \ No newline at end of file diff --git a/receiver/haproxyreceiver/testdata/scraper/30252_expected.yaml b/receiver/haproxyreceiver/testdata/scraper/30252_expected.yaml new file mode 100644 index 000000000000..b1b2d4b59bd9 --- /dev/null +++ b/receiver/haproxyreceiver/testdata/scraper/30252_expected.yaml @@ -0,0 +1,842 @@ +resourceMetrics: + - resource: + attributes: + - key: haproxy.addr + value: + stringValue: /var/folders/v7/y4ndd30x2w98zq5trwl9tnl40000gn/T/haproxytest3528850750/testhaproxy.sock + - key: haproxy.proxy_name + value: + stringValue: myfrontend + - key: haproxy.service_name + value: + stringValue: FRONTEND + scopeMetrics: + - metrics: + - description: Bytes in. Corresponds to HAProxy's `bin` metric. + name: haproxy.bytes.input + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "85470" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Bytes out. Corresponds to HAProxy's `bout` metric. + name: haproxy.bytes.output + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "107711" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Number of connections over the last elapsed second (frontend). Corresponds to HAProxy's `conn_rate` metric. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.connections.rate + unit: '{connections}' + - description: Requests denied because of security concerns. Corresponds to HAProxy's `dreq` metric + name: haproxy.requests.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Cumulative number of request errors. Corresponds to HAProxy's `ereq` metric. 
+ name: haproxy.requests.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: HTTP requests per second over last elapsed second. Corresponds to HAProxy's `req_rate` metric. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.requests.rate + unit: '{requests}' + - description: Total number of HTTP requests received. Corresponds to HAProxy's `req_tot`, `hrsp_1xx`, `hrsp_2xx`, `hrsp_3xx`, `hrsp_4xx`, `hrsp_5xx` and `hrsp_other` metrics. + name: haproxy.requests.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 1xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "134" + attributes: + - key: status_code + value: + stringValue: 2xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 3xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 4xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 5xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: other + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Responses denied because of security concerns. Corresponds to HAProxy's `dresp` metric + name: haproxy.responses.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{responses}' + - description: Current sessions. Corresponds to HAProxy's `scur` metric. + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.count + unit: '{sessions}' + - description: Number of sessions per second over last elapsed second. Corresponds to HAProxy's `rate` metric. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.rate + unit: '{sessions}' + scope: + name: otelcol/haproxyreceiver + version: latest + - resource: + attributes: + - key: haproxy.addr + value: + stringValue: /var/folders/v7/y4ndd30x2w98zq5trwl9tnl40000gn/T/haproxytest3528850750/testhaproxy.sock + - key: haproxy.proxy_name + value: + stringValue: stats + - key: haproxy.service_name + value: + stringValue: FRONTEND + scopeMetrics: + - metrics: + - description: Bytes in. Corresponds to HAProxy's `bin` metric. + name: haproxy.bytes.input + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1444" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Bytes out. Corresponds to HAProxy's `bout` metric. + name: haproxy.bytes.output + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "47008" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Number of connections over the last elapsed second (frontend). Corresponds to HAProxy's `conn_rate` metric. 
+ gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.connections.rate + unit: '{connections}' + - description: Requests denied because of security concerns. Corresponds to HAProxy's `dreq` metric + name: haproxy.requests.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Cumulative number of request errors. Corresponds to HAProxy's `ereq` metric. + name: haproxy.requests.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: HTTP requests per second over last elapsed second. Corresponds to HAProxy's `req_rate` metric. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.requests.rate + unit: '{requests}' + - description: Total number of HTTP requests received. Corresponds to HAProxy's `req_tot`, `hrsp_1xx`, `hrsp_2xx`, `hrsp_3xx`, `hrsp_4xx`, `hrsp_5xx` and `hrsp_other` metrics. + name: haproxy.requests.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 1xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2" + attributes: + - key: status_code + value: + stringValue: 2xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 3xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 4xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 5xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: other + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Responses denied because of security concerns. Corresponds to HAProxy's `dresp` metric + name: haproxy.responses.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{responses}' + - description: Current sessions. Corresponds to HAProxy's `scur` metric. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.count + unit: '{sessions}' + - description: Number of sessions per second over last elapsed second. Corresponds to HAProxy's `rate` metric. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.rate + unit: '{sessions}' + scope: + name: otelcol/haproxyreceiver + version: latest + - resource: + attributes: + - key: haproxy.addr + value: + stringValue: /var/folders/v7/y4ndd30x2w98zq5trwl9tnl40000gn/T/haproxytest3528850750/testhaproxy.sock + - key: haproxy.proxy_name + value: + stringValue: webservers + - key: haproxy.service_name + value: + stringValue: BACKEND + scopeMetrics: + - metrics: + - description: Bytes in. Corresponds to HAProxy's `bin` metric. + name: haproxy.bytes.input + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "85470" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Bytes out. 
Corresponds to HAProxy's `bout` metric. + name: haproxy.bytes.output + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "107711" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Number of requests that encountered an error trying to connect to a backend server. The backend stat is the sum of the stat. Corresponds to HAProxy's `econ` metric + name: haproxy.connections.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Number of times a connection to a server was retried. Corresponds to HAProxy's `wretr` metric. + name: haproxy.connections.retries + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{retries}' + - description: Requests denied because of security concerns. Corresponds to HAProxy's `dreq` metric + name: haproxy.requests.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Current queued requests. For the backend this reports the number queued without a server assigned. Corresponds to HAProxy's `qcur` metric. + name: haproxy.requests.queued + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Number of times a request was redispatched to another server. Corresponds to HAProxy's `wredis` metric. + name: haproxy.requests.redispatched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Total number of HTTP requests received. Corresponds to HAProxy's `req_tot`, `hrsp_1xx`, `hrsp_2xx`, `hrsp_3xx`, `hrsp_4xx`, `hrsp_5xx` and `hrsp_other` metrics. + name: haproxy.requests.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 1xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "134" + attributes: + - key: status_code + value: + stringValue: 2xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 3xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 4xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 5xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: other + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Responses denied because of security concerns. Corresponds to HAProxy's `dresp` metric + name: haproxy.responses.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{responses}' + - description: Cumulative number of response errors. Corresponds to HAProxy's `eresp` metric, `srv_abrt` will be counted here also. 
+ name: haproxy.responses.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Number of times a server was selected, either for new sessions or when re-dispatching. Corresponds to HAProxy's `lbtot` metric. + name: haproxy.server_selected.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "134" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{selections}' + - description: Average total session time in ms over the last 1024 requests. Corresponds to HAProxy's `ttime` metric. + gauge: + dataPoints: + - asDouble: 105 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.average + unit: ms + - description: Current sessions. Corresponds to HAProxy's `scur` metric. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.count + unit: '{sessions}' + - description: Number of sessions per second over last elapsed second. Corresponds to HAProxy's `rate` metric. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.rate + unit: '{sessions}' + scope: + name: otelcol/haproxyreceiver + version: latest + - resource: + attributes: + - key: haproxy.addr + value: + stringValue: /var/folders/v7/y4ndd30x2w98zq5trwl9tnl40000gn/T/haproxytest3528850750/testhaproxy.sock + - key: haproxy.proxy_name + value: + stringValue: webservers + - key: haproxy.service_name + value: + stringValue: s1 + scopeMetrics: + - metrics: + - description: Bytes in. Corresponds to HAProxy's `bin` metric. + name: haproxy.bytes.input + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "28734" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Bytes out. Corresponds to HAProxy's `bout` metric. + name: haproxy.bytes.output + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "36204" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Number of requests that encountered an error trying to connect to a backend server. The backend stat is the sum of the stat. Corresponds to HAProxy's `econ` metric + name: haproxy.connections.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Number of times a connection to a server was retried. Corresponds to HAProxy's `wretr` metric. + name: haproxy.connections.retries + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{retries}' + - description: Current queued requests. For the backend this reports the number queued without a server assigned. Corresponds to HAProxy's `qcur` metric. + name: haproxy.requests.queued + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Number of times a request was redispatched to another server. Corresponds to HAProxy's `wredis` metric. 
+ name: haproxy.requests.redispatched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Total number of HTTP requests received. Corresponds to HAProxy's `req_tot`, `hrsp_1xx`, `hrsp_2xx`, `hrsp_3xx`, `hrsp_4xx`, `hrsp_5xx` and `hrsp_other` metrics. + name: haproxy.requests.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 1xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "45" + attributes: + - key: status_code + value: + stringValue: 2xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 3xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 4xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 5xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: other + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Responses denied because of security concerns. Corresponds to HAProxy's `dresp` metric + name: haproxy.responses.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{responses}' + - description: Cumulative number of response errors. Corresponds to HAProxy's `eresp` metric, `srv_abrt` will be counted here also. + name: haproxy.responses.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Number of times a server was selected, either for new sessions or when re-dispatching. Corresponds to HAProxy's `lbtot` metric. + name: haproxy.server_selected.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "45" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{selections}' + - description: Average total session time in ms over the last 1024 requests. Corresponds to HAProxy's `ttime` metric. + gauge: + dataPoints: + - asDouble: 95 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.average + unit: ms + - description: Current sessions. Corresponds to HAProxy's `scur` metric. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.count + unit: '{sessions}' + - description: Number of sessions per second over last elapsed second. Corresponds to HAProxy's `rate` metric. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.rate + unit: '{sessions}' + scope: + name: otelcol/haproxyreceiver + version: latest + - resource: + attributes: + - key: haproxy.addr + value: + stringValue: /var/folders/v7/y4ndd30x2w98zq5trwl9tnl40000gn/T/haproxytest3528850750/testhaproxy.sock + - key: haproxy.proxy_name + value: + stringValue: webservers + - key: haproxy.service_name + value: + stringValue: s3 + scopeMetrics: + - metrics: + - description: Bytes in. Corresponds to HAProxy's `bin` metric. 
+ name: haproxy.bytes.input + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "28072" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Bytes out. Corresponds to HAProxy's `bout` metric. + name: haproxy.bytes.output + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "35376" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: by + - description: Number of requests that encountered an error trying to connect to a backend server. The backend stat is the sum of the stat. Corresponds to HAProxy's `econ` metric + name: haproxy.connections.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Number of times a connection to a server was retried. Corresponds to HAProxy's `wretr` metric. + name: haproxy.connections.retries + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{retries}' + - description: Current queued requests. For the backend this reports the number queued without a server assigned. Corresponds to HAProxy's `qcur` metric. + name: haproxy.requests.queued + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Number of times a request was redispatched to another server. Corresponds to HAProxy's `wredis` metric. + name: haproxy.requests.redispatched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Total number of HTTP requests received. Corresponds to HAProxy's `req_tot`, `hrsp_1xx`, `hrsp_2xx`, `hrsp_3xx`, `hrsp_4xx`, `hrsp_5xx` and `hrsp_other` metrics. + name: haproxy.requests.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 1xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "44" + attributes: + - key: status_code + value: + stringValue: 2xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 3xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 4xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: 5xx + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status_code + value: + stringValue: other + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{requests}' + - description: Responses denied because of security concerns. Corresponds to HAProxy's `dresp` metric + name: haproxy.responses.denied + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{responses}' + - description: Cumulative number of response errors. Corresponds to HAProxy's `eresp` metric, `srv_abrt` will be counted here also. 
+ name: haproxy.responses.errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Number of times a server was selected, either for new sessions or when re-dispatching. Corresponds to HAProxy's `lbtot` metric. + name: haproxy.server_selected.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "44" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{selections}' + - description: Average total session time in ms over the last 1024 requests. Corresponds to HAProxy's `ttime` metric. + gauge: + dataPoints: + - asDouble: 121 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.average + unit: ms + - description: Current sessions. Corresponds to HAProxy's `scur` metric. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.count + unit: '{sessions}' + - description: Number of sessions per second over last elapsed second. Corresponds to HAProxy's `rate` metric. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: haproxy.sessions.rate + unit: '{sessions}' + scope: + name: otelcol/haproxyreceiver + version: latest diff --git a/receiver/pulsarreceiver/go.mod b/receiver/pulsarreceiver/go.mod index 0a02155b09ed..1a25a0c2343b 100644 --- a/receiver/pulsarreceiver/go.mod +++ b/receiver/pulsarreceiver/go.mod @@ -33,7 +33,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/danieljoos/wincred v1.0.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/frankban/quicktest v1.14.2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect diff --git a/receiver/pulsarreceiver/go.sum b/receiver/pulsarreceiver/go.sum index 432f6c02d239..26632e58d60d 100644 --- a/receiver/pulsarreceiver/go.sum +++ b/receiver/pulsarreceiver/go.sum @@ -95,8 +95,9 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= -github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b h1:HBah4D48ypg3J7Np4N+HY/ZR76fx3HEUGxDU6Uk39oQ= github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -310,6 +311,7 @@ github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs github.com/mtibben/percent 
v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -801,6 +803,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/receiver/receivercreator/README.md b/receiver/receivercreator/README.md index 20cd462e7d2e..03e2bcea9181 100644 --- a/receiver/receivercreator/README.md +++ b/receiver/receivercreator/README.md @@ -43,7 +43,7 @@ instantiate that receiver. **receivers.<receiver_type/id>.rule** Rule expression using [expvar -syntax](https://github.com/antonmedv/expr/blob/master/docs/language-definition.md). +syntax](https://github.com/expr-lang/expr/blob/master/docs/language-definition.md). Variables available are detailed below in [Rule Expressions](#rule-expressions). 
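Aside (not part of the patch): the README hunk above only swaps the link to the expr language definition from `antonmedv/expr` to `expr-lang/expr`; the rule mechanism itself is unchanged. For context, a minimal sketch of how such a rule expression is typically used in a `receiver_creator` configuration is shown below. The observer name, receiver type, and settings are illustrative assumptions for this sketch, not values taken from this change.

```yaml
receivers:
  receiver_creator:
    # Observer extensions that report discovered endpoints (assumed example).
    watch_observers: [k8s_observer]
    receivers:
      redis/on_pod:
        # Rule expression evaluated with expr-lang/expr against each discovered
        # endpoint; the sub-receiver is started only when it evaluates to true.
        rule: type == "port" && port == 6379 && pod.name matches "redis"
        config:
          collection_interval: 30s
```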
diff --git a/receiver/saphanareceiver/client.go b/receiver/saphanareceiver/client.go index da0a45f84a71..d11cef26b5ab 100644 --- a/receiver/saphanareceiver/client.go +++ b/receiver/saphanareceiver/client.go @@ -102,7 +102,7 @@ func newSapHanaClient(cfg *Config, factory sapHanaConnectionFactory) client { } func (c *sapHanaClient) Connect(ctx context.Context) error { - connector, err := sapdriver.NewDSNConnector(fmt.Sprintf("hdb://%s:%s@%s", c.receiverConfig.Username, c.receiverConfig.Password, c.receiverConfig.TCPAddr.Endpoint)) + connector, err := sapdriver.NewDSNConnector(fmt.Sprintf("hdb://%s:%s@%s", c.receiverConfig.Username, string(c.receiverConfig.Password), c.receiverConfig.TCPAddr.Endpoint)) if err != nil { return fmt.Errorf("error generating DSN for SAP HANA connection: %w", err) } diff --git a/receiver/snowflakereceiver/go.mod b/receiver/snowflakereceiver/go.mod index 24e730ec9b7a..0d0f17f853ee 100644 --- a/receiver/snowflakereceiver/go.mod +++ b/receiver/snowflakereceiver/go.mod @@ -47,7 +47,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/goccy/go-json v0.10.0 // indirect diff --git a/receiver/snowflakereceiver/go.sum b/receiver/snowflakereceiver/go.sum index 7c4a8c4d94c5..a12b2eb0e403 100644 --- a/receiver/snowflakereceiver/go.sum +++ b/receiver/snowflakereceiver/go.sum @@ -80,8 +80,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= diff --git a/receiver/sqlqueryreceiver/db_client.go b/receiver/sqlqueryreceiver/db_client.go index 4c5056c07dc4..a8b1c8f0bafb 100644 --- a/receiver/sqlqueryreceiver/db_client.go +++ b/receiver/sqlqueryreceiver/db_client.go @@ -8,9 +8,10 @@ import ( // register db drivers _ "github.com/SAP/go-hdb/driver" - _ "github.com/denisenkom/go-mssqldb" _ "github.com/go-sql-driver/mysql" _ "github.com/lib/pq" + _ "github.com/microsoft/go-mssqldb" + _ "github.com/microsoft/go-mssqldb/integratedauth/krb5" _ "github.com/sijms/go-ora/v2" _ "github.com/snowflakedb/gosnowflake" "go.uber.org/multierr" diff --git a/receiver/sqlqueryreceiver/go.mod b/receiver/sqlqueryreceiver/go.mod index 794b09f76dc7..bcf93063bce5 100644 --- a/receiver/sqlqueryreceiver/go.mod +++ b/receiver/sqlqueryreceiver/go.mod @@ -4,10 +4,10 @@ go 1.20 require ( github.com/SAP/go-hdb v1.6.11 - 
github.com/denisenkom/go-mssqldb v0.12.3 github.com/docker/go-connections v0.4.0 github.com/go-sql-driver/mysql v1.7.1 github.com/lib/pq v1.10.9 + github.com/microsoft/go-mssqldb v1.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.91.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.91.0 @@ -32,7 +32,7 @@ require ( dario.cat/mergo v1.0.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -65,7 +65,7 @@ require ( github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/docker v24.0.7+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/expr-lang/expr v1.15.7 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect @@ -73,15 +73,22 @@ require ( github.com/goccy/go-json v0.10.0 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/flatbuffers v23.1.21+incompatible // indirect github.com/google/uuid v1.4.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/asmfmt v1.3.2 // indirect diff --git a/receiver/sqlqueryreceiver/go.sum b/receiver/sqlqueryreceiver/go.sum index c6a9366eb5c7..d6659dfe64d1 100644 --- a/receiver/sqlqueryreceiver/go.sum +++ b/receiver/sqlqueryreceiver/go.sum @@ -7,20 +7,19 @@ github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= 
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 h1:yfJe15aSwEQ6Oo6J+gdfdulPNoZ3TEhmbhLIoxZcA+U= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.0 h1:HCc0+LpPfpCKs6LGGLAhwBARt9632unrVcI6i8s/8os= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= @@ -106,10 +105,7 @@ github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= -github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= @@ -119,8 +115,8 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units 
v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -148,9 +144,9 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -190,12 +186,31 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 h1:2r2WiFeAwiJ/uyx1qIKnV1L4C9w/2V8ehlbJY4gjFaM= 
github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4/go.mod h1:1yEQhaLb/cETXCqQmdh7lDjupNAReO7c83AHyK2dJ48= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -232,6 +247,8 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= @@ -254,7 +271,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= @@ -271,7 +287,6 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.m github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod 
h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -309,6 +324,7 @@ github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -370,9 +386,8 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -392,14 +407,15 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -422,7 +438,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -434,19 +449,21 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -503,9 +520,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= diff --git a/reports/distributions/contrib.yaml b/reports/distributions/contrib.yaml index 2fbef2930e6b..aba751801861 100644 --- a/reports/distributions/contrib.yaml +++ b/reports/distributions/contrib.yaml @@ -53,7 +53,6 @@ components: - splunk_hec - sumologic - syslog - - tanzuobservability - tencentcloud_logservice - zipkin extension: diff --git a/reports/distributions/splunk.yaml b/reports/distributions/splunk.yaml index 1dc709c0b232..b9a5457ce794 100644 --- a/reports/distributions/splunk.yaml +++ b/reports/distributions/splunk.yaml @@ -15,6 +15,7 @@ components: connector: - spanmetrics exporter: + - awss3 - file - kafka - loadbalancing diff --git a/testbed/correctnesstests/metrics/correctness_test_case.go b/testbed/correctnesstests/metrics/correctness_test_case.go index f5ae409befe6..58765418e523 100644 --- a/testbed/correctnesstests/metrics/correctness_test_case.go +++ b/testbed/correctnesstests/metrics/correctness_test_case.go @@ -34,7 +34,7 @@ func newCorrectnessTestCase( func (tc *correctnessTestCase) startCollector() { tc.collector = testbed.NewInProcessCollector(componentFactories(tc.t)) - _, err := tc.collector.PrepareConfig(correctnesstests.CreateConfigYaml(tc.sender, tc.receiver, nil, "metrics")) + _, err := tc.collector.PrepareConfig(correctnesstests.CreateConfigYaml(tc.t, tc.sender, tc.receiver, nil, "metrics")) require.NoError(tc.t, err) rd, err := newResultsDir(tc.t.Name()) require.NoError(tc.t, err) diff --git a/testbed/correctnesstests/traces/correctness_test.go b/testbed/correctnesstests/traces/correctness_test.go index 5a1aab46b516..4c246ad1ff1c 100644 --- a/testbed/correctnesstests/traces/correctness_test.go +++ b/testbed/correctnesstests/traces/correctness_test.go @@ -55,7 +55,7 @@ func testWithTracingGoldenDataset( require.NoError(t, err, "default components resulted in: %v", err) runner := testbed.NewInProcessCollector(factories) validator := testbed.NewCorrectTestValidator(sender.ProtocolName(), receiver.ProtocolName(), dataProvider) - config := correctnesstests.CreateConfigYaml(sender, receiver, processors, "traces") + config := correctnesstests.CreateConfigYaml(t, sender, receiver, processors, "traces") log.Println(config) configCleanup, cfgErr := runner.PrepareConfig(config) require.NoError(t, cfgErr, "collector configuration resulted in: %v", cfgErr) @@ -123,7 +123,7 @@ func TestSporadicGoldenDataset(t *testing.T) { sending_queue: enabled: false `) - _, err = runner.PrepareConfig(correctnesstests.CreateConfigYaml(sender, receiver, nil, "traces")) + _, err = runner.PrepareConfig(correctnesstests.CreateConfigYaml(t, sender, receiver, nil, "traces")) require.NoError(t, err, "collector configuration resulted in: %v", err) validator := testbed.NewCorrectTestValidator(sender.ProtocolName(), receiver.ProtocolName(), dataProvider) tc := testbed.NewTestCase( diff 
--git a/testbed/correctnesstests/utils.go b/testbed/correctnesstests/utils.go index 6fe1790170cf..9df72cb89a1b 100644 --- a/testbed/correctnesstests/utils.go +++ b/testbed/correctnesstests/utils.go @@ -20,6 +20,7 @@ import ( // processors, and a pipeline type. A collector created from the resulting yaml string should be able to talk // the specified sender and receiver. func CreateConfigYaml( + t testing.TB, sender testbed.DataSender, receiver testbed.DataReceiver, processors map[string]string, @@ -51,6 +52,9 @@ processors: extensions: service: + telemetry: + metrics: + address: 127.0.0.1:%d extensions: pipelines: %s: @@ -64,6 +68,7 @@ service: sender.GenConfigYAMLStr(), receiver.GenConfigYAMLStr(), processorsSections, + testbed.GetAvailablePort(t), pipelineType, sender.ProtocolName(), processorsList, diff --git a/testbed/datasenders/k8s.go b/testbed/datasenders/k8s.go index 330771296ccc..a5f9dd30a6ea 100644 --- a/testbed/datasenders/k8s.go +++ b/testbed/datasenders/k8s.go @@ -175,7 +175,7 @@ func NewKubernetesContainerWriter() *FileLogK8sWriter { regex: '^(?P