diff --git a/cmd/builtin_output_gen.go b/cmd/builtin_output_gen.go
index 3473681bf44..35fe34505b8 100644
--- a/cmd/builtin_output_gen.go
+++ b/cmd/builtin_output_gen.go
@@ -7,11 +7,11 @@ import (
"strings"
)
-const _builtinOutputName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsd"
+const _builtinOutputName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetry"
-var _builtinOutputIndex = [...]uint8{0, 5, 8, 15, 41, 49, 53, 58, 64}
+var _builtinOutputIndex = [...]uint8{0, 5, 8, 15, 41, 49, 53, 58, 64, 90}
-const _builtinOutputLowerName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsd"
+const _builtinOutputLowerName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetry"
func (i builtinOutput) String() string {
if i >= builtinOutput(len(_builtinOutputIndex)-1) {
@@ -32,9 +32,10 @@ func _builtinOutputNoOp() {
_ = x[builtinOutputJSON-(5)]
_ = x[builtinOutputKafka-(6)]
_ = x[builtinOutputStatsd-(7)]
+ _ = x[builtinOutputExperimentalOpentelemetry-(8)]
}
-var _builtinOutputValues = []builtinOutput{builtinOutputCloud, builtinOutputCSV, builtinOutputDatadog, builtinOutputExperimentalPrometheusRW, builtinOutputInfluxdb, builtinOutputJSON, builtinOutputKafka, builtinOutputStatsd}
+var _builtinOutputValues = []builtinOutput{builtinOutputCloud, builtinOutputCSV, builtinOutputDatadog, builtinOutputExperimentalPrometheusRW, builtinOutputInfluxdb, builtinOutputJSON, builtinOutputKafka, builtinOutputStatsd, builtinOutputExperimentalOpentelemetry}
var _builtinOutputNameToValueMap = map[string]builtinOutput{
_builtinOutputName[0:5]: builtinOutputCloud,
@@ -53,6 +54,8 @@ var _builtinOutputNameToValueMap = map[string]builtinOutput{
_builtinOutputLowerName[53:58]: builtinOutputKafka,
_builtinOutputName[58:64]: builtinOutputStatsd,
_builtinOutputLowerName[58:64]: builtinOutputStatsd,
+ _builtinOutputName[64:90]: builtinOutputExperimentalOpentelemetry,
+ _builtinOutputLowerName[64:90]: builtinOutputExperimentalOpentelemetry,
}
var _builtinOutputNames = []string{
@@ -64,6 +67,7 @@ var _builtinOutputNames = []string{
_builtinOutputName[49:53],
_builtinOutputName[53:58],
_builtinOutputName[58:64],
+ _builtinOutputName[64:90],
}
// builtinOutputString retrieves an enum value from the enum constants string name.
diff --git a/cmd/outputs.go b/cmd/outputs.go
index 5ff05436054..67c0ddacb77 100644
--- a/cmd/outputs.go
+++ b/cmd/outputs.go
@@ -17,11 +17,14 @@ import (
"go.k6.io/k6/output/statsd"
"github.com/grafana/xk6-dashboard/dashboard"
+ "github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry"
"github.com/grafana/xk6-output-prometheus-remote/pkg/remotewrite"
)
// builtinOutput marks the available builtin outputs.
//
+// NOTE: the enumer tool used by go:generate below is github.com/dmarkham/enumer
+//
//go:generate enumer -type=builtinOutput -trimprefix builtinOutput -transform=kebab -output builtin_output_gen.go
type builtinOutput uint32
@@ -34,6 +37,7 @@ const (
builtinOutputJSON
builtinOutputKafka
builtinOutputStatsd
+ builtinOutputExperimentalOpentelemetry
)
// TODO: move this to an output sub-module after we get rid of the old collectors?
@@ -63,6 +67,9 @@ func getAllOutputConstructors() (map[string]output.Constructor, error) {
return remotewrite.New(params)
},
"web-dashboard": dashboard.New,
+ builtinOutputExperimentalOpentelemetry.String(): func(params output.Params) (output.Output, error) {
+ return opentelemetry.New(params)
+ },
}
exts := ext.Get(ext.OutputExtension)
diff --git a/cmd/outputs_test.go b/cmd/outputs_test.go
index 8fabbd0ae78..0a7d916d308 100644
--- a/cmd/outputs_test.go
+++ b/cmd/outputs_test.go
@@ -10,7 +10,7 @@ func TestBuiltinOutputString(t *testing.T) {
t.Parallel()
exp := []string{
"cloud", "csv", "datadog", "experimental-prometheus-rw",
- "influxdb", "json", "kafka", "statsd",
+ "influxdb", "json", "kafka", "statsd", "experimental-opentelemetry",
}
assert.Equal(t, exp, builtinOutputStrings())
}
diff --git a/go.mod b/go.mod
index 213559d63cb..991dedb7df6 100644
--- a/go.mod
+++ b/go.mod
@@ -15,6 +15,7 @@ require (
github.com/gorilla/websocket v1.5.1
github.com/grafana/xk6-browser v1.6.1-0.20240701105714-29f6ef3049fe
github.com/grafana/xk6-dashboard v0.7.5
+ github.com/grafana/xk6-output-opentelemetry v0.1.1
github.com/grafana/xk6-output-prometheus-remote v0.4.0
github.com/grafana/xk6-redis v0.3.0
github.com/grafana/xk6-webcrypto v0.4.0
@@ -62,7 +63,7 @@ require (
github.com/andybalholm/cascadia v1.3.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bufbuild/protocompile v0.10.0 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chromedp/cdproto v0.0.0-20221023212508-67ada9507fb2 // indirect
github.com/chromedp/sysutil v1.0.0 // indirect
@@ -70,12 +71,12 @@ require (
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dlclark/regexp2 v1.9.0 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
- github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/pprof v0.0.0-20230728192033-2ba5b33183c6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/sobek v0.0.0-20240708142253-1d1dbd360a51
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
@@ -89,13 +90,16 @@ require (
github.com/redis/go-redis/v9 v9.0.5 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)
diff --git a/go.sum b/go.sum
index 0ef7afade62..78a8ed8fe68 100644
--- a/go.sum
+++ b/go.sum
@@ -23,8 +23,8 @@ github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao=
github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y=
github.com/bufbuild/protocompile v0.10.0 h1:+jW/wnLMLxaCEG8AX9lD0bQ5v9h1RUiMKOBOT5ll9dM=
github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -56,8 +56,8 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-sourcemap/sourcemap v2.1.4+incompatible h1:a+iTbH5auLKxaNwQFg0B+TCYl6lbukKPc7b5x0n1s6Q=
@@ -88,6 +88,8 @@ github.com/grafana/xk6-browser v1.6.1-0.20240701105714-29f6ef3049fe h1:uE7e1Lzri
github.com/grafana/xk6-browser v1.6.1-0.20240701105714-29f6ef3049fe/go.mod h1:TNngUsbmV3I5NDVklGxjpAR2znEjYEsBtHQirGw8nAI=
github.com/grafana/xk6-dashboard v0.7.5 h1:TcILyffT/Ea/XD7xG1jMA5lwtusOPRbEQsQDHmO30Mk=
github.com/grafana/xk6-dashboard v0.7.5/go.mod h1:Y75F8xmgCraKT+pBzFH6me9AyH5PkXD+Bxo1dm6Il/M=
+github.com/grafana/xk6-output-opentelemetry v0.1.1 h1:kLfzKkL9olhmMO+Kmr7ObhX3LknSAbUbzFaDG6mQVeg=
+github.com/grafana/xk6-output-opentelemetry v0.1.1/go.mod h1:wPvue+M7wcAAsOeshw8LcXQCTkf2t2xVrhQ3nvNSmWo=
github.com/grafana/xk6-output-prometheus-remote v0.4.0 h1:7k3xjuKaD9BwcX8iuu5v6PtAK1L53kvx1r8BaTUfRH4=
github.com/grafana/xk6-output-prometheus-remote v0.4.0/go.mod h1:esXXthLoVp9JUdGkECRthESVYu0TQTR24wrx2nRM9ak=
github.com/grafana/xk6-redis v0.3.0 h1:eV1YO0miPqGFilN8sL/3OdO6Mm+hZH2nsvJm5dkE0CM=
@@ -98,8 +100,8 @@ github.com/grafana/xk6-websockets v0.6.0 h1:k1z8Xy4sGOB2+VF0q621MoNo0mYY12s8Cqqo
github.com/grafana/xk6-websockets v0.6.0/go.mod h1:D76ALTjp3bMugqx7ulseJ9TZrmSvDogieWxWXr8S0+A=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc h1:KpMgaYJRieDkHZJWY3LMafvtqS/U8xX6+lUN+OKpl/Y=
@@ -159,7 +161,7 @@ github.com/r3labs/sse/v2 v2.10.0 h1:hFEkLLFY4LDifoHdiCN/LlGBAdVJYsANaLqNYa1l/v0=
github.com/r3labs/sse/v2 v2.10.0/go.mod h1:Igau6Whc+F17QUgML1fYe1VPZzTV6EMCnYktEmkNJ7I=
github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl5o=
github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e h1:zWKUYT07mGmVBH+9UgnHXd/ekCK99C8EbDSAt5qsjXE=
github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs=
@@ -192,6 +194,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 h1:f2jriWfOdldanBwS9jNBdeOKAQN7b4ugAMaNu1/1k9g=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0/go.mod h1:B+bcQI1yTY+N0vqMpoZbEN7+XU4tNM0DmUiOwebFJWI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 h1:mM8nKi6/iFQ0iqst80wDHU2ge198Ye/TfN0WBS5U24Y=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0/go.mod h1:0PrIIzDteLSmNyxqcGYRL4mDIo8OTuBAOI/Bn1URxac=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE=
@@ -202,6 +208,8 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8=
+go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
@@ -311,8 +319,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
index 16abdfc084f..9433004a280 100644
--- a/vendor/github.com/cenkalti/backoff/v4/README.md
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
@@ -21,8 +21,6 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[travis]: https://travis-ci.org/cenkalti/backoff
-[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
index 2c56c1e7189..aac99f196ad 100644
--- a/vendor/github.com/cenkalti/backoff/v4/exponential.go
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -71,6 +71,9 @@ type Clock interface {
Now() time.Time
}
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
@@ -81,7 +84,7 @@ const (
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff() *ExponentialBackOff {
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
b := &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
@@ -91,10 +94,62 @@ func NewExponentialBackOff() *ExponentialBackOff {
Stop: Stop,
Clock: SystemClock,
}
+ for _, fn := range opts {
+ fn(b)
+ }
b.Reset()
return b
}
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.InitialInterval = duration
+ }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.RandomizationFactor = randomizationFactor
+ }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Multiplier = multiplier
+ }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxInterval = duration
+ }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxElapsedTime = duration
+ }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Stop = duration
+ }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Clock = clock
+ }
+}
+
type systemClock struct{}
func (t systemClock) Now() time.Time {
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index 8969526a6e5..7c7f0c69cd9 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index fb2f866f4b7..30568e768dc 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
- outputFormat outputFormat
- prefix string
- values []any
- valuesStr string
- parentValuesStr string
- depth int
- opts *Options
- group string // for slog groups
- groupDepth int
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ depth int
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
}
// outputFormat indicates which outputFormat to use.
@@ -257,6 +256,13 @@ const (
outputJSON
)
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []any
@@ -264,76 +270,102 @@ type PseudoStruct []any
func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
if f.outputFormat == outputJSON {
- buf.WriteByte('{') // for the whole line
+ buf.WriteByte('{') // for the whole record
}
+ // Render builtins
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
- f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
- if f.parentValuesStr != "" {
- if continuing {
- buf.WriteByte(f.comma())
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
}
- buf.WriteString(f.parentValuesStr)
- continuing = true
- }
+ f.flatten(buf, vals, true) // escape user-provided keys
- groupDepth := f.groupDepth
- if f.group != "" {
- if f.valuesStr != "" || len(args) != 0 {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
- buf.WriteByte(f.colon())
- buf.WriteByte('{') // for the group
- continuing = false
- } else {
- // The group was empty
- groupDepth--
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
}
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
}
- if f.valuesStr != "" {
+ if bodyStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
- buf.WriteString(f.valuesStr)
- continuing = true
+ buf.WriteString(bodyStr)
}
- vals = args
- if hook := f.opts.RenderArgsHook; hook != nil {
- vals = hook(f.sanitize(vals))
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}') // for the whole record
}
- f.flatten(buf, vals, continuing, true) // escape user-provided keys
- for i := 0; i < groupDepth; i++ {
- buf.WriteByte('}') // for the groups
+ return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
}
- if f.outputFormat == outputJSON {
- buf.WriteByte('}') // for the whole line
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
}
return buf.String()
}
-// flatten renders a list of key-value pairs into a buffer. If continuing is
-// true, it assumes that the buffer has previous values and will emit a
-// separator (which depends on the output format) before the first pair it
-// writes. If escapeKeys is true, the keys are assumed to have
-// non-JSON-compatible characters in them and must be evaluated for escapes.
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
@@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
v := kvList[i+1]
- if i > 0 || continuing {
+ if i > 0 {
if f.outputFormat == outputJSON {
buf.WriteByte(f.comma())
} else {
@@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any {
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
-func (f *Formatter) startGroup(group string) {
+func (f *Formatter) startGroup(name string) {
// Unnamed groups are just inlined.
- if group == "" {
+ if name == "" {
return
}
- // Any saved values can no longer be changed.
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
- continuing := false
-
- if f.parentValuesStr != "" {
- buf.WriteString(f.parentValuesStr)
- continuing = true
- }
-
- if f.group != "" && f.valuesStr != "" {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
- buf.WriteByte(f.colon())
- buf.WriteByte('{') // for the group
- continuing = false
- }
-
- if f.valuesStr != "" {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(f.valuesStr)
- }
-
- // NOTE: We don't close the scope here - that's done later, when a log line
- // is actually rendered (because we have N scopes to close).
-
- f.parentValuesStr = buf.String()
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
// Start collecting new values.
- f.group = group
- f.groupDepth++
+ f.groupName = name
f.valuesStr = ""
f.values = nil
}
@@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) {
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
- f.flatten(buf, vals, false, true) // escape user-provided keys
+ f.flatten(buf, vals, true) // escape user-provided keys
f.valuesStr = buf.String()
}
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/LICENSE b/vendor/github.com/grafana/xk6-output-opentelemetry/LICENSE
new file mode 100644
index 00000000000..0ad25db4bd1
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/LICENSE
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/attribute.go b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/attribute.go
new file mode 100644
index 00000000000..ad4194ca145
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/attribute.go
@@ -0,0 +1,27 @@
+package opentelemetry
+
+import (
+ "github.com/mstoykov/atlas"
+ "go.k6.io/k6/metrics"
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// newAttributeSet converts a k6 tag set into
+// the equivalent set of opentelemetry attributes
+func newAttributeSet(t *metrics.TagSet) attribute.Set {
+ n := (*atlas.Node)(t)
+ if n.Len() < 1 {
+ return *attribute.EmptySet()
+ }
+ labels := make([]attribute.KeyValue, 0, n.Len())
+ for !n.IsRoot() {
+ prev, key, value := n.Data()
+ n = prev
+ if key == "" || value == "" {
+ continue
+ }
+ labels = append(labels, attribute.String(key, value))
+ }
+
+ return attribute.NewSet(labels...)
+}
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/config.go b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/config.go
new file mode 100644
index 00000000000..daa2523ad06
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/config.go
@@ -0,0 +1,268 @@
+package opentelemetry
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/mstoykov/envconfig"
+ "go.k6.io/k6/errext"
+ "go.k6.io/k6/errext/exitcodes"
+ k6Const "go.k6.io/k6/lib/consts"
+ "go.k6.io/k6/lib/types"
+ "gopkg.in/guregu/null.v3"
+)
+
+const (
+ // grpcExporterType GRPC exporter type
+ grpcExporterType = "grpc"
+ // httpExporterType HTTP exporter type
+ httpExporterType = "http"
+)
+
+// Config represents the configuration for the OpenTelemetry output
+type Config struct {
+ // ServiceName is the name of the service to use for the metrics
+ // export, if not set it will use "k6"
+ ServiceName null.String `json:"serviceName" envconfig:"K6_OTEL_SERVICE_NAME"`
+ // ServiceVersion is the version of the service to use for the metrics
+ // export, if not set it will use k6's library version
+ ServiceVersion null.String `json:"serviceVersion" envconfig:"K6_OTEL_SERVICE_VERSION"`
+ // MetricPrefix is the prefix to use for the metrics
+ MetricPrefix null.String `json:"metricPrefix" envconfig:"K6_OTEL_METRIC_PREFIX"`
+ // FlushInterval is the interval at which to flush metrics from the k6
+ FlushInterval types.NullDuration `json:"flushInterval" envconfig:"K6_OTEL_FLUSH_INTERVAL"`
+
+ // ExporterType sets the type of OpenTelemetry Exporter to use
+ ExporterType null.String `json:"exporterType" envconfig:"K6_OTEL_EXPORTER_TYPE"`
+ // ExportInterval configures the intervening time between metrics exports
+ ExportInterval types.NullDuration `json:"exportInterval" envconfig:"K6_OTEL_EXPORT_INTERVAL"`
+
+ // Headers in W3C Correlation-Context format without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2")
+ Headers null.String `json:"headers" envconfig:"K6_OTEL_HEADERS"`
+
+ // TLSInsecureSkipVerify disables verification of the server's certificate chain
+ TLSInsecureSkipVerify null.Bool `json:"tlsInsecureSkipVerify" envconfig:"K6_OTEL_TLS_INSECURE_SKIP_VERIFY"`
+ // TLSCertificate is the path to the certificate file (rootCAs) to use for the exporter's TLS connection
+ TLSCertificate null.String `json:"tlsCertificate" envconfig:"K6_OTEL_TLS_CERTIFICATE"`
+ // TLSClientCertificate is the path to the certificate file (must be PEM encoded data)
+ // to use for the exporter's TLS connection
+ TLSClientCertificate null.String `json:"tlsClientCertificate" envconfig:"K6_OTEL_TLS_CLIENT_CERTIFICATE"`
+ // TLSClientKey is the path to the private key file (must be PEM encoded data) to use for the exporter's TLS connection
+ TLSClientKey null.String `json:"tlsClientKey" envconfig:"K6_OTEL_TLS_CLIENT_KEY"`
+
+ // HTTPExporterInsecure disables client transport security for the Exporter's HTTP
+ // connection.
+ HTTPExporterInsecure null.Bool `json:"httpExporterInsecure" envconfig:"K6_OTEL_HTTP_EXPORTER_INSECURE"`
+ // HTTPExporterEndpoint sets the target endpoint the OpenTelemetry Exporter
+ // will connect to.
+ HTTPExporterEndpoint null.String `json:"httpExporterEndpoint" envconfig:"K6_OTEL_HTTP_EXPORTER_ENDPOINT"`
+ // HTTPExporterURLPath sets the target URL path the OpenTelemetry Exporter
+ HTTPExporterURLPath null.String `json:"httpExporterURLPath" envconfig:"K6_OTEL_HTTP_EXPORTER_URL_PATH"`
+
+ // GRPCExporterEndpoint sets the target endpoint the OpenTelemetry Exporter
+ // will connect to.
+ GRPCExporterEndpoint null.String `json:"grpcExporterEndpoint" envconfig:"K6_OTEL_GRPC_EXPORTER_ENDPOINT"`
+ // GRPCExporterInsecure disables client transport security for the Exporter's gRPC
+ // connection.
+ GRPCExporterInsecure null.Bool `json:"grpcExporterInsecure" envconfig:"K6_OTEL_GRPC_EXPORTER_INSECURE"`
+}
+
+// GetConsolidatedConfig combines the options' values from the different sources
+// and returns the merged options. The Order of precedence used is documented
+// in the k6 Documentation https://grafana.com/docs/k6/latest/using-k6/k6-options/how-to/#order-of-precedence.
+func GetConsolidatedConfig(jsonRawConf json.RawMessage, env map[string]string) (Config, error) {
+ cfg := newDefaultConfig()
+ if jsonRawConf != nil {
+ jsonConf, err := parseJSON(jsonRawConf)
+ if err != nil {
+ return cfg, fmt.Errorf("parse JSON options failed: %w", err)
+ }
+ cfg = cfg.Apply(jsonConf)
+ }
+
+ if len(env) > 0 {
+ envConf, err := parseEnvs(env)
+ if err != nil {
+ return cfg, fmt.Errorf("parse environment variables options failed: %w", err)
+ }
+ cfg = cfg.Apply(envConf)
+ }
+
+ if err := cfg.Validate(); err != nil {
+ // TODO: check why k6's still exiting with 255
+ return cfg, errext.WithExitCodeIfNone(
+ fmt.Errorf("error validating OpenTelemetry output config: %w", err),
+ exitcodes.InvalidConfig,
+ )
+ }
+
+ return cfg, nil
+}
+
+// newDefaultConfig creates a new default config with default values
+func newDefaultConfig() Config {
+ return Config{
+ ServiceName: null.StringFrom("k6"),
+ ServiceVersion: null.StringFrom(k6Const.Version),
+ ExporterType: null.StringFrom(grpcExporterType),
+
+ HTTPExporterInsecure: null.BoolFrom(false),
+ HTTPExporterEndpoint: null.StringFrom("localhost:4318"),
+ HTTPExporterURLPath: null.StringFrom("/v1/metrics"),
+
+ GRPCExporterInsecure: null.BoolFrom(false),
+ GRPCExporterEndpoint: null.StringFrom("localhost:4317"),
+
+ ExportInterval: types.NullDurationFrom(10 * time.Second),
+ FlushInterval: types.NullDurationFrom(1 * time.Second),
+ }
+}
+
+// Apply applies the new config to the existing one
+func (cfg Config) Apply(v Config) Config {
+ if v.ServiceName.Valid {
+ cfg.ServiceName = v.ServiceName
+ }
+
+ if v.ServiceVersion.Valid {
+ cfg.ServiceVersion = v.ServiceVersion
+ }
+
+ if v.MetricPrefix.Valid {
+ cfg.MetricPrefix = v.MetricPrefix
+ }
+
+ if v.FlushInterval.Valid {
+ cfg.FlushInterval = v.FlushInterval
+ }
+
+ if v.ExporterType.Valid {
+ cfg.ExporterType = v.ExporterType
+ }
+
+ if v.ExportInterval.Valid {
+ cfg.ExportInterval = v.ExportInterval
+ }
+
+ if v.HTTPExporterInsecure.Valid {
+ cfg.HTTPExporterInsecure = v.HTTPExporterInsecure
+ }
+
+ if v.HTTPExporterEndpoint.Valid {
+ cfg.HTTPExporterEndpoint = v.HTTPExporterEndpoint
+ }
+
+ if v.HTTPExporterURLPath.Valid {
+ cfg.HTTPExporterURLPath = v.HTTPExporterURLPath
+ }
+
+ if v.GRPCExporterEndpoint.Valid {
+ cfg.GRPCExporterEndpoint = v.GRPCExporterEndpoint
+ }
+
+ if v.GRPCExporterInsecure.Valid {
+ cfg.GRPCExporterInsecure = v.GRPCExporterInsecure
+ }
+
+ if v.TLSInsecureSkipVerify.Valid {
+ cfg.TLSInsecureSkipVerify = v.TLSInsecureSkipVerify
+ }
+
+ if v.TLSCertificate.Valid {
+ cfg.TLSCertificate = v.TLSCertificate
+ }
+
+ if v.TLSClientCertificate.Valid {
+ cfg.TLSClientCertificate = v.TLSClientCertificate
+ }
+
+ if v.TLSClientKey.Valid {
+ cfg.TLSClientKey = v.TLSClientKey
+ }
+
+ if v.Headers.Valid {
+ cfg.Headers = v.Headers
+ }
+
+ return cfg
+}
+
+// Validate validates the config
+func (cfg Config) Validate() error {
+ if cfg.ServiceName.String == "" {
+ return errors.New("providing service name is required")
+ }
+
+ // TODO: it's not actually required, but we should probably have a default
+ // check if it works without it
+ if cfg.ServiceVersion.String == "" {
+ return errors.New("providing service version is required")
+ }
+
+ if cfg.ExporterType.String != grpcExporterType && cfg.ExporterType.String != httpExporterType {
+ return fmt.Errorf(
+ "unsupported exporter type %q, currently only %q and %q are supported",
+ cfg.ExporterType.String,
+ grpcExporterType,
+ httpExporterType,
+ )
+ }
+
+ if cfg.ExporterType.String == grpcExporterType {
+ if cfg.GRPCExporterEndpoint.String == "" {
+ return errors.New("gRPC exporter endpoint is required")
+ }
+ }
+
+ if cfg.ExporterType.String == httpExporterType {
+ if cfg.HTTPExporterEndpoint.String == "" {
+ return errors.New("HTTP exporter endpoint is required")
+ }
+ }
+
+ return nil
+}
+
+// String returns a string representation of the config
+func (cfg Config) String() string {
+ var endpoint string
+ exporter := cfg.ExporterType.String
+
+ if cfg.ExporterType.String == httpExporterType {
+ endpoint = "http"
+ if !cfg.HTTPExporterInsecure.Bool {
+ endpoint += "s"
+ }
+
+ endpoint += "://" + cfg.HTTPExporterEndpoint.String + cfg.HTTPExporterURLPath.String
+ } else {
+ endpoint = cfg.GRPCExporterEndpoint.String
+
+ if cfg.GRPCExporterInsecure.Bool {
+ exporter += " (insecure)"
+ }
+ }
+
+ return fmt.Sprintf("%s, %s", exporter, endpoint)
+}
+
+// parseJSON parses the supplied JSON into a Config.
+func parseJSON(data json.RawMessage) (Config, error) {
+ var c Config
+ err := json.Unmarshal(data, &c)
+ return c, err
+}
+
+// parseEnvs parses the supplied environment variables into a Config.
+func parseEnvs(env map[string]string) (Config, error) {
+ cfg := Config{}
+
+ err := envconfig.Process("K6_OTEL_", &cfg, func(key string) (string, bool) {
+ v, ok := env[key]
+ return v, ok
+ })
+
+ return cfg, err
+}
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/exporter.go b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/exporter.go
new file mode 100644
index 00000000000..877994193d2
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/exporter.go
@@ -0,0 +1,127 @@
+package opentelemetry
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "google.golang.org/grpc/credentials"
+)
+
+func getExporter(cfg Config) (metric.Exporter, error) {
+ // at the point of writing this code
+ // ctx isn't used at any point in the exporter
+ // later on, it could be used for the connection timeout
+ ctx := context.Background()
+
+ tlsConfig, err := buildTLSConfig(
+ cfg.TLSInsecureSkipVerify,
+ cfg.TLSCertificate,
+ cfg.TLSClientCertificate,
+ cfg.TLSClientKey,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ var headers map[string]string
+ if cfg.Headers.Valid {
+ headers, err = parseHeaders(cfg.Headers.String)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse headers: %w", err)
+ }
+ }
+
+ exporterType := cfg.ExporterType.String
+
+ if exporterType == grpcExporterType {
+ return buildGRPCExporter(ctx, cfg, tlsConfig, headers)
+ }
+
+ if exporterType == httpExporterType {
+ return buildHTTPExporter(ctx, cfg, tlsConfig, headers)
+ }
+
+ return nil, errors.New("unsupported exporter type " + exporterType + " specified")
+}
+
+func buildHTTPExporter(
+ ctx context.Context,
+ cfg Config,
+ tlsConfig *tls.Config,
+ headers map[string]string,
+) (metric.Exporter, error) {
+ opts := []otlpmetrichttp.Option{
+ otlpmetrichttp.WithEndpoint(cfg.HTTPExporterEndpoint.String),
+ otlpmetrichttp.WithURLPath(cfg.HTTPExporterURLPath.String),
+ }
+
+ if cfg.HTTPExporterInsecure.Bool {
+ opts = append(opts, otlpmetrichttp.WithInsecure())
+ }
+
+ if len(headers) > 0 {
+ opts = append(opts, otlpmetrichttp.WithHeaders(headers))
+ }
+
+ if tlsConfig != nil {
+ opts = append(opts, otlpmetrichttp.WithTLSClientConfig(tlsConfig))
+ }
+
+ return otlpmetrichttp.New(ctx, opts...)
+}
+
+func buildGRPCExporter(
+ ctx context.Context,
+ cfg Config,
+ tlsConfig *tls.Config,
+ headers map[string]string,
+) (metric.Exporter, error) {
+ opt := []otlpmetricgrpc.Option{
+ otlpmetricgrpc.WithEndpoint(cfg.GRPCExporterEndpoint.String),
+ }
+
+ if cfg.GRPCExporterInsecure.Bool {
+ opt = append(opt, otlpmetricgrpc.WithInsecure())
+ }
+
+ if len(headers) > 0 {
+ opt = append(opt, otlpmetricgrpc.WithHeaders(headers))
+ }
+
+ if tlsConfig != nil {
+ opt = append(opt, otlpmetricgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig)))
+ }
+
+ return otlpmetricgrpc.New(ctx, opt...)
+}
+
+func parseHeaders(raw string) (map[string]string, error) {
+ headers := make(map[string]string)
+ for _, header := range strings.Split(raw, ",") {
+ rawKey, rawValue, ok := strings.Cut(header, "=")
+ if !ok {
+ return nil, fmt.Errorf("invalid header %q, expected format key=value", header)
+ }
+
+ key, err := url.PathUnescape(rawKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unescape header key %q: %w", rawKey, err)
+ }
+
+ value, err := url.PathUnescape(rawValue)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unescape header value %q: %w", rawValue, err)
+ }
+
+ headers[key] = value
+ }
+
+ return headers, nil
+}
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/gauge.go b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/gauge.go
new file mode 100644
index 00000000000..7dc60e75f02
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/gauge.go
@@ -0,0 +1,67 @@
+package opentelemetry
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+)
+
+// NewFloat64Gauge returns a new Float64Gauge.
+func NewFloat64Gauge() *Float64Gauge {
+ return &Float64Gauge{}
+}
+
+// Float64Gauge is a temporary implementation of the OpenTelemetry sync gauge
+// it will be replaced by the official implementation once it's available.
+//
+// https://github.com/open-telemetry/opentelemetry-go/issues/3984
+type Float64Gauge struct {
+ observations sync.Map
+}
+
+// Callback implements the callback function for the underlying asynchronous gauge
+// it observes the current state of all previous Set() calls.
+func (f *Float64Gauge) Callback(_ context.Context, o metric.Float64Observer) error {
+ var err error
+
+ f.observations.Range(func(key, value interface{}) bool {
+ var v float64
+
+ // TODO: improve type assertion
+ switch val := value.(type) {
+ case float64:
+ v = val
+ case int64:
+ v = float64(val)
+ default:
+ err = errors.New("unexpected type for value " + fmt.Sprintf("%T", val))
+ return false
+ }
+
+ attrs, ok := key.(attribute.Set)
+ if !ok {
+ err = errors.New("unexpected type for key")
+ return false
+ }
+
+ o.Observe(v, metric.WithAttributeSet(attrs))
+
+ return true
+ })
+
+ return err
+}
+
+// Set sets the value of the gauge.
+func (f *Float64Gauge) Set(val float64, attrs attribute.Set) {
+ f.observations.Store(attrs, val)
+}
+
+// Delete deletes the gauge.
+func (f *Float64Gauge) Delete(attrs attribute.Set) {
+ f.observations.Delete(attrs)
+}
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/output.go b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/output.go
new file mode 100644
index 00000000000..08debd5ba01
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/output.go
@@ -0,0 +1,205 @@
+// Package opentelemetry performs output operations for the opentelemetry extension
+package opentelemetry
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ otelMetric "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+ "go.k6.io/k6/metrics"
+ "go.k6.io/k6/output"
+)
+
+// Output implements the lib.Output interface
+type Output struct {
+ output.SampleBuffer
+
+ config Config
+ periodicFlusher *output.PeriodicFlusher
+ logger logrus.FieldLogger
+
+ meterProvider *metric.MeterProvider
+ metricsRegistry *registry
+}
+
+var _ output.WithStopWithTestError = new(Output)
+
+// New creates an instance of the collector
+func New(p output.Params) (*Output, error) {
+ conf, err := GetConsolidatedConfig(p.JSONConfig, p.Environment)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Output{
+ config: conf,
+ logger: p.Logger,
+ }, nil
+}
+
+// Description returns a human-readable description of the output that will be shown in `k6 run`
+func (o *Output) Description() string {
+ return fmt.Sprintf("opentelemetry (%s)", o.config)
+}
+
+// StopWithTestError flushes all remaining metrics and finalizes the test run
+func (o *Output) StopWithTestError(_ error) error {
+ o.logger.Debug("Stopping...")
+ defer o.logger.Debug("Stopped!")
+
+ if err := o.meterProvider.Shutdown(context.Background()); err != nil {
+ o.logger.WithError(err).Error("can't shutdown OpenTelemetry metric provider")
+ }
+
+ o.periodicFlusher.Stop()
+
+ return nil
+}
+
+// Stop just implements an old interface (output.Output)
+func (o *Output) Stop() error {
+ return o.StopWithTestError(nil)
+}
+
+// Start performs initialization tasks prior to Engine using the output
+func (o *Output) Start() error {
+ o.logger.Debug("Starting output...")
+
+ exp, err := getExporter(o.config)
+ if err != nil {
+ return fmt.Errorf("failed to create OpenTelemetry exporter: %w", err)
+ }
+
+ res, err := resource.Merge(resource.Default(),
+ resource.NewWithAttributes(semconv.SchemaURL,
+ semconv.ServiceName(o.config.ServiceName.String),
+ semconv.ServiceVersion(o.config.ServiceVersion.String),
+ ))
+ if err != nil {
+ return fmt.Errorf("failed to create OpenTelemetry resource: %w", err)
+ }
+
+ meterProvider := metric.NewMeterProvider(
+ metric.WithResource(res),
+ metric.WithReader(
+ metric.NewPeriodicReader(
+ exp,
+ metric.WithInterval(o.config.ExportInterval.TimeDuration()),
+ ),
+ ),
+ )
+
+ pf, err := output.NewPeriodicFlusher(o.config.FlushInterval.TimeDuration(), o.flushMetrics)
+ if err != nil {
+ return err
+ }
+
+ o.logger.Debug("Started!")
+ o.periodicFlusher = pf
+ o.meterProvider = meterProvider
+ o.metricsRegistry = newRegistry(meterProvider.Meter("k6"), o.logger)
+
+ return nil
+}
+
+func (o *Output) flushMetrics() {
+ samples := o.GetBufferedSamples()
+ start := time.Now()
+ var count, errCount int
+ for _, sc := range samples {
+ samples := sc.GetSamples()
+
+ for _, sample := range samples {
+ if err := o.dispatch(sample); err != nil {
+ o.logger.WithError(err).Error("Error dispatching sample")
+ errCount++
+
+ continue
+ }
+ count++
+ }
+ }
+
+ if count > 0 {
+ o.logger.
+ WithField("t", time.Since(start)).
+ WithField("count", count).
+ Debug("registered metrics in OpenTelemetry metric provider")
+ }
+
+ if errCount > 0 {
+ o.logger.
+ WithField("t", time.Since(start)).
+ WithField("count", errCount).
+ Warn("can't flush some metrics")
+ }
+}
+
+func (o *Output) dispatch(entry metrics.Sample) error {
+ ctx := context.Background()
+ name := normalizeMetricName(o.config, entry.Metric.Name)
+
+ attributeSet := newAttributeSet(entry.Tags)
+ attributeSetOpt := otelMetric.WithAttributeSet(attributeSet)
+
+ unit := normalizeUnit(entry.Metric.Contains)
+
+ switch entry.Metric.Type {
+ case metrics.Counter:
+ counter, err := o.metricsRegistry.getOrCreateCounter(name, unit)
+ if err != nil {
+ return err
+ }
+
+ counter.Add(ctx, entry.Value, attributeSetOpt)
+ case metrics.Gauge:
+ gauge, err := o.metricsRegistry.getOrCreateGauge(name, unit)
+ if err != nil {
+ return err
+ }
+
+ gauge.Set(entry.Value, attributeSet)
+ case metrics.Trend:
+ trend, err := o.metricsRegistry.getOrCreateHistogram(name, unit)
+ if err != nil {
+ return err
+ }
+
+ trend.Record(ctx, entry.Value, attributeSetOpt)
+ case metrics.Rate:
+ nonZero, total, err := o.metricsRegistry.getOrCreateCountersForRate(name)
+ if err != nil {
+ return err
+ }
+
+ if entry.Value != 0 {
+ nonZero.Add(ctx, 1, attributeSetOpt)
+ }
+ total.Add(ctx, 1, attributeSetOpt)
+ default:
+ o.logger.Warnf("metric %q has unsupported metric type", entry.Metric.Name)
+ }
+
+ return nil
+}
+
+func normalizeMetricName(cfg Config, name string) string {
+ return cfg.MetricPrefix.String + name
+}
+
+func normalizeUnit(vt metrics.ValueType) string {
+ switch vt {
+ case metrics.Time:
+ return "ms"
+ case metrics.Data:
+ return "By"
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/registry.go b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/registry.go
new file mode 100644
index 00000000000..d31217d8f8d
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/registry.go
@@ -0,0 +1,156 @@
+package opentelemetry
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/sirupsen/logrus"
+ otelMetric "go.opentelemetry.io/otel/metric"
+)
+
+// registry keep track of all metrics that have been used.
+type registry struct {
+ meter otelMetric.Meter
+ logger logrus.FieldLogger
+
+ counters sync.Map
+ gauges sync.Map
+ histograms sync.Map
+ rateCounters sync.Map
+}
+
+// newRegistry creates a new registry.
+func newRegistry(meter otelMetric.Meter, logger logrus.FieldLogger) *registry {
+	return &registry{
+ meter: meter,
+ logger: logger,
+ }
+}
+
+func (r *registry) getOrCreateCounter(name, unit string) (otelMetric.Float64Counter, error) {
+ if counter, ok := r.counters.Load(name); ok {
+ if v, ok := counter.(otelMetric.Float64Counter); ok {
+ return v, nil
+ }
+
+ return nil, fmt.Errorf("metric %q is not a counter", name)
+ }
+
+ opts := []otelMetric.Float64CounterOption{}
+ if unit != "" {
+ opts = append(opts, otelMetric.WithUnit(unit))
+ }
+
+ c, err := r.meter.Float64Counter(name, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create counter for %q: %w", name, err)
+ }
+
+ r.logger.Debugf("registered counter metric %q", name)
+
+ r.counters.Store(name, c)
+ return c, nil
+}
+
+func (r *registry) getOrCreateHistogram(name, unit string) (otelMetric.Float64Histogram, error) {
+ if histogram, ok := r.histograms.Load(name); ok {
+ if v, ok := histogram.(otelMetric.Float64Histogram); ok {
+ return v, nil
+ }
+
+ return nil, fmt.Errorf("metric %q is not a histogram", name)
+ }
+
+ opts := []otelMetric.Float64HistogramOption{}
+ if unit != "" {
+ opts = append(opts, otelMetric.WithUnit(unit))
+ }
+
+ h, err := r.meter.Float64Histogram(name, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create histogram for %q: %w", name, err)
+ }
+
+ r.logger.Debugf("registered histogram metric %q", name)
+
+ r.histograms.Store(name, h)
+ return h, nil
+}
+
+func (r *registry) getOrCreateCountersForRate(name string) (otelMetric.Int64Counter, otelMetric.Int64Counter, error) {
+ // k6's rate metric tracks how frequently a non-zero value occurs.
+ // so to correctly calculate the rate in a metrics backend
+ // we need to split the rate metric into two counters:
+	// 1. number of non-zero occurrences
+	// 2. the total number of occurrences
+
+ nonZeroName := name + ".occurred"
+ totalName := name + ".total"
+
+ var err error
+ var nonZeroCounter, totalCounter otelMetric.Int64Counter
+
+ storedNonZeroCounter, ok := r.rateCounters.Load(nonZeroName)
+ if !ok {
+ nonZeroCounter, err = r.meter.Int64Counter(nonZeroName)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create counter for %q: %w", nonZeroName, err)
+ }
+
+ r.rateCounters.Store(nonZeroName, nonZeroCounter)
+ r.logger.Debugf("registered counter metric %q", nonZeroName)
+ } else {
+ nonZeroCounter, ok = storedNonZeroCounter.(otelMetric.Int64Counter)
+ if !ok {
+ return nil, nil, fmt.Errorf("metric %q stored not as counter", nonZeroName)
+ }
+ }
+
+ storedTotalCounter, ok := r.rateCounters.Load(totalName)
+ if !ok {
+ totalCounter, err = r.meter.Int64Counter(totalName)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create counter for %q: %w", totalName, err)
+ }
+
+ r.rateCounters.Store(totalName, totalCounter)
+ r.logger.Debugf("registered counter metric %q", totalName)
+ } else {
+ totalCounter, ok = storedTotalCounter.(otelMetric.Int64Counter)
+ if !ok {
+ return nil, nil, fmt.Errorf("metric %q stored not as counter", totalName)
+ }
+ }
+
+ return nonZeroCounter, totalCounter, nil
+}
+
+func (r *registry) getOrCreateGauge(name, unit string) (*Float64Gauge, error) {
+ if gauge, ok := r.gauges.Load(name); ok {
+ if v, ok := gauge.(*Float64Gauge); ok {
+ return v, nil
+ }
+
+ return nil, fmt.Errorf("metric %q is not a gauge", name)
+ }
+
+ g := NewFloat64Gauge()
+
+ opts := []otelMetric.Float64ObservableGaugeOption{
+ otelMetric.WithFloat64Callback(g.Callback),
+ }
+
+ if unit != "" {
+ opts = append(opts, otelMetric.WithUnit(unit))
+ }
+
+ _, err := r.meter.Float64ObservableGauge(name, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create gauge for %q: %w", name, err)
+ }
+
+ r.logger.Debugf("registered gauge metric %q ", name)
+
+ r.gauges.Store(name, g)
+ return g, nil
+}
diff --git a/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/tls.go b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/tls.go
new file mode 100644
index 00000000000..cff39c4908b
--- /dev/null
+++ b/vendor/github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry/tls.go
@@ -0,0 +1,57 @@
+package opentelemetry
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "os"
+
+ "gopkg.in/guregu/null.v3"
+)
+
+func buildTLSConfig(
+ insecureSkipVerify null.Bool,
+ certPath, clientCertPath, clientKeyPath null.String,
+) (*tls.Config, error) {
+ set := false
+ tlsConfig := &tls.Config{} //nolint:gosec // it's up to the caller
+
+ if insecureSkipVerify.Valid {
+ tlsConfig.InsecureSkipVerify = insecureSkipVerify.Bool
+ set = true
+ }
+
+ // Load the root certificate
+ if certPath.Valid {
+ b, err := os.ReadFile(certPath.String) //nolint:forbidigo
+ if err != nil {
+ return nil, fmt.Errorf("failed to read root certificate from %q: %w", certPath.String, err)
+ }
+
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(b); !ok {
+ return nil, errors.New("failed to append root certificate to the pool")
+ }
+
+ tlsConfig.RootCAs = cp
+ set = true
+ }
+
+ // Load the client certificate
+ if clientCertPath.Valid {
+ cert, err := tls.LoadX509KeyPair(clientCertPath.String, clientKeyPath.String)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load client certificate: %w", err)
+ }
+
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ set = true
+ }
+
+ if !set {
+ return nil, nil //nolint:nilnil // no TLS config set
+ }
+
+ return tlsConfig, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index 19d9d37fff9..9005d6a0bf4 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -41,7 +41,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
m, ok := item.node.(map[string]interface{})
switch {
- case ok:
+ case ok && len(m) > 0:
// if the item is an object, then enqueue all of its children
for k, v := range m {
if item.msg == nil {
@@ -96,6 +96,8 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
queue = append(queue, child)
}
}
+ case ok && len(m) == 0:
+ fallthrough
case len(item.path) > 0:
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
index 628e1fde1cb..ed9a7e4387d 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -341,13 +341,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
- r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return
}
+ r.Method = strings.ToUpper(override)
}
var pathComponents []string
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
index d01933c4fd2..fe634174b85 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -51,11 +51,13 @@ func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *u
key = match[1]
values = append([]string{match[2]}, values...)
}
- fieldPath := strings.Split(key, ".")
+
+ msgValue := msg.ProtoReflect()
+ fieldPath := normalizeFieldPath(msgValue, strings.Split(key, "."))
if filter.HasCommonPrefix(fieldPath) {
continue
}
- if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
+ if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil {
return err
}
}
@@ -68,6 +70,38 @@ func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value stri
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
}
+func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string {
+ newFieldPath := make([]string, 0, len(fieldPath))
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+ fieldDesc := fields.ByTextName(fieldName)
+ if fieldDesc == nil {
+ fieldDesc = fields.ByJSONName(fieldName)
+ }
+ if fieldDesc == nil {
+ // return initial field path values if no matching message field was found
+ return fieldPath
+ }
+
+ newFieldPath = append(newFieldPath, string(fieldDesc.Name()))
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated {
+ return fieldPath
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Get(fieldDesc).Message()
+ }
+
+ return newFieldPath
+}
+
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
if len(fieldPath) < 1 {
return errors.New("no field path")
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
new file mode 100644
index 00000000000..16f9af12b66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -0,0 +1,211 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // ourConn keeps track of where conn was created: true if created here in
+ // NewClient, or false if passed with an option. This is important on
+ // Shutdown as the conn should only be closed if we created it. Otherwise,
+ // it is up to the processes that passed the conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ msc colmetricpb.MetricsServiceClient
+}
+
+// newClient creates a new gRPC metric client.
+func newClient(ctx context.Context, cfg oconf.Config) (*client, error) {
+ c := &client{
+ exportTimeout: cfg.Metrics.Timeout,
+ requestFunc: cfg.RetryConfig.RequestFunc(retryable),
+ conn: cfg.GRPCConn,
+ }
+
+ if len(cfg.Metrics.Headers) > 0 {
+ c.metadata = metadata.New(cfg.Metrics.Headers)
+ }
+
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the client was
+ // created, create one using the configuration they did provide.
+ userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version()
+ dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
+ dialOpts = append(dialOpts, cfg.DialOptions...)
+
+ conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...)
+ if err != nil {
+ return nil, err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
+
+ return c, nil
+}
+
+// Shutdown shuts down the client, freeing all resource.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+func (c *client) Shutdown(ctx context.Context) error {
+ // The otlpmetric.Exporter synchronizes access to client methods and
+ // ensures this is called only once. The only thing that needs to be done
+ // here is to release any computational resources the client holds.
+
+ c.metadata = nil
+ c.requestFunc = nil
+ c.msc = nil
+
+ err := ctx.Err()
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ c.conn = nil
+ return err
+}
+
+// UploadMetrics sends protoMetrics to connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+ // The otlpmetric.Exporter synchronizes access to client methods, and
+ // ensures this is not called after the Exporter is shutdown. Only thing
+ // to do here is send data.
+
+ select {
+ case <-ctx.Done():
+ // Do not upload if the context is already expired.
+ return ctx.Err()
+ default:
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
+ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedDataPoints()
+ if n != 0 || msg != "" {
+ err := internal.MetricPartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function based on the clients configured export timeout.
+//
+// It is the callers responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+ }
+
+ return ctx, cancel
+}
+
+// retryable returns if err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ // Additionally, handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay returns if the status is RetryInfo
+// and the duration to wait for if an explicit throttle time is included.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return true, t.RetryDelay.AsDuration()
+ }
+ }
+ return false, 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
new file mode 100644
index 00000000000..9269f5a01b7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
@@ -0,0 +1,273 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Option applies a configuration option to the Exporter.
+type Option interface {
+ applyGRPCOption(oconf.Config) oconf.Config
+}
+
+func asGRPCOptions(opts []Option) []oconf.GRPCOption {
+ converted := make([]oconf.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying the export of metric data
+// that failed.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ oconf.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
+ return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the Exporter's gRPC
+// connection, just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{oconf.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod set the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.ReconnectionPeriod = rp
+ return cfg
+ })}
+}
+
+func compressorToCompression(compressor string) oconf.Compression {
+ if compressor == "gzip" {
+ return oconf.GzipCompression
+ }
+
+ otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+ return oconf.NoCompression
+}
+
+// WithCompressor sets the compressor the gRPC client uses.
+// Supported compressor values: "gzip".
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compressor will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+ return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC requests.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTLSCredentials sets the gRPC connection to use creds.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no TLS credentials will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.Metrics.GRPCCredentials = creds
+ return cfg
+ })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.ServiceConfig = serviceConfig
+ return cfg
+ })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when establishing a
+// gRPC connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.DialOptions = opts
+ return cfg
+ })}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the callers responsibility to close the passed conn. The Exporter
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.GRPCConn = conn
+ return cfg
+ })}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+ return wrappedOption{oconf.WithRetry(retry.Config(settings))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+ return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+ return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
new file mode 100644
index 00000000000..48e63689e00
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally a port, a scheme, and a path.
+The value accepts "http" and "https" scheme.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT setting overrides
+the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT.
+OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+the filepath to the clients private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+ - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+ - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+ - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+ - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+ - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
new file mode 100644
index 00000000000..826276ba392
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is a OpenTelemetry metric Exporter using gRPC.
+type Exporter struct {
+ // Ensure synchronous access to the client across all functionality.
+ clientMu sync.Mutex
+ client interface {
+ UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
+ Shutdown(context.Context) error
+ }
+
+ temporalitySelector metric.TemporalitySelector
+ aggregationSelector metric.AggregationSelector
+
+ shutdownOnce sync.Once
+}
+
+func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
+ ts := cfg.Metrics.TemporalitySelector
+ if ts == nil {
+ ts = func(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+ }
+ }
+
+ as := cfg.Metrics.AggregationSelector
+ if as == nil {
+ as = metric.DefaultAggregationSelector
+ }
+
+ return &Exporter{
+ client: c,
+
+ temporalitySelector: ts,
+ aggregationSelector: as,
+ }, nil
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.temporalitySelector(k)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.aggregationSelector(k)
+}
+
+// Export transforms and transmits metric data to an OTLP receiver.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ defer global.Debug("OTLP/gRPC exporter export", "Data", rm)
+
+ otlpRm, err := transform.ResourceMetrics(rm)
+ // Best effort upload of transformable metrics.
+ e.clientMu.Lock()
+ upErr := e.client.UploadMetrics(ctx, otlpRm)
+ e.clientMu.Unlock()
+ if upErr != nil {
+ if err == nil {
+ return fmt.Errorf("failed to upload metrics: %w", upErr)
+ }
+ // Merge the two errors.
+ return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
+ }
+ return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+ // The exporter and client hold no state, nothing to flush.
+ return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ e.clientMu.Lock()
+ client := e.client
+ e.client = shutdownClient{}
+ e.clientMu.Unlock()
+ err = client.Shutdown(ctx)
+ })
+ return err
+}
+
+var errShutdown = fmt.Errorf("gRPC exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+ return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+ return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct{ Type string }{Type: "OTLP/gRPC"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using gRPC.
+//
+// If an already established gRPC ClientConn is not passed in options using
+// WithGRPCConn, a connection to the OTLP endpoint will be established based
+// on options. If a connection cannot be establishes in the lifetime of ctx,
+// an error will be returned.
+func New(ctx context.Context, options ...Option) (*Exporter, error) {
+ cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
+ c, err := newClient(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return newExporter(c, cfg)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
new file mode 100644
index 00000000000..17951ceb451
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,202 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '="), "parse headers", "input", header)
+ continue
+ }
+ name, err := url.PathUnescape(n)
+ if err != nil {
+ global.Error(err, "escape header key", "key", n)
+ continue
+ }
+ trimmedName := strings.TrimSpace(name)
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
new file mode 100644
index 00000000000..06718efa898
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
new file mode 100644
index 00000000000..ae100513bad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
@@ -0,0 +1,221 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// DefaultEnvOptionsReader is the default environments reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if an URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Metrics.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
+ withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+ )
+
+ return opts
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
+
+func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "cumulative":
+ fn(cumulativeTemporality)
+ case "delta":
+ fn(deltaTemporality)
+ case "lowmemory":
+ fn(lowMemory)
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
+
+func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+}
+
+func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "explicit_bucket_histogram":
+ fn(metric.DefaultAggregationSelector)
+ case "base2_exponential_bucket_histogram":
+ fn(func(kind metric.InstrumentKind) metric.Aggregation {
+ if kind == metric.InstrumentKindHistogram {
+ return metric.AggregationBase2ExponentialHistogram{
+ MaxSize: 160,
+ MaxScale: 20,
+ NoMinMax: false,
+ }
+ }
+ return metric.DefaultAggregationSelector(kind)
+ })
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
new file mode 100644
index 00000000000..e2d99dc22c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
@@ -0,0 +1,373 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+const (
+ // DefaultMaxAttempts describes how many times the driver
+ // should retry the sending of the payload in case of a
+ // retryable error.
+ DefaultMaxAttempts int = 5
+ // DefaultMetricsPath is a default URL path for endpoint that
+ // receives metrics.
+ DefaultMetricsPath string = "/v1/metrics"
+ // DefaultBackoff is a default base backoff time used in the
+ // exponential backoff strategy.
+ DefaultBackoff time.Duration = 300 * time.Millisecond
+ // DefaultTimeout is a default max waiting time for the backend to process
+ // each span or metrics batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ TemporalitySelector metric.TemporalitySelector
+ AggregationSelector metric.AggregationSelector
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Metrics SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
+ return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Metrics.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+ } else if cfg.Metrics.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Metrics.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Metrics.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = endpoint
+ return cfg
+ })
+}
+
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlpmetric: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Metrics.Endpoint = u.Host
+ cfg.Metrics.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Metrics.Insecure = true
+ }
+
+ return cfg
+ })
+}
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Timeout = duration
+ return cfg
+ })
+}
+
+func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.TemporalitySelector = selector
+ return cfg
+ })
+}
+
+func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.AggregationSelector = selector
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
new file mode 100644
index 00000000000..8a3c8422e1b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
@@ -0,0 +1,58 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import "time"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether or not to retry sending batches in case of export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+ // consecutive retries will always be `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+ // Once this value is reached, the data is discarded.
+ MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
new file mode 100644
index 00000000000..2e36e0b6f25
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
@@ -0,0 +1,49 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return CreateTLSConfig(b)
+}
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
new file mode 100644
index 00000000000..f4d48198251
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
@@ -0,0 +1,67 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
new file mode 100644
index 00000000000..689779c3604
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
@@ -0,0 +1,156 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether or not to retry sending batches in case
+	// of export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %s", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
new file mode 100644
index 00000000000..e80798eebde
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
@@ -0,0 +1,155 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// AttrIter transforms an attribute iterator into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *cpb.AnyValue {
+ av := new(cpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &cpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &cpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &cpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
new file mode 100644
index 00000000000..d5d2fdcb7ea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
@@ -0,0 +1,114 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+var (
+ errUnknownAggregation = errors.New("unknown aggregation")
+ errUnknownTemporality = errors.New("unknown temporality")
+)
+
+type errMetric struct {
+ m *mpb.Metric
+ err error
+}
+
+func (e errMetric) Unwrap() error {
+ return e.err
+}
+
+func (e errMetric) Error() string {
+ format := "invalid metric (name: %q, description: %q, unit: %q): %s"
+ return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
+}
+
+func (e errMetric) Is(target error) bool {
+ return errors.Is(e.err, target)
+}
+
+// multiErr is used by the data-type transform functions to wrap multiple
+// errors into a single return value. The error message will show all errors
+// as a list and scope them by the datatype name that is returning them.
+type multiErr struct {
+ datatype string
+ errs []error
+}
+
+// errOrNil returns nil if e contains no errors, otherwise it returns e.
+func (e *multiErr) errOrNil() error {
+ if len(e.errs) == 0 {
+ return nil
+ }
+ return e
+}
+
+// append adds err to e. If err is a multiErr, its errs are flattened into e.
+func (e *multiErr) append(err error) {
+ // Do not use errors.As here, this should only be flattened one layer. If
+ // there is a *multiErr several steps down the chain, all the errors above
+ // it will be discarded if errors.As is used instead.
+ switch other := err.(type) {
+ case *multiErr:
+ // Flatten err errors into e.
+ e.errs = append(e.errs, other.errs...)
+ default:
+ e.errs = append(e.errs, err)
+ }
+}
+
+func (e *multiErr) Error() string {
+ es := make([]string, len(e.errs))
+ for i, err := range e.errs {
+ es[i] = fmt.Sprintf("* %s", err)
+ }
+
+ format := "%d errors occurred transforming %s:\n\t%s"
+ return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
+}
+
+func (e *multiErr) Unwrap() error {
+ switch len(e.errs) {
+ case 0:
+ return nil
+ case 1:
+ return e.errs[0]
+ }
+
+ // Return a multiErr without the leading error.
+ cp := &multiErr{
+ datatype: e.datatype,
+ errs: make([]error, len(e.errs)-1),
+ }
+ copy(cp.errs, e.errs[1:])
+ return cp
+}
+
+func (e *multiErr) Is(target error) bool {
+ if len(e.errs) == 0 {
+ return false
+ }
+ // Check if the first error is target.
+ return errors.Is(e.errs[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
new file mode 100644
index 00000000000..80f03b4b420
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
@@ -0,0 +1,320 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transform provides transformation functionality from the
+// sdk/metric/metricdata data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+ rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
+// contains invalid ScopeMetrics, an error will be returned along with an OTLP
+// ResourceMetrics that contains partial OTLP ScopeMetrics.
+func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+ sms, err := ScopeMetrics(rm.ScopeMetrics)
+ return &mpb.ResourceMetrics{
+ Resource: &rpb.Resource{
+ Attributes: AttrIter(rm.Resource.Iter()),
+ },
+ ScopeMetrics: sms,
+ SchemaUrl: rm.Resource.SchemaURL(),
+ }, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+ errs := &multiErr{datatype: "ScopeMetrics"}
+ out := make([]*mpb.ScopeMetrics, 0, len(sms))
+ for _, sm := range sms {
+ ms, err := Metrics(sm.Metrics)
+ if err != nil {
+ errs.append(err)
+ }
+
+ out = append(out, &mpb.ScopeMetrics{
+ Scope: &cpb.InstrumentationScope{
+ Name: sm.Scope.Name,
+ Version: sm.Scope.Version,
+ },
+ Metrics: ms,
+ SchemaUrl: sm.Scope.SchemaURL,
+ })
+ }
+ return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+ errs := &multiErr{datatype: "Metrics"}
+ out := make([]*mpb.Metric, 0, len(ms))
+ for _, m := range ms {
+ o, err := metric(m)
+ if err != nil {
+ // Do not include invalid data. Drop the metric, report the error.
+ errs.append(errMetric{m: o, err: err})
+ continue
+ }
+ out = append(out, o)
+ }
+ return out, errs.errOrNil()
+}
+
+func metric(m metricdata.Metrics) (*mpb.Metric, error) {
+ var err error
+ out := &mpb.Metric{
+ Name: m.Name,
+ Description: m.Description,
+ Unit: string(m.Unit),
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ out.Data = Gauge[int64](a)
+ case metricdata.Gauge[float64]:
+ out.Data = Gauge[float64](a)
+ case metricdata.Sum[int64]:
+ out.Data, err = Sum[int64](a)
+ case metricdata.Sum[float64]:
+ out.Data, err = Sum[float64](a)
+ case metricdata.Histogram[int64]:
+ out.Data, err = Histogram(a)
+ case metricdata.Histogram[float64]:
+ out.Data, err = Histogram(a)
+ case metricdata.ExponentialHistogram[int64]:
+ out.Data, err = ExponentialHistogram(a)
+ case metricdata.ExponentialHistogram[float64]:
+ out.Data, err = ExponentialHistogram(a)
+ default:
+ return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
+ }
+ return out, err
+}
+
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+ return &mpb.Metric_Gauge{
+ Gauge: &mpb.Gauge{
+ DataPoints: DataPoints(g.DataPoints),
+ },
+ }
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. An error is returned
+// if the temporality of s is unknown.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+ t, err := Temporality(s.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Sum{
+ Sum: &mpb.Sum{
+ AggregationTemporality: t,
+ IsMonotonic: s.IsMonotonic,
+ DataPoints: DataPoints(s.DataPoints),
+ },
+ }, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+ out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ ndp := &mpb.NumberDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ switch v := any(dPt.Value).(type) {
+ case int64:
+ ndp.Value = &mpb.NumberDataPoint_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ ndp.Value = &mpb.NumberDataPoint_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, ndp)
+ }
+ return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Histogram{
+ Histogram: &mpb.Histogram{
+ AggregationTemporality: t,
+ DataPoints: HistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
+ out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ hdp := &mpb.HistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ BucketCounts: dPt.BucketCounts,
+ ExplicitBounds: dPt.Bounds,
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ hdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ hdp.Max = &vF64
+ }
+ out = append(out, hdp)
+ }
+ return out
+}
+
+// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_ExponentialHistogram{
+ ExponentialHistogram: &mpb.ExponentialHistogram{
+ AggregationTemporality: t,
+ DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
+// from dPts.
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+ out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ ehdp := &mpb.ExponentialHistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ Scale: dPt.Scale,
+ ZeroCount: dPt.ZeroCount,
+ Exemplars: Exemplars(dPt.Exemplars),
+
+ Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
+ Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Max = &vF64
+ }
+ out = append(out, ehdp)
+ }
+ return out
+}
+
+// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
+// from bucket.
+func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+ return &mpb.ExponentialHistogramDataPoint_Buckets{
+ Offset: bucket.Offset,
+ BucketCounts: bucket.Counts,
+ }
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+ switch t {
+ case metricdata.DeltaTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+ case metricdata.CumulativeTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+ default:
+ err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+ }
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64.
+// The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64
+// (a date before the year 1678 or after 2262).
+// timeUnixNano on the zero Time returns 0.
+// The result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+ if t.IsZero() {
+ return 0
+ }
+ return uint64(t.UnixNano())
+}
+
+// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
+func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exemplar {
+ out := make([]*mpb.Exemplar, 0, len(exemplars))
+ for _, exemplar := range exemplars {
+ e := &mpb.Exemplar{
+ FilteredAttributes: KeyValues(exemplar.FilteredAttributes),
+ TimeUnixNano: timeUnixNano(exemplar.Time),
+ SpanId: exemplar.SpanID,
+ TraceId: exemplar.TraceID,
+ }
+ switch v := any(exemplar.Value).(type) {
+ case int64:
+ e.Value = &mpb.Exemplar_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ e.Value = &mpb.Exemplar_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, e)
+ }
+ return out
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
new file mode 100644
index 00000000000..a4ce4da3e3e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
+func Version() string {
+ return "1.24.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
new file mode 100644
index 00000000000..73463c91d5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
@@ -0,0 +1,307 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+ colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+ // req is cloned for every upload the client makes.
+ req *http.Request
+ compression Compression
+ requestFunc retry.RequestFunc
+ httpClient *http.Client
+}
+
+// Keep it in sync with golang's DefaultTransport from net/http! We
+// have our own copy to avoid handling a situation where the
+// DefaultTransport is overwritten with some different implementation
+// of http.RoundTripper or it's modified by another package.
+var ourTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+}
+
+// newClient creates a new HTTP metric client.
+func newClient(cfg oconf.Config) (*client, error) {
+ httpClient := &http.Client{
+ Transport: ourTransport,
+ Timeout: cfg.Metrics.Timeout,
+ }
+ if cfg.Metrics.TLSCfg != nil {
+ transport := ourTransport.Clone()
+ transport.TLSClientConfig = cfg.Metrics.TLSCfg
+ httpClient.Transport = transport
+ }
+
+ u := &url.URL{
+ Scheme: "https",
+ Host: cfg.Metrics.Endpoint,
+ Path: cfg.Metrics.URLPath,
+ }
+ if cfg.Metrics.Insecure {
+ u.Scheme = "http"
+ }
+ // Body is set when this is cloned during upload.
+ req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody)
+ if err != nil {
+ return nil, err
+ }
+
+ userAgent := "OTel Go OTLP over HTTP/protobuf metrics exporter/" + Version()
+ req.Header.Set("User-Agent", userAgent)
+
+ if n := len(cfg.Metrics.Headers); n > 0 {
+ for k, v := range cfg.Metrics.Headers {
+ req.Header.Set(k, v)
+ }
+ }
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ return &client{
+ compression: Compression(cfg.Metrics.Compression),
+ req: req,
+ requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
+ httpClient: httpClient,
+ }, nil
+}
+
+// Shutdown shuts down the client, freeing all resources.
+func (c *client) Shutdown(ctx context.Context) error {
+ // The otlpmetric.Exporter synchronizes access to client methods and
+ // ensures this is called only once. The only thing that needs to be done
+ // here is to release any computational resources the client holds.
+
+ c.requestFunc = nil
+ c.httpClient = nil
+ return ctx.Err()
+}
+
+// UploadMetrics sends protoMetrics to the connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+ // The otlpmetric.Exporter synchronizes access to client methods, and
+ // ensures this is not called after the Exporter is shutdown. Only thing
+ // to do here is send data.
+
+ pbRequest := &colmetricpb.ExportMetricsServiceRequest{
+ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
+ }
+ body, err := proto.Marshal(pbRequest)
+ if err != nil {
+ return err
+ }
+ request, err := c.newRequest(ctx, body)
+ if err != nil {
+ return err
+ }
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ select {
+ case <-iCtx.Done():
+ return iCtx.Err()
+ default:
+ }
+
+ request.reset(iCtx)
+ resp, err := c.httpClient.Do(request.Request)
+ var urlErr *url.Error
+ if errors.As(err, &urlErr) && urlErr.Temporary() {
+ return newResponseError(http.Header{})
+ }
+ if err != nil {
+ return err
+ }
+
+ var rErr error
+ switch sc := resp.StatusCode; {
+ case sc >= 200 && sc <= 299:
+ // Success, do not retry.
+
+ // Read the partial success message, if any.
+ var respData bytes.Buffer
+ if _, err := io.Copy(&respData, resp.Body); err != nil {
+ return err
+ }
+ if respData.Len() == 0 {
+ return nil
+ }
+
+ if resp.Header.Get("Content-Type") == "application/x-protobuf" {
+ var respProto colmetricpb.ExportMetricsServiceResponse
+ if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
+ return err
+ }
+
+ if respProto.PartialSuccess != nil {
+ msg := respProto.PartialSuccess.GetErrorMessage()
+ n := respProto.PartialSuccess.GetRejectedDataPoints()
+ if n != 0 || msg != "" {
+ err := internal.MetricPartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ }
+ return nil
+ case sc == http.StatusTooManyRequests,
+ sc == http.StatusBadGateway,
+ sc == http.StatusServiceUnavailable,
+ sc == http.StatusGatewayTimeout:
+ // Retry-able failure.
+ rErr = newResponseError(resp.Header)
+
+ // Going to retry, drain the body to reuse the connection.
+ if _, err := io.Copy(io.Discard, resp.Body); err != nil {
+ _ = resp.Body.Close()
+ return err
+ }
+ default:
+ rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status)
+ }
+
+ if err := resp.Body.Close(); err != nil {
+ return err
+ }
+ return rErr
+ })
+}
+
+var gzPool = sync.Pool{
+ New: func() interface{} {
+ w := gzip.NewWriter(io.Discard)
+ return w
+ },
+}
+
+func (c *client) newRequest(ctx context.Context, body []byte) (request, error) {
+ r := c.req.Clone(ctx)
+ req := request{Request: r}
+
+ switch c.compression {
+ case NoCompression:
+ r.ContentLength = (int64)(len(body))
+ req.bodyReader = bodyReader(body)
+ case GzipCompression:
+ // Ensure the content length is not used.
+ r.ContentLength = -1
+ r.Header.Set("Content-Encoding", "gzip")
+
+ gz := gzPool.Get().(*gzip.Writer)
+ defer gzPool.Put(gz)
+
+ var b bytes.Buffer
+ gz.Reset(&b)
+
+ if _, err := gz.Write(body); err != nil {
+ return req, err
+ }
+	// Close needs to be called to ensure body is fully written.
+ if err := gz.Close(); err != nil {
+ return req, err
+ }
+
+ req.bodyReader = bodyReader(b.Bytes())
+ }
+
+ return req, nil
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+ return func() io.ReadCloser {
+ return io.NopCloser(bytes.NewReader(buf))
+ }
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+ *http.Request
+
+ // bodyReader allows the same body to be used for multiple requests.
+ bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+ r.Body = r.bodyReader()
+ r.Request = r.Request.WithContext(ctx)
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+ throttle int64
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers.
+func newResponseError(header http.Header) error {
+ var rErr retryableError
+ if v := header.Get("Retry-After"); v != "" {
+ if t, err := strconv.ParseInt(v, 10, 64); err == nil {
+ rErr.throttle = t
+ }
+ }
+ return rErr
+}
+
+func (e retryableError) Error() string {
+ return "retry-able request failure"
+}
+
+// evaluate returns if err is retry-able. If it is and it includes an explicit
+// throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+ if err == nil {
+ return false, 0
+ }
+
+ rErr, ok := err.(retryableError)
+ if !ok {
+ return false, 0
+ }
+
+ return true, time.Duration(rErr.throttle)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
new file mode 100644
index 00000000000..5948e2d9d73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
@@ -0,0 +1,219 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+ "crypto/tls"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression oconf.Compression
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression = Compression(oconf.NoCompression)
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression = Compression(oconf.GzipCompression)
+)
+
+// Option applies an option to the Exporter.
+type Option interface {
+ applyHTTPOption(oconf.Config) oconf.Config
+}
+
+func asHTTPOptions(opts []Option) []oconf.HTTPOption {
+ converted := make([]oconf.HTTPOption, len(opts))
+ for i, o := range opts {
+ converted[i] = oconf.NewHTTPOption(o.applyHTTPOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying the export of metric data
+// that failed.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ oconf.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config {
+ return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to. This
+// endpoint is specified as a host and optional port, no path or scheme should
+// be included (see WithInsecure and WithURLPath).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+//
+// NOTE(review): the upstream comment here referenced WithGRPCConn, which does not exist in this HTTP exporter package (copy-paste from the gRPC exporter docs).
+func WithEndpointURL(u string) Option {
+ return wrappedOption{oconf.WithEndpointURL(u)}
+}
+
+// WithCompression sets the compression strategy the Exporter will use to
+// compress the HTTP body.
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compression strategy will be used.
+func WithCompression(compression Compression) Option {
+ return wrappedOption{oconf.WithCompression(oconf.Compression(compression))}
+}
+
+// WithURLPath sets the URL path the Exporter will send requests to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, the path
+// contained in that variable value will be used. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "/v1/metrics" will be used.
+func WithURLPath(urlPath string) Option {
+ return wrappedOption{oconf.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig sets the TLS configuration the Exporter will use for
+// HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, the system default configuration is used.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+ return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure disables client transport security for the Exporter's HTTP
+// connection.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+func WithInsecure() Option {
+ return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithHeaders will send the provided headers with each HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+ return wrappedOption{oconf.WithRetry(retry.Config(rc))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+ return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+ return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
new file mode 100644
index 00000000000..a707b5ebb71
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
@@ -0,0 +1,93 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlpmetrichttp provides an OTLP metrics exporter using HTTP with protobuf payloads.
+By default the telemetry is sent to https://localhost:4318/v1/metrics.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/metrics" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+environment variable and by [WithEndpoint], [WithEndpointURL], and [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4318/v1/metrics") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+compression strategy the exporter uses to compress the HTTP body.
+Supported values: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+filepath to the client certificate (or certificate chain) paired with the client's private key, used for mTLS communication, in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+ - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+ - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+ - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+ - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+ - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
new file mode 100644
index 00000000000..96991ede5c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
@@ -0,0 +1,162 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is an OpenTelemetry metric Exporter using protobufs over HTTP.
+type Exporter struct {
+ // Ensure synchronous access to the client across all functionality.
+ clientMu sync.Mutex
+ client interface {
+ UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
+ Shutdown(context.Context) error
+ }
+
+ temporalitySelector metric.TemporalitySelector
+ aggregationSelector metric.AggregationSelector
+
+ shutdownOnce sync.Once
+}
+
+func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
+ ts := cfg.Metrics.TemporalitySelector
+ if ts == nil {
+ ts = func(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+ }
+ }
+
+ as := cfg.Metrics.AggregationSelector
+ if as == nil {
+ as = metric.DefaultAggregationSelector
+ }
+
+ return &Exporter{
+ client: c,
+
+ temporalitySelector: ts,
+ aggregationSelector: as,
+ }, nil
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.temporalitySelector(k)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.aggregationSelector(k)
+}
+
+// Export transforms and transmits metric data to an OTLP receiver.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ defer global.Debug("OTLP/HTTP exporter export", "Data", rm)
+
+ otlpRm, err := transform.ResourceMetrics(rm)
+ // Best effort upload of transformable metrics.
+ e.clientMu.Lock()
+ upErr := e.client.UploadMetrics(ctx, otlpRm)
+ e.clientMu.Unlock()
+ if upErr != nil {
+ if err == nil {
+ return fmt.Errorf("failed to upload metrics: %w", upErr)
+ }
+ // Merge the two errors.
+ return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
+ }
+ return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+ // The exporter and client hold no state, nothing to flush.
+ return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ e.clientMu.Lock()
+ client := e.client
+ e.client = shutdownClient{}
+ e.clientMu.Unlock()
+ err = client.Shutdown(ctx)
+ })
+ return err
+}
+
+var errShutdown = fmt.Errorf("HTTP exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+ return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+ return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct{ Type string }{Type: "OTLP/HTTP"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using protobufs over HTTP.
+func New(_ context.Context, opts ...Option) (*Exporter, error) {
+ cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...)
+ c, err := newClient(cfg)
+ if err != nil {
+ return nil, err
+ }
+ return newExporter(c, cfg)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
new file mode 100644
index 00000000000..9dfb55c41bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
@@ -0,0 +1,202 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and the pair is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '="), "parse headers", "input", header)
+ continue
+ }
+ name, err := url.PathUnescape(n)
+ if err != nil {
+ global.Error(err, "escape header key", "key", n)
+ continue
+ }
+ trimmedName := strings.TrimSpace(name)
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
new file mode 100644
index 00000000000..002c8b36b2e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry\"}" --out=oconf/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf\"}" --out=otest/collector.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
new file mode 100644
index 00000000000..2bab35be6c4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
@@ -0,0 +1,221 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// DefaultEnvOptionsReader is the default environments reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if an URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Metrics.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
+ withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+ )
+
+ return opts
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
+
+func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "cumulative":
+ fn(cumulativeTemporality)
+ case "delta":
+ fn(deltaTemporality)
+ case "lowmemory":
+ fn(lowMemory)
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
+
+func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+}
+
+func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "explicit_bucket_histogram":
+ fn(metric.DefaultAggregationSelector)
+ case "base2_exponential_bucket_histogram":
+ fn(func(kind metric.InstrumentKind) metric.Aggregation {
+ if kind == metric.InstrumentKindHistogram {
+ return metric.AggregationBase2ExponentialHistogram{
+ MaxSize: 160,
+ MaxScale: 20,
+ NoMinMax: false,
+ }
+ }
+ return metric.DefaultAggregationSelector(kind)
+ })
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
new file mode 100644
index 00000000000..253539d7a3c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
@@ -0,0 +1,373 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+const (
+ // DefaultMaxAttempts describes how many times the driver
+ // should retry the sending of the payload in case of a
+ // retryable error.
+ DefaultMaxAttempts int = 5
+ // DefaultMetricsPath is a default URL path for endpoint that
+ // receives metrics.
+ DefaultMetricsPath string = "/v1/metrics"
+ // DefaultBackoff is a default base backoff time used in the
+ // exponential backoff strategy.
+ DefaultBackoff time.Duration = 300 * time.Millisecond
+ // DefaultTimeout is a default max waiting time for the backend to process
+ // each span or metrics batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ TemporalitySelector metric.TemporalitySelector
+ AggregationSelector metric.AggregationSelector
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Metrics SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
+ return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Metrics.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+ } else if cfg.Metrics.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Metrics.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Metrics.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logics
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = endpoint
+ return cfg
+ })
+}
+
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlpmetric: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Metrics.Endpoint = u.Host
+ cfg.Metrics.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Metrics.Insecure = true
+ }
+
+ return cfg
+ })
+}
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Timeout = duration
+ return cfg
+ })
+}
+
+func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.TemporalitySelector = selector
+ return cfg
+ })
+}
+
+func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.AggregationSelector = selector
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
new file mode 100644
index 00000000000..f8805e31d62
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
@@ -0,0 +1,58 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import "time"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether to retry sending batches in case of export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+ // consecutive retries will always be `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+ // Once this value is reached, the data is discarded.
+ MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
new file mode 100644
index 00000000000..15cbec25850
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
@@ -0,0 +1,49 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return CreateTLSConfig(b)
+}
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
new file mode 100644
index 00000000000..584785778ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
@@ -0,0 +1,67 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
new file mode 100644
index 00000000000..c8d2a6ddfc7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
@@ -0,0 +1,156 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %s", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
new file mode 100644
index 00000000000..7fcc144c1cd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
@@ -0,0 +1,155 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// AttrIter transforms an attribute iterator into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *cpb.AnyValue {
+ av := new(cpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &cpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &cpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &cpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
new file mode 100644
index 00000000000..0b5446e4d07
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
@@ -0,0 +1,114 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+var (
+ errUnknownAggregation = errors.New("unknown aggregation")
+ errUnknownTemporality = errors.New("unknown temporality")
+)
+
+type errMetric struct {
+ m *mpb.Metric
+ err error
+}
+
+func (e errMetric) Unwrap() error {
+ return e.err
+}
+
+func (e errMetric) Error() string {
+ format := "invalid metric (name: %q, description: %q, unit: %q): %s"
+ return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
+}
+
+func (e errMetric) Is(target error) bool {
+ return errors.Is(e.err, target)
+}
+
+// multiErr is used by the data-type transform functions to wrap multiple
+// errors into a single return value. The error message will show all errors
+// as a list and scope them by the datatype name that is returning them.
+type multiErr struct {
+ datatype string
+ errs []error
+}
+
+// errOrNil returns nil if e contains no errors, otherwise it returns e.
+func (e *multiErr) errOrNil() error {
+ if len(e.errs) == 0 {
+ return nil
+ }
+ return e
+}
+
+// append adds err to e. If err is a multiErr, its errs are flattened into e.
+func (e *multiErr) append(err error) {
+ // Do not use errors.As here, this should only be flattened one layer. If
+ // there is a *multiErr several steps down the chain, all the errors above
+ // it will be discarded if errors.As is used instead.
+ switch other := err.(type) {
+ case *multiErr:
+ // Flatten err errors into e.
+ e.errs = append(e.errs, other.errs...)
+ default:
+ e.errs = append(e.errs, err)
+ }
+}
+
+func (e *multiErr) Error() string {
+ es := make([]string, len(e.errs))
+ for i, err := range e.errs {
+ es[i] = fmt.Sprintf("* %s", err)
+ }
+
+ format := "%d errors occurred transforming %s:\n\t%s"
+ return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
+}
+
+func (e *multiErr) Unwrap() error {
+ switch len(e.errs) {
+ case 0:
+ return nil
+ case 1:
+ return e.errs[0]
+ }
+
+ // Return a multiErr without the leading error.
+ cp := &multiErr{
+ datatype: e.datatype,
+ errs: make([]error, len(e.errs)-1),
+ }
+ copy(cp.errs, e.errs[1:])
+ return cp
+}
+
+func (e *multiErr) Is(target error) bool {
+ if len(e.errs) == 0 {
+ return false
+ }
+ // Check if the first error is target.
+ return errors.Is(e.errs[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
new file mode 100644
index 00000000000..c8ab8dbf6a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
@@ -0,0 +1,320 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transform provides transformation functionality from the
+// sdk/metric/metricdata data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+ rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
+// contains invalid ScopeMetrics, an error will be returned along with an OTLP
+// ResourceMetrics that contains partial OTLP ScopeMetrics.
+func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+ sms, err := ScopeMetrics(rm.ScopeMetrics)
+ return &mpb.ResourceMetrics{
+ Resource: &rpb.Resource{
+ Attributes: AttrIter(rm.Resource.Iter()),
+ },
+ ScopeMetrics: sms,
+ SchemaUrl: rm.Resource.SchemaURL(),
+ }, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+ errs := &multiErr{datatype: "ScopeMetrics"}
+ out := make([]*mpb.ScopeMetrics, 0, len(sms))
+ for _, sm := range sms {
+ ms, err := Metrics(sm.Metrics)
+ if err != nil {
+ errs.append(err)
+ }
+
+ out = append(out, &mpb.ScopeMetrics{
+ Scope: &cpb.InstrumentationScope{
+ Name: sm.Scope.Name,
+ Version: sm.Scope.Version,
+ },
+ Metrics: ms,
+ SchemaUrl: sm.Scope.SchemaURL,
+ })
+ }
+ return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+ errs := &multiErr{datatype: "Metrics"}
+ out := make([]*mpb.Metric, 0, len(ms))
+ for _, m := range ms {
+ o, err := metric(m)
+ if err != nil {
+ // Do not include invalid data. Drop the metric, report the error.
+ errs.append(errMetric{m: o, err: err})
+ continue
+ }
+ out = append(out, o)
+ }
+ return out, errs.errOrNil()
+}
+
+func metric(m metricdata.Metrics) (*mpb.Metric, error) {
+ var err error
+ out := &mpb.Metric{
+ Name: m.Name,
+ Description: m.Description,
+ Unit: string(m.Unit),
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ out.Data = Gauge[int64](a)
+ case metricdata.Gauge[float64]:
+ out.Data = Gauge[float64](a)
+ case metricdata.Sum[int64]:
+ out.Data, err = Sum[int64](a)
+ case metricdata.Sum[float64]:
+ out.Data, err = Sum[float64](a)
+ case metricdata.Histogram[int64]:
+ out.Data, err = Histogram(a)
+ case metricdata.Histogram[float64]:
+ out.Data, err = Histogram(a)
+ case metricdata.ExponentialHistogram[int64]:
+ out.Data, err = ExponentialHistogram(a)
+ case metricdata.ExponentialHistogram[float64]:
+ out.Data, err = ExponentialHistogram(a)
+ default:
+ return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
+ }
+ return out, err
+}
+
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+ return &mpb.Metric_Gauge{
+ Gauge: &mpb.Gauge{
+ DataPoints: DataPoints(g.DataPoints),
+ },
+ }
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. An error is returned
+// if the temporality of s is unknown.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+ t, err := Temporality(s.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Sum{
+ Sum: &mpb.Sum{
+ AggregationTemporality: t,
+ IsMonotonic: s.IsMonotonic,
+ DataPoints: DataPoints(s.DataPoints),
+ },
+ }, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+ out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ ndp := &mpb.NumberDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ switch v := any(dPt.Value).(type) {
+ case int64:
+ ndp.Value = &mpb.NumberDataPoint_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ ndp.Value = &mpb.NumberDataPoint_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, ndp)
+ }
+ return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Histogram{
+ Histogram: &mpb.Histogram{
+ AggregationTemporality: t,
+ DataPoints: HistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
+ out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ hdp := &mpb.HistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ BucketCounts: dPt.BucketCounts,
+ ExplicitBounds: dPt.Bounds,
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ hdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ hdp.Max = &vF64
+ }
+ out = append(out, hdp)
+ }
+ return out
+}
+
+// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_ExponentialHistogram{
+ ExponentialHistogram: &mpb.ExponentialHistogram{
+ AggregationTemporality: t,
+ DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
+// from dPts.
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+ out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ ehdp := &mpb.ExponentialHistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ Scale: dPt.Scale,
+ ZeroCount: dPt.ZeroCount,
+ Exemplars: Exemplars(dPt.Exemplars),
+
+ Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
+ Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Max = &vF64
+ }
+ out = append(out, ehdp)
+ }
+ return out
+}
+
+// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
+// from bucket.
+func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+ return &mpb.ExponentialHistogramDataPoint_Buckets{
+ Offset: bucket.Offset,
+ BucketCounts: bucket.Counts,
+ }
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+ switch t {
+ case metricdata.DeltaTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+ case metricdata.CumulativeTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+ default:
+ err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+ }
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64.
+// The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64
+// (a date before the year 1678 or after 2262).
+// timeUnixNano on the zero Time returns 0.
+// The result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+ if t.IsZero() {
+ return 0
+ }
+ return uint64(t.UnixNano())
+}
+
+// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
+func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exemplar {
+ out := make([]*mpb.Exemplar, 0, len(exemplars))
+ for _, exemplar := range exemplars {
+ e := &mpb.Exemplar{
+ FilteredAttributes: KeyValues(exemplar.FilteredAttributes),
+ TimeUnixNano: timeUnixNano(exemplar.Time),
+ SpanId: exemplar.SpanID,
+ TraceId: exemplar.TraceID,
+ }
+ switch v := any(exemplar.Value).(type) {
+ case int64:
+ e.Value = &mpb.Exemplar_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ e.Value = &mpb.Exemplar_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, e)
+ }
+ return out
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
new file mode 100644
index 00000000000..3538f8a7d0e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
+func Version() string {
+ return "1.24.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 00000000000..acc9a670b22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,264 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ metric.MeterProvider = MeterProvider{}
+ _ metric.Meter = Meter{}
+ _ metric.Observer = Observer{}
+ _ metric.Registration = Registration{}
+ _ metric.Int64Counter = Int64Counter{}
+ _ metric.Float64Counter = Float64Counter{}
+ _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+ _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+ _ metric.Int64Histogram = Int64Histogram{}
+ _ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+ _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+ _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+ _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+ _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+ _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+ _ metric.Int64Observer = Int64Observer{}
+ _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+ return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+ return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ return Int64Histogram{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+ return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+ return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ return Float64Histogram{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+ return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+ return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+ return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+ return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, it performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including hold any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+ metric.Int64Observable
+ embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+ metric.Float64Observable
+ embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
new file mode 100644
index 00000000000..faddbb0b61b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
@@ -0,0 +1,201 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "errors"
+ "fmt"
+)
+
+// errAgg is wrapped by misconfigured aggregations.
+var errAgg = errors.New("aggregation")
+
+// Aggregation is the aggregation used to summarize recorded measurements.
+type Aggregation interface {
+ // copy returns a deep copy of the Aggregation.
+ copy() Aggregation
+
+ // err returns an error for any misconfigured Aggregation.
+ err() error
+}
+
+// AggregationDrop is an Aggregation that drops all recorded data.
+type AggregationDrop struct{} // AggregationDrop has no parameters.
+
+var _ Aggregation = AggregationDrop{}
+
+// copy returns a deep copy of d.
+func (d AggregationDrop) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A drop aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDrop) err() error { return nil }
+
+// AggregationDefault is an Aggregation that uses the default instrument kind selection
+// mapping to select another Aggregation. A metric reader can be configured to
+// make an aggregation selection based on instrument kind that differs from
+// the default. This Aggregation ensures the default is used.
+//
+// See the [DefaultAggregationSelector] for information about the default
+// instrument kind selection mapping.
+type AggregationDefault struct{} // AggregationDefault has no parameters.
+
+var _ Aggregation = AggregationDefault{}
+
+// copy returns a deep copy of d.
+func (d AggregationDefault) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A default aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDefault) err() error { return nil }
+
+// AggregationSum is an Aggregation that summarizes a set of measurements as their
+// arithmetic sum.
+type AggregationSum struct{} // AggregationSum has no parameters.
+
+var _ Aggregation = AggregationSum{}
+
+// copy returns a deep copy of s.
+func (s AggregationSum) copy() Aggregation { return s }
+
+// err returns an error for any misconfiguration. A sum aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationSum) err() error { return nil }
+
+// AggregationLastValue is an Aggregation that summarizes a set of measurements as the
+// last one made.
+type AggregationLastValue struct{} // AggregationLastValue has no parameters.
+
+var _ Aggregation = AggregationLastValue{}
+
+// copy returns a deep copy of l.
+func (l AggregationLastValue) copy() Aggregation { return l }
+
+// err returns an error for any misconfiguration. A last-value aggregation has
+// no parameters and cannot be misconfigured, therefore this always returns
+// nil.
+func (AggregationLastValue) err() error { return nil }
+
+// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of
+// measurements as an histogram with explicitly defined buckets.
+type AggregationExplicitBucketHistogram struct {
+ // Boundaries are the increasing bucket boundary values. Boundary values
+ // define bucket upper bounds. Buckets are exclusive of their lower
+ // boundary and inclusive of their upper bound (except at positive
+ // infinity). A measurement is defined to fall into the greatest-numbered
+ // bucket with a boundary that is greater than or equal to the
+ // measurement. As an example, boundaries defined as:
+ //
+ // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
+ //
+ // Will define these buckets:
+ //
+ // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
+ // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
+ // (500.0, 1000.0], (1000.0, +∞)
+ Boundaries []float64
+ // NoMinMax indicates whether to not record the min and max of the
+ // distribution. By default, these extrema are recorded.
+ //
+ // Recording these extrema for cumulative data is expected to have little
+ // value, they will represent the entire life of the instrument instead of
+ // just the current collection cycle. It is recommended to set this to true
+ // for that type of data to avoid computing the low-value extrema.
+ NoMinMax bool
+}
+
+var _ Aggregation = AggregationExplicitBucketHistogram{}
+
+// errHist is returned by misconfigured ExplicitBucketHistograms.
+var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
+
+// err returns an error for any misconfiguration.
+func (h AggregationExplicitBucketHistogram) err() error {
+ if len(h.Boundaries) <= 1 {
+ return nil
+ }
+
+ // Check boundaries are monotonic.
+ i := h.Boundaries[0]
+ for _, j := range h.Boundaries[1:] {
+ if i >= j {
+ return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
+ }
+ i = j
+ }
+
+ return nil
+}
+
+// copy returns a deep copy of h.
+func (h AggregationExplicitBucketHistogram) copy() Aggregation {
+ b := make([]float64, len(h.Boundaries))
+ copy(b, h.Boundaries)
+ return AggregationExplicitBucketHistogram{
+ Boundaries: b,
+ NoMinMax: h.NoMinMax,
+ }
+}
+
+// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of
+// measurements as an histogram with bucket widths that grow exponentially.
+type AggregationBase2ExponentialHistogram struct {
+ // MaxSize is the maximum number of buckets to use for the histogram.
+ MaxSize int32
+ // MaxScale is the maximum resolution scale to use for the histogram.
+ //
+ // MaxScale has a maximum value of 20. Using a value of 20 means the
+ // maximum number of buckets that can fit within the range of a
+ // signed 32-bit integer index could be used.
+ //
+ // MaxScale has a minimum value of -10. Using a value of -10 means only
+ // two buckets will be used.
+ MaxScale int32
+
+ // NoMinMax indicates whether to not record the min and max of the
+ // distribution. By default, these extrema are recorded.
+ //
+ // Recording these extrema for cumulative data is expected to have little
+ // value, they will represent the entire life of the instrument instead of
+ // just the current collection cycle. It is recommended to set this to true
+ // for that type of data to avoid computing the low-value extrema.
+ NoMinMax bool
+}
+
+var _ Aggregation = AggregationBase2ExponentialHistogram{}
+
+// copy returns a deep copy of the Aggregation.
+func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
+ return e
+}
+
+const (
+ expoMaxScale = 20
+ expoMinScale = -10
+)
+
+// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
+var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
+
+// err returns an error for any misconfigured Aggregation.
+func (e AggregationBase2ExponentialHistogram) err() error {
+ if e.MaxScale > expoMaxScale {
+ return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale)
+ }
+ if e.MaxSize <= 0 {
+ return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
new file mode 100644
index 00000000000..e9c0b38d0bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "sync"
+)
+
+// cache is a locking storage used to quickly return already computed values.
+//
+// The zero value of a cache is empty and ready to use.
+//
+// A cache must not be copied after first use.
+//
+// All methods of a cache are safe to call concurrently.
+type cache[K comparable, V any] struct {
+ sync.Mutex
+ data map[K]V
+}
+
+// Lookup returns the value stored in the cache with the associated key if it
+// exists. Otherwise, f is called and its returned value is set in the cache
+// for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cache lock, so f
+// should not block excessively.
+func (c *cache[K, V]) Lookup(key K, f func() V) V {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.data == nil {
+ val := f()
+ c.data = map[K]V{key: val}
+ return val
+ }
+ if v, ok := c.data[key]; ok {
+ return v
+ }
+ val := f()
+ c.data[key] = val
+ return val
+}
+
+// HasKey returns true if Lookup has previously been called with that key
+//
+// HasKey is safe to call concurrently.
+func (c *cache[K, V]) HasKey(key K) bool {
+ c.Lock()
+ defer c.Unlock()
+ _, ok := c.data[key]
+ return ok
+}
+
+// cacheWithErr is a locking storage used to quickly return already computed values and an error.
+//
+// The zero value of a cacheWithErr is empty and ready to use.
+//
+// A cacheWithErr must not be copied after first use.
+//
+// All methods of a cacheWithErr are safe to call concurrently.
+type cacheWithErr[K comparable, V any] struct {
+ cache[K, valAndErr[V]]
+}
+
+type valAndErr[V any] struct {
+ val V
+ err error
+}
+
+// Lookup returns the value stored in the cacheWithErr with the associated key
+// if it exists. Otherwise, f is called and its returned value is set in the
+// cacheWithErr for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
+// should not block excessively.
+func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
+ combined := c.cache.Lookup(key, func() valAndErr[V] {
+ val, err := f()
+ return valAndErr[V]{val: val, err: err}
+ })
+ return combined.val, combined.err
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
new file mode 100644
index 00000000000..0b191128494
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -0,0 +1,148 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// config contains configuration options for a MeterProvider.
+type config struct {
+ res *resource.Resource
+ readers []Reader
+ views []View
+}
+
+// readerSignals returns a force-flush and shutdown function for a
+// MeterProvider to call in their respective options. All Readers c contains
+// will have their force-flush and shutdown methods unified into returned
+// single functions.
+func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
+ var fFuncs, sFuncs []func(context.Context) error
+ for _, r := range c.readers {
+ sFuncs = append(sFuncs, r.Shutdown)
+ if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok {
+ fFuncs = append(fFuncs, f.ForceFlush)
+ }
+ }
+
+ return unify(fFuncs), unifyShutdown(sFuncs)
+}
+
+// unify unifies calling all of funcs into a single function call. All errors
+// returned from calls to funcs will be unify into a single error return
+// value.
+func unify(funcs []func(context.Context) error) func(context.Context) error {
+ return func(ctx context.Context) error {
+ var errs []error
+ for _, f := range funcs {
+ if err := f(ctx); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return unifyErrors(errs)
+ }
+}
+
+// unifyErrors combines multiple errors into a single error.
+func unifyErrors(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return fmt.Errorf("%v", errs)
+ }
+}
+
+// unifyShutdown unifies calling all of funcs once for a shutdown. If called
+// more than once, an ErrReaderShutdown error is returned.
+func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
+ f := unify(funcs)
+ var once sync.Once
+ return func(ctx context.Context) error {
+ err := ErrReaderShutdown
+ once.Do(func() { err = f(ctx) })
+ return err
+ }
+}
+
+// newConfig returns a config configured with options.
+func newConfig(options []Option) config {
+ conf := config{res: resource.Default()}
+ for _, o := range options {
+ conf = o.apply(conf)
+ }
+ return conf
+}
+
+// Option applies a configuration option value to a MeterProvider.
+type Option interface {
+ apply(config) config
+}
+
+// optionFunc applies a set of options to a config.
+type optionFunc func(config) config
+
+// apply returns a config with option(s) applied.
+func (o optionFunc) apply(conf config) config {
+ return o(conf)
+}
+
+// WithResource associates a Resource with a MeterProvider. This Resource
+// represents the entity producing telemetry and is associated with all Meters
+// the MeterProvider will create.
+//
+// By default, if this Option is not used, the default Resource from the
+// go.opentelemetry.io/otel/sdk/resource package will be used.
+func WithResource(res *resource.Resource) Option {
+ return optionFunc(func(conf config) config {
+ conf.res = res
+ return conf
+ })
+}
+
+// WithReader associates Reader r with a MeterProvider.
+//
+// By default, if this option is not used, the MeterProvider will perform no
+// operations; no data will be exported without a Reader.
+func WithReader(r Reader) Option {
+ return optionFunc(func(cfg config) config {
+ if r == nil {
+ return cfg
+ }
+ cfg.readers = append(cfg.readers, r)
+ return cfg
+ })
+}
+
+// WithView associates views a MeterProvider.
+//
+// Views are appended to existing ones in a MeterProvider if this option is
+// used multiple times.
+//
+// By default, if this option is not used, the MeterProvider will use the
+// default view.
+func WithView(views ...View) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.views = append(cfg.views, views...)
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
new file mode 100644
index 00000000000..475d3e39416
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metric provides an implementation of the OpenTelemetry metrics SDK.
+//
+// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
+// about the concept of OpenTelemetry metrics and
+// https://opentelemetry.io/docs/concepts/components/ for more information
+// about OpenTelemetry SDKs.
+//
+// The entry point for the metric package is the MeterProvider. It is the
+// object that all API calls use to create Meters, instruments, and ultimately
+// make metric measurements. Also, it is an object that should be used to
+// control the life-cycle (start, flush, and shutdown) of the SDK.
+//
+// A MeterProvider needs to be configured to export the measured data, this is
+// done by configuring it with a Reader implementation (using the WithReader
+// MeterProviderOption). Readers take two forms: ones that push to an endpoint
+// (NewPeriodicReader), and ones that an endpoint pulls from. See
+// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
+// or with these Readers.
+//
+// Each Reader, when registered with the MeterProvider, can be augmented with a
+// View. Views allow users that run OpenTelemetry instrumented code to modify
+// the generated data of that instrumentation.
+//
+// The data generated by a MeterProvider needs to include information about its
+// origin. A MeterProvider needs to be configured with a Resource, using the
+// WithResource MeterProviderOption, to include this information. This Resource
+// should be used to describe the unique runtime environment instrumented code
+// is being run on. That way when multiple instances of the code are collected
+// at a single endpoint their origin is decipherable.
+//
+// See [go.opentelemetry.io/otel/metric] for more information about
+// the metric API.
+//
+// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about
+// the experimental features.
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
new file mode 100644
index 00000000000..940ba815942
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "os"
+ "strconv"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+ // The time interval (in milliseconds) between the start of two export attempts.
+ envInterval = "OTEL_METRIC_EXPORT_INTERVAL"
+ // Maximum allowed time (in milliseconds) to export data.
+ envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT"
+)
+
+// envDuration returns an environment variable's value as duration in milliseconds if it is exists,
+// or the defaultValue if the environment variable is not defined or the value is not valid.
+func envDuration(key string, defaultValue time.Duration) time.Duration {
+ v := os.Getenv(key)
+ if v == "" {
+ return defaultValue
+ }
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "environment variable", key, "value", v)
+ return defaultValue
+ }
+ if d <= 0 {
+ global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v)
+ return defaultValue
+ }
+ return time.Duration(d) * time.Millisecond
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
new file mode 100644
index 00000000000..3f1ce9f1d88
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "os"
+ "runtime"
+
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
+)
+
+// reservoirFunc returns the appropriately configured exemplar reservoir
+// creation func based on the passed InstrumentKind and user defined
+// environment variables.
+//
+// Note: This will only return non-nil values when the experimental exemplar
+// feature is enabled. When OTEL_METRICS_EXEMPLAR_FILTER is set to always_off,
+// the returned factory produces a drop reservoir.
+func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir[N] {
+ if !x.Exemplars.Enabled() {
+ return nil
+ }
+
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
+ resF := func() func() exemplar.Reservoir[N] {
+ // Explicit bucket histogram aggregation with more than 1 bucket will
+ // use AlignedHistogramBucketExemplarReservoir.
+ a, ok := agg.(AggregationExplicitBucketHistogram)
+ if ok && len(a.Boundaries) > 0 {
+ cp := make([]float64, len(a.Boundaries))
+ copy(cp, a.Boundaries)
+ return func() exemplar.Reservoir[N] {
+ bounds := cp
+ return exemplar.Histogram[N](bounds)
+ }
+ }
+
+ var n int
+ if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
+ // Base2 Exponential Histogram Aggregation SHOULD use a
+ // SimpleFixedSizeExemplarReservoir with a reservoir equal to the
+ // smaller of the maximum number of buckets configured on the
+ // aggregation or twenty (e.g. min(20, max_buckets)).
+ n = int(a.MaxSize)
+ if n > 20 {
+ n = 20
+ }
+ } else {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
+ // This Exemplar reservoir MAY take a configuration parameter for
+ // the size of the reservoir. If no size configuration is
+ // provided, the default size MAY be the number of possible
+ // concurrent threads (e.g. number of CPUs) to help reduce
+ // contention. Otherwise, a default size of 1 SHOULD be used.
+ n = runtime.NumCPU()
+ if n < 1 {
+ // Should never be the case, but be defensive.
+ n = 1
+ }
+ }
+
+ return func() exemplar.Reservoir[N] {
+ return exemplar.FixedSize[N](n)
+ }
+ }
+
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
+ const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
+
+ switch os.Getenv(filterEnvKey) {
+ case "always_on":
+ return resF()
+ case "always_off":
+ return exemplar.Drop[N]
+ case "trace_based":
+ fallthrough
+ default:
+ newR := resF()
+ return func() exemplar.Reservoir[N] {
+ return exemplar.SampledFilter(newR())
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
new file mode 100644
index 00000000000..da8941b378d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ErrExporterShutdown is returned if Export or Shutdown are called after an
+// Exporter has been Shutdown.
+var ErrExporterShutdown = fmt.Errorf("exporter is shutdown")
+
+// Exporter handles the delivery of metric data to external receivers. This is
+// the final component in the metric push pipeline.
+type Exporter interface {
+ // Temporality returns the Temporality to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Exporter methods.
+ Temporality(InstrumentKind) metricdata.Temporality
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Aggregation returns the Aggregation to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Exporter methods.
+ Aggregation(InstrumentKind) Aggregation
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Export serializes and transmits metric data to a receiver.
+ //
+ // This is called synchronously, there is no concurrency safety
+ // requirement. Because of this, it is critical that all timeouts and
+ // cancellations of the passed context be honored.
+ //
+ // All retry logic must be contained in this function. The SDK does not
+ // implement any retry logic. All errors returned by this function are
+ // considered unrecoverable and will be reported to a configured error
+ // Handler.
+ //
+ // The passed ResourceMetrics may be reused when the call completes. If an
+ // exporter needs to hold this data after it returns, it needs to make a
+ // copy.
+ Export(context.Context, *metricdata.ResourceMetrics) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush flushes any metric data held by an exporter.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // This method needs to be concurrent safe.
+ ForceFlush(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown flushes all metric data held by an exporter and releases any
+ // held computational resources.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // After Shutdown is called, calls to Export will perform no operation and
+ // instead will return an error indicating the shutdown state.
+ //
+ // This method needs to be concurrent safe.
+ Shutdown(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
new file mode 100644
index 00000000000..a4cfcbb95f1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -0,0 +1,353 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+var (
+ zeroInstrumentKind InstrumentKind
+ zeroScope instrumentation.Scope
+)
+
+// InstrumentKind is the identifier of a group of instruments that all
+// perform the same function.
+type InstrumentKind uint8
+
+const (
+ // instrumentKindUndefined is an undefined instrument kind, it should not
+ // be used by any initialized type.
+ instrumentKindUndefined InstrumentKind = iota // nolint:deadcode,varcheck,unused
+ // InstrumentKindCounter identifies a group of instruments that record
+ // increasing values synchronously with the code path they are measuring.
+ InstrumentKindCounter
+ // InstrumentKindUpDownCounter identifies a group of instruments that
+ // record increasing and decreasing values synchronously with the code path
+ // they are measuring.
+ InstrumentKindUpDownCounter
+ // InstrumentKindHistogram identifies a group of instruments that record a
+ // distribution of values synchronously with the code path they are
+ // measuring.
+ InstrumentKindHistogram
+ // InstrumentKindObservableCounter identifies a group of instruments that
+ // record increasing values in an asynchronous callback.
+ InstrumentKindObservableCounter
+ // InstrumentKindObservableUpDownCounter identifies a group of instruments
+ // that record increasing and decreasing values in an asynchronous
+ // callback.
+ InstrumentKindObservableUpDownCounter
+ // InstrumentKindObservableGauge identifies a group of instruments that
+ // record current values in an asynchronous callback.
+ InstrumentKindObservableGauge
+)
+
+type nonComparable [0]func() // nolint: unused // This is indeed used.
+
+// Instrument describes properties an instrument is created with.
+type Instrument struct {
+ // Name is the human-readable identifier of the instrument.
+ Name string
+ // Description describes the purpose of the instrument.
+ Description string
+ // Kind defines the functional group of the instrument.
+ Kind InstrumentKind
+ // Unit is the unit of measurement recorded by the instrument.
+ Unit string
+ // Scope identifies the instrumentation that created the instrument.
+ Scope instrumentation.Scope
+
+ // Ensure forward compatibility if non-comparable fields need to be added.
+ nonComparable // nolint: unused
+}
+
+// empty reports whether all fields of i are their zero-value.
+func (i Instrument) empty() bool {
+ return i.Name == "" &&
+ i.Description == "" &&
+ i.Kind == zeroInstrumentKind &&
+ i.Unit == "" &&
+ i.Scope == zeroScope
+}
+
+// matches returns whether all the non-zero-value fields of i match the
+// corresponding fields of other. If i is empty it will match all other, and
+// true will always be returned.
+func (i Instrument) matches(other Instrument) bool {
+ return i.matchesName(other) &&
+ i.matchesDescription(other) &&
+ i.matchesKind(other) &&
+ i.matchesUnit(other) &&
+ i.matchesScope(other)
+}
+
+// matchesName returns true if the Name of i is "" or it equals the Name of
+// other, otherwise false.
+func (i Instrument) matchesName(other Instrument) bool {
+ return i.Name == "" || i.Name == other.Name
+}
+
+// matchesDescription returns true if the Description of i is "" or it equals
+// the Description of other, otherwise false.
+func (i Instrument) matchesDescription(other Instrument) bool {
+ return i.Description == "" || i.Description == other.Description
+}
+
+// matchesKind returns true if the Kind of i is its zero-value or it equals the
+// Kind of other, otherwise false.
+func (i Instrument) matchesKind(other Instrument) bool {
+ return i.Kind == zeroInstrumentKind || i.Kind == other.Kind
+}
+
+// matchesUnit returns true if the Unit of i is its zero-value or it equals the
+// Unit of other, otherwise false.
+func (i Instrument) matchesUnit(other Instrument) bool {
+ return i.Unit == "" || i.Unit == other.Unit
+}
+
+// matchesScope returns true if the Scope of i is its zero-value or it equals
+// the Scope of other, otherwise false.
+func (i Instrument) matchesScope(other Instrument) bool {
+ return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) &&
+ (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) &&
+ (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL)
+}
+
+// Stream describes the stream of data an instrument produces.
+type Stream struct {
+ // Name is the human-readable identifier of the stream.
+ Name string
+ // Description describes the purpose of the data.
+ Description string
+ // Unit is the unit of measurement recorded.
+ Unit string
+ // Aggregation the stream uses for an instrument.
+ Aggregation Aggregation
+ // AttributeFilter is an attribute Filter applied to the attributes
+ // recorded for an instrument's measurement. If the filter returns false
+ // the attribute will not be recorded, otherwise, if it returns true, it
+ // will record the attribute.
+ //
+ // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
+ // provide an allow-list of attribute keys here.
+ AttributeFilter attribute.Filter
+}
+
+// instID are the identifying properties of an instrument.
+type instID struct {
+ // Name is the name of the stream.
+ Name string
+ // Description is the description of the stream.
+ Description string
+ // Kind defines the functional group of the instrument.
+ Kind InstrumentKind
+ // Unit is the unit of the stream.
+ Unit string
+ // Number is the number type of the stream.
+ Number string
+}
+
+// Returns a normalized copy of the instID i.
+//
+// Instrument names are considered case-insensitive. Standardize the instrument
+// name to always be lowercase for the returned instID so it can be compared
+// without the name casing affecting the comparison.
+func (i instID) normalize() instID {
+ i.Name = strings.ToLower(i.Name)
+ return i
+}
+
+type int64Inst struct {
+ measures []aggregate.Measure[int64]
+
+ embedded.Int64Counter
+ embedded.Int64UpDownCounter
+ embedded.Int64Histogram
+}
+
+var (
+ _ metric.Int64Counter = (*int64Inst)(nil)
+ _ metric.Int64UpDownCounter = (*int64Inst)(nil)
+ _ metric.Int64Histogram = (*int64Inst)(nil)
+)
+
+func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
+ c := metric.NewAddConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) {
+ c := metric.NewRecordConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
+ for _, in := range i.measures {
+ in(ctx, val, s)
+ }
+}
+
+type float64Inst struct {
+ measures []aggregate.Measure[float64]
+
+ embedded.Float64Counter
+ embedded.Float64UpDownCounter
+ embedded.Float64Histogram
+}
+
+var (
+ _ metric.Float64Counter = (*float64Inst)(nil)
+ _ metric.Float64UpDownCounter = (*float64Inst)(nil)
+ _ metric.Float64Histogram = (*float64Inst)(nil)
+)
+
+func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
+ c := metric.NewAddConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) {
+ c := metric.NewRecordConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
+ for _, in := range i.measures {
+ in(ctx, val, s)
+ }
+}
+
+// observablID is a comparable unique identifier of an observable.
+type observablID[N int64 | float64] struct {
+ name string
+ description string
+ kind InstrumentKind
+ unit string
+ scope instrumentation.Scope
+}
+
+type float64Observable struct {
+ metric.Float64Observable
+ *observable[float64]
+
+ embedded.Float64ObservableCounter
+ embedded.Float64ObservableUpDownCounter
+ embedded.Float64ObservableGauge
+}
+
+var (
+ _ metric.Float64ObservableCounter = float64Observable{}
+ _ metric.Float64ObservableUpDownCounter = float64Observable{}
+ _ metric.Float64ObservableGauge = float64Observable{}
+)
+
+func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable {
+ return float64Observable{
+ observable: newObservable[float64](m, kind, name, desc, u),
+ }
+}
+
+type int64Observable struct {
+ metric.Int64Observable
+ *observable[int64]
+
+ embedded.Int64ObservableCounter
+ embedded.Int64ObservableUpDownCounter
+ embedded.Int64ObservableGauge
+}
+
+var (
+ _ metric.Int64ObservableCounter = int64Observable{}
+ _ metric.Int64ObservableUpDownCounter = int64Observable{}
+ _ metric.Int64ObservableGauge = int64Observable{}
+)
+
+func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {
+ return int64Observable{
+ observable: newObservable[int64](m, kind, name, desc, u),
+ }
+}
+
+type observable[N int64 | float64] struct {
+ metric.Observable
+ observablID[N]
+
+ meter *meter
+ measures measures[N]
+ dropAggregation bool
+}
+
+func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
+ return &observable[N]{
+ observablID: observablID[N]{
+ name: name,
+ description: desc,
+ kind: kind,
+ unit: u,
+ scope: m.scope,
+ },
+ meter: m,
+ }
+}
+
+// observe records the val for the set of attrs.
+func (o *observable[N]) observe(val N, s attribute.Set) {
+ o.measures.observe(val, s)
+}
+
+func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) {
+ o.measures = append(o.measures, meas...)
+}
+
+type measures[N int64 | float64] []aggregate.Measure[N]
+
+// observe records the val for the set of attrs.
+func (m measures[N]) observe(val N, s attribute.Set) {
+ for _, in := range m {
+ in(context.Background(), val, s)
+ }
+}
+
+var errEmptyAgg = errors.New("no aggregators for observable instrument")
+
+// registerable returns an error if the observable o should not be registered,
+// and nil if it should. An errEmptyAgg error is returned if o is effectively a
+// no-op because it does not have any aggregators. Also, an error is returned
+// if scope defines a Meter other than the one o was created by.
+func (o *observable[N]) registerable(m *meter) error {
+ if len(o.measures) == 0 {
+ return errEmptyAgg
+ }
+ if m != o.meter {
+ return fmt.Errorf(
+ "invalid registration: observable %q from Meter %q, registered with Meter %q",
+ o.name,
+ o.scope.Name,
+ m.scope.Name,
+ )
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
new file mode 100644
index 00000000000..d5f9e982c2b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT.
+
+package metric
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[instrumentKindUndefined-0]
+ _ = x[InstrumentKindCounter-1]
+ _ = x[InstrumentKindUpDownCounter-2]
+ _ = x[InstrumentKindHistogram-3]
+ _ = x[InstrumentKindObservableCounter-4]
+ _ = x[InstrumentKindObservableUpDownCounter-5]
+ _ = x[InstrumentKindObservableGauge-6]
+}
+
+const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGauge"
+
+var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107}
+
+func (i InstrumentKind) String() string {
+ if i >= InstrumentKind(len(_InstrumentKind_index)-1) {
+ return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
new file mode 100644
index 00000000000..4060a2f76d3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -0,0 +1,160 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// now is used to return the current local time while allowing tests to
+// override the default time.Now function.
+var now = time.Now
+
+// Measure receives measurements to be aggregated.
+type Measure[N int64 | float64] func(context.Context, N, attribute.Set)
+
+// ComputeAggregation stores the aggregate of measurements into dest and
+// returns the number of aggregate data-points output.
+type ComputeAggregation func(dest *metricdata.Aggregation) int
+
+// Builder builds an aggregate function.
+type Builder[N int64 | float64] struct {
+ // Temporality is the temporality used for the returned aggregate function.
+ //
+ // If this is not provided a default of cumulative will be used (except for
+ // the last-value aggregate function where delta is the only appropriate
+ // temporality).
+ Temporality metricdata.Temporality
+ // Filter is the attribute filter the aggregate function will use on the
+ // input of measurements.
+ Filter attribute.Filter
+ // ReservoirFunc is the factory function used by aggregate functions to
+ // create new exemplar reservoirs for a new seen attribute set.
+ //
+ // If this is not provided a default factory function that returns an
+ // exemplar.Drop reservoir will be used.
+ ReservoirFunc func() exemplar.Reservoir[N]
+ // AggregationLimit is the cardinality limit of measurement attributes. Any
+ // measurement for new attributes once the limit has been reached will be
+ // aggregated into a single aggregate for the "otel.metric.overflow"
+ // attribute.
+ //
+ // If AggregationLimit is less than or equal to zero there will not be an
+ // aggregation limit imposed (i.e. unlimited attribute sets).
+ AggregationLimit int
+}
+
+func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] {
+ if b.ReservoirFunc != nil {
+ return b.ReservoirFunc
+ }
+
+ return exemplar.Drop[N]
+}
+
+type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
+
+func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
+ if b.Filter != nil {
+ fltr := b.Filter // Copy to make it immutable after assignment.
+ return func(ctx context.Context, n N, a attribute.Set) {
+ fAttr, dropped := a.Filter(fltr)
+ f(ctx, n, fAttr, dropped)
+ }
+ }
+ return func(ctx context.Context, n N, a attribute.Set) {
+ f(ctx, n, a, nil)
+ }
+}
+
+// LastValue returns a last-value aggregate function input and output.
+//
+// The Builder.Temporality is ignored and delta is always used.
+func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
+ // Delta temporality is the only temporality that makes semantic sense for
+ // a last-value aggregate.
+ lv := newLastValue[N](b.AggregationLimit, b.resFunc())
+
+ return b.filter(lv.measure), func(dest *metricdata.Aggregation) int {
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory
+ // reuse of the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+ lv.computeAggregation(&gData.DataPoints)
+ *dest = gData
+
+ return len(gData.DataPoints)
+ }
+}
+
+// PrecomputedSum returns a sum aggregate function input and output. The
+// arguments passed to the input are expected to be the precomputed sum values.
+func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
+ s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(s.measure), s.delta
+ default:
+ return b.filter(s.measure), s.cumulative
+ }
+}
+
+// Sum returns a sum aggregate function input and output.
+func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
+ s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(s.measure), s.delta
+ default:
+ return b.filter(s.measure), s.cumulative
+ }
+}
+
+// ExplicitBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+ h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(h.measure), h.delta
+ default:
+ return b.filter(h.measure), h.cumulative
+ }
+}
+
+// ExponentialBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+ h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(h.measure), h.delta
+ default:
+ return b.filter(h.measure), h.cumulative
+ }
+}
+
+// reset ensures s has capacity and sets its length. If the capacity of s is
+// too small, a new slice is returned with the specified capacity and length.
+func reset[T any](s []T, length, capacity int) []T {
+ if cap(s) < capacity {
+ return make([]T, length, capacity)
+ }
+ return s[:length]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
new file mode 100644
index 00000000000..e83a2693faf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package aggregate provides aggregate types used to compute aggregations and
+// cycle the state of metric measurements made by the SDK. These types and
+// functionality are meant only for internal SDK use.
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
new file mode 100644
index 00000000000..4139a6d1560
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -0,0 +1,451 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "errors"
+ "math"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+const (
+ expoMaxScale = 20
+ expoMinScale = -10
+
+ smallestNonZeroNormalFloat64 = 0x1p-1022
+
+ // These redefine the Math constants with a type, so the compiler won't coerce
+ // them into an int on 32 bit platforms.
+ maxInt64 int64 = math.MaxInt64
+ minInt64 int64 = math.MinInt64
+)
+
+// expoHistogramDataPoint is a single data point in an exponential histogram.
+type expoHistogramDataPoint[N int64 | float64] struct {
+ res exemplar.Reservoir[N]
+
+ count uint64
+ min N
+ max N
+ sum N
+
+ maxSize int
+ noMinMax bool
+ noSum bool
+
+ scale int
+
+ posBuckets expoBuckets
+ negBuckets expoBuckets
+ zeroCount uint64
+}
+
+func newExpoHistogramDataPoint[N int64 | float64](maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+ f := math.MaxFloat64
+ max := N(f) // if N is int64, max will overflow to -9223372036854775808
+ min := N(-f)
+ if N(maxInt64) > N(f) {
+ max = N(maxInt64)
+ min = N(minInt64)
+ }
+ return &expoHistogramDataPoint[N]{
+ min: max,
+ max: min,
+ maxSize: maxSize,
+ noMinMax: noMinMax,
+ noSum: noSum,
+ scale: maxScale,
+ }
+}
+
+// record adds a new measurement to the histogram. It will rescale the buckets if needed.
+func (p *expoHistogramDataPoint[N]) record(v N) {
+ p.count++
+
+ if !p.noMinMax {
+ if v < p.min {
+ p.min = v
+ }
+ if v > p.max {
+ p.max = v
+ }
+ }
+ if !p.noSum {
+ p.sum += v
+ }
+
+ absV := math.Abs(float64(v))
+
+ if float64(absV) == 0.0 {
+ p.zeroCount++
+ return
+ }
+
+ bin := p.getBin(absV)
+
+ bucket := &p.posBuckets
+ if v < 0 {
+ bucket = &p.negBuckets
+ }
+
+ // If the new bin would make the counts larger than maxScale, we need to
+ // downscale current measurements.
+ if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
+ if p.scale-scaleDelta < expoMinScale {
+ // With a scale of -10 there is only two buckets for the whole range of float64 values.
+ // This can only happen if there is a max size of 1.
+ otel.Handle(errors.New("exponential histogram scale underflow"))
+ return
+ }
+ // Downscale
+ p.scale -= scaleDelta
+ p.posBuckets.downscale(scaleDelta)
+ p.negBuckets.downscale(scaleDelta)
+
+ bin = p.getBin(absV)
+ }
+
+ bucket.record(bin)
+}
+
+// getBin returns the bin v should be recorded into.
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
+ frac, exp := math.Frexp(v)
+ if p.scale <= 0 {
+ // Because of the choice of fraction is always 1 power of two higher than we want.
+ correction := 1
+ if frac == .5 {
+ // If v is an exact power of two the frac will be .5 and the exp
+ // will be one higher than we want.
+ correction = 2
+ }
+ return (exp - correction) >> (-p.scale)
+ }
+	return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
+}
+
+// scaleFactors are constants used in calculating the logarithm index. They are
+// equivalent to 2^index/log(2)
+var scaleFactors = [21]float64{
+	math.Ldexp(math.Log2E, 0),
+	math.Ldexp(math.Log2E, 1),
+	math.Ldexp(math.Log2E, 2),
+	math.Ldexp(math.Log2E, 3),
+	math.Ldexp(math.Log2E, 4),
+	math.Ldexp(math.Log2E, 5),
+	math.Ldexp(math.Log2E, 6),
+	math.Ldexp(math.Log2E, 7),
+	math.Ldexp(math.Log2E, 8),
+	math.Ldexp(math.Log2E, 9),
+	math.Ldexp(math.Log2E, 10),
+	math.Ldexp(math.Log2E, 11),
+	math.Ldexp(math.Log2E, 12),
+	math.Ldexp(math.Log2E, 13),
+	math.Ldexp(math.Log2E, 14),
+	math.Ldexp(math.Log2E, 15),
+	math.Ldexp(math.Log2E, 16),
+	math.Ldexp(math.Log2E, 17),
+	math.Ldexp(math.Log2E, 18),
+	math.Ldexp(math.Log2E, 19),
+	math.Ldexp(math.Log2E, 20),
+}
+
+// scaleChange returns the magnitude of the scale change needed to fit bin in
+// the bucket. If no scale change is needed 0 is returned.
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
+	if length == 0 {
+		// No need to rescale if there are no buckets.
+		return 0
+	}
+
+	low := startBin
+	high := bin
+	if startBin >= bin {
+ low = bin
+ high = startBin + length - 1
+ }
+
+ count := 0
+ for high-low >= p.maxSize {
+ low = low >> 1
+ high = high >> 1
+ count++
+ if count > expoMaxScale-expoMinScale {
+ return count
+ }
+ }
+ return count
+}
+
+// expoBuckets is a set of buckets in an exponential histogram.
+type expoBuckets struct {
+ startBin int
+ counts []uint64
+}
+
+// record increments the count for the given bin, and expands the buckets if needed.
+// Size changes must be done before calling this function.
+func (b *expoBuckets) record(bin int) {
+ if len(b.counts) == 0 {
+ b.counts = []uint64{1}
+ b.startBin = bin
+ return
+ }
+
+ endBin := b.startBin + len(b.counts) - 1
+
+ // if the new bin is inside the current range
+ if bin >= b.startBin && bin <= endBin {
+ b.counts[bin-b.startBin]++
+ return
+ }
+ // if the new bin is before the current start add spaces to the counts
+ if bin < b.startBin {
+ origLen := len(b.counts)
+ newLength := endBin - bin + 1
+ shift := b.startBin - bin
+
+ if newLength > cap(b.counts) {
+ b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
+ }
+
+ copy(b.counts[shift:origLen+shift], b.counts[:])
+ b.counts = b.counts[:newLength]
+ for i := 1; i < shift; i++ {
+ b.counts[i] = 0
+ }
+ b.startBin = bin
+ b.counts[0] = 1
+ return
+ }
+ // if the new is after the end add spaces to the end
+ if bin > endBin {
+ if bin-b.startBin < cap(b.counts) {
+ b.counts = b.counts[:bin-b.startBin+1]
+ for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
+ b.counts[i] = 0
+ }
+ b.counts[bin-b.startBin] = 1
+ return
+ }
+
+ end := make([]uint64, bin-b.startBin-len(b.counts)+1)
+ b.counts = append(b.counts, end...)
+ b.counts[bin-b.startBin] = 1
+ }
+}
+
+// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the
+// correct lower resolution bucket.
+func (b *expoBuckets) downscale(delta int) {
+ // Example
+ // delta = 2
+ // Original offset: -6
+ // Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ // bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
+ // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
+ // new Offset: -2
+ // new Counts: [4, 14, 30, 10]
+
+ if len(b.counts) <= 1 || delta < 1 {
+ b.startBin = b.startBin >> delta
+ return
+ }
+
+ steps := 1 << delta
+ offset := b.startBin % steps
+ offset = (offset + steps) % steps // to make offset positive
+ for i := 1; i < len(b.counts); i++ {
+ idx := i + offset
+ if idx%steps == 0 {
+ b.counts[idx/steps] = b.counts[i]
+ continue
+ }
+ b.counts[idx/steps] += b.counts[i]
+ }
+
+ lastIdx := (len(b.counts) - 1 + offset) / steps
+ b.counts = b.counts[:lastIdx+1]
+ b.startBin = b.startBin >> delta
+}
+
+// newExponentialHistogram returns an Aggregator that summarizes a set of
+// measurements as an exponential histogram. Each histogram is scoped by attributes
+// and the aggregation cycle the measurements were made in.
+func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] {
+ return &expoHistogram[N]{
+ noSum: noSum,
+ noMinMax: noMinMax,
+ maxSize: int(maxSize),
+ maxScale: int(maxScale),
+
+ newRes: r,
+ limit: newLimiter[*expoHistogramDataPoint[N]](limit),
+ values: make(map[attribute.Set]*expoHistogramDataPoint[N]),
+
+ start: now(),
+ }
+}
+
+// expoHistogram summarizes a set of measurements as an histogram with exponentially
+// defined buckets.
+type expoHistogram[N int64 | float64] struct {
+ noSum bool
+ noMinMax bool
+ maxSize int
+ maxScale int
+
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[*expoHistogramDataPoint[N]]
+ values map[attribute.Set]*expoHistogramDataPoint[N]
+ valuesMu sync.Mutex
+
+ start time.Time
+}
+
+func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ // Ignore NaN and infinity.
+ if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
+ return
+ }
+
+ t := now()
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ attr := e.limit.Attributes(fltrAttr, e.values)
+ v, ok := e.values[attr]
+ if !ok {
+ v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum)
+ v.res = e.newRes()
+
+ e.values[attr] = v
+ }
+ v.record(value)
+ v.res.Offer(ctx, t, value, droppedAttr)
+}
+
+func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+ // In that case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+ h.Temporality = metricdata.DeltaTemporality
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ n := len(e.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for a, b := range e.values {
+ hDPts[i].Attributes = a
+ hDPts[i].StartTime = e.start
+ hDPts[i].Time = t
+ hDPts[i].Count = b.count
+ hDPts[i].Scale = int32(b.scale)
+ hDPts[i].ZeroCount = b.zeroCount
+ hDPts[i].ZeroThreshold = 0.0
+
+ hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin)
+ hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts))
+ copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts)
+
+ hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
+ hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
+
+ if !e.noSum {
+ hDPts[i].Sum = b.sum
+ }
+ if !e.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(b.min)
+ hDPts[i].Max = metricdata.NewExtrema(b.max)
+ }
+
+ b.res.Collect(&hDPts[i].Exemplars)
+
+ delete(e.values, a)
+ i++
+ }
+ e.start = t
+ h.DataPoints = hDPts
+ *dest = h
+ return n
+}
+
+func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+ // In that case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+ h.Temporality = metricdata.CumulativeTemporality
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ n := len(e.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for a, b := range e.values {
+ hDPts[i].Attributes = a
+ hDPts[i].StartTime = e.start
+ hDPts[i].Time = t
+ hDPts[i].Count = b.count
+ hDPts[i].Scale = int32(b.scale)
+ hDPts[i].ZeroCount = b.zeroCount
+ hDPts[i].ZeroThreshold = 0.0
+
+ hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin)
+ hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts))
+ copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts)
+
+ hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
+ hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
+
+ if !e.noSum {
+ hDPts[i].Sum = b.sum
+ }
+ if !e.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(b.min)
+ hDPts[i].Max = metricdata.NewExtrema(b.max)
+ }
+
+ b.res.Collect(&hDPts[i].Exemplars)
+
+ i++
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ }
+
+ h.DataPoints = hDPts
+ *dest = h
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
new file mode 100644
index 00000000000..a9a4706bf00
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -0,0 +1,249 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "sort"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type buckets[N int64 | float64] struct {
+ res exemplar.Reservoir[N]
+
+ counts []uint64
+ count uint64
+ total N
+ min, max N
+}
+
+// newBuckets returns buckets with n bins.
+func newBuckets[N int64 | float64](n int) *buckets[N] {
+ return &buckets[N]{counts: make([]uint64, n)}
+}
+
+func (b *buckets[N]) sum(value N) { b.total += value }
+
+func (b *buckets[N]) bin(idx int, value N) {
+ b.counts[idx]++
+ b.count++
+ if value < b.min {
+ b.min = value
+ } else if value > b.max {
+ b.max = value
+ }
+}
+
+// histValues summarizes a set of measurements as an histValues with
+// explicitly defined buckets.
+type histValues[N int64 | float64] struct {
+ noSum bool
+ bounds []float64
+
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[*buckets[N]]
+ values map[attribute.Set]*buckets[N]
+ valuesMu sync.Mutex
+}
+
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histValues[N] {
+ // The responsibility of keeping all buckets correctly associated with the
+ // passed boundaries is ultimately this type's responsibility. Make a copy
+ // here so we can always guarantee this. Or, in the case of failure, have
+ // complete control over the fix.
+ b := make([]float64, len(bounds))
+ copy(b, bounds)
+ sort.Float64s(b)
+ return &histValues[N]{
+ noSum: noSum,
+ bounds: b,
+ newRes: r,
+ limit: newLimiter[*buckets[N]](limit),
+ values: make(map[attribute.Set]*buckets[N]),
+ }
+}
+
+// Aggregate records the measurement value, scoped by attr, and aggregates it
+// into a histogram.
+func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ // This search will return an index in the range [0, len(s.bounds)], where
+ // it will return len(s.bounds) if value is greater than the last element
+ // of s.bounds. This aligns with the buckets in that the length of buckets
+ // is len(s.bounds)+1, with the last bucket representing:
+ // (s.bounds[len(s.bounds)-1], +∞).
+ idx := sort.SearchFloat64s(s.bounds, float64(value))
+
+ t := now()
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ b, ok := s.values[attr]
+ if !ok {
+ // N+1 buckets. For example:
+ //
+ // bounds = [0, 5, 10]
+ //
+ // Then,
+ //
+ // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
+ b = newBuckets[N](len(s.bounds) + 1)
+ b.res = s.newRes()
+
+ // Ensure min and max are recorded values (not zero), for new buckets.
+ b.min, b.max = value, value
+ s.values[attr] = b
+ }
+ b.bin(idx, value)
+ if !s.noSum {
+ b.sum(value)
+ }
+ b.res.Offer(ctx, t, value, droppedAttr)
+}
+
+// newHistogram returns an Aggregator that summarizes a set of measurements as
+// an histogram.
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] {
+ return &histogram[N]{
+ histValues: newHistValues[N](boundaries, noSum, limit, r),
+ noMinMax: noMinMax,
+ start: now(),
+ }
+}
+
+// histogram summarizes a set of measurements as an histogram with explicitly
+// defined buckets.
+type histogram[N int64 | float64] struct {
+ *histValues[N]
+
+ noMinMax bool
+ start time.Time
+}
+
+func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+ // case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.Histogram[N])
+ h.Temporality = metricdata.DeltaTemporality
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ // Do not allow modification of our copy of bounds.
+ bounds := make([]float64, len(s.bounds))
+ copy(bounds, s.bounds)
+
+ n := len(s.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for a, b := range s.values {
+ hDPts[i].Attributes = a
+ hDPts[i].StartTime = s.start
+ hDPts[i].Time = t
+ hDPts[i].Count = b.count
+ hDPts[i].Bounds = bounds
+ hDPts[i].BucketCounts = b.counts
+
+ if !s.noSum {
+ hDPts[i].Sum = b.total
+ }
+
+ if !s.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(b.min)
+ hDPts[i].Max = metricdata.NewExtrema(b.max)
+ }
+
+ b.res.Collect(&hDPts[i].Exemplars)
+
+ // Unused attribute sets do not report.
+ delete(s.values, a)
+ i++
+ }
+ // The delta collection cycle resets.
+ s.start = t
+
+ h.DataPoints = hDPts
+ *dest = h
+
+ return n
+}
+
+func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+ // case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.Histogram[N])
+ h.Temporality = metricdata.CumulativeTemporality
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ // Do not allow modification of our copy of bounds.
+ bounds := make([]float64, len(s.bounds))
+ copy(bounds, s.bounds)
+
+ n := len(s.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for a, b := range s.values {
+ // The HistogramDataPoint field values returned need to be copies of
+ // the buckets value as we will keep updating them.
+ //
+ // TODO (#3047): Making copies for bounds and counts incurs a large
+ // memory allocation footprint. Alternatives should be explored.
+ counts := make([]uint64, len(b.counts))
+ copy(counts, b.counts)
+
+ hDPts[i].Attributes = a
+ hDPts[i].StartTime = s.start
+ hDPts[i].Time = t
+ hDPts[i].Count = b.count
+ hDPts[i].Bounds = bounds
+ hDPts[i].BucketCounts = counts
+
+ if !s.noSum {
+ hDPts[i].Sum = b.total
+ }
+
+ if !s.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(b.min)
+ hDPts[i].Max = metricdata.NewExtrema(b.max)
+ }
+
+ b.res.Collect(&hDPts[i].Exemplars)
+
+ i++
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ }
+
+ h.DataPoints = hDPts
+ *dest = h
+
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
new file mode 100644
index 00000000000..5699e728f1f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// datapoint is timestamped measurement data.
+type datapoint[N int64 | float64] struct {
+ timestamp time.Time
+ value N
+ res exemplar.Reservoir[N]
+}
+
+func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] {
+ return &lastValue[N]{
+ newRes: r,
+ limit: newLimiter[datapoint[N]](limit),
+ values: make(map[attribute.Set]datapoint[N]),
+ }
+}
+
+// lastValue summarizes a set of measurements as the last one made.
+type lastValue[N int64 | float64] struct {
+ sync.Mutex
+
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[datapoint[N]]
+ values map[attribute.Set]datapoint[N]
+}
+
+func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
+ s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ d, ok := s.values[attr]
+ if !ok {
+ d.res = s.newRes()
+ }
+
+ d.timestamp = t
+ d.value = value
+ d.res.Offer(ctx, t, value, droppedAttr)
+
+ s.values[attr] = d
+}
+
+func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ *dest = reset(*dest, n, n)
+
+ var i int
+ for a, v := range s.values {
+ (*dest)[i].Attributes = a
+ // The event time is the only meaningful timestamp, StartTime is
+ // ignored.
+ (*dest)[i].Time = v.timestamp
+ (*dest)[i].Value = v.value
+ v.res.Collect(&(*dest)[i].Exemplars)
+ // Do not report stale values.
+ delete(s.values, a)
+ i++
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
new file mode 100644
index 00000000000..d3de8427200
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// overflowSet is the attribute set used to record a measurement when adding
+// another distinct attribute set to the aggregate would exceed the aggregate
+// limit.
+var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
+
+// limiter limits aggregate values.
+type limiter[V any] struct {
+ // aggLimit is the maximum number of metric streams that can be aggregated.
+ //
+ // Any metric stream with attributes distinct from any set already
+ // aggregated once the aggLimit will be meet will instead be aggregated
+ // into an "overflow" metric stream. That stream will only contain the
+ // "otel.metric.overflow"=true attribute.
+ aggLimit int
+}
+
+// newLimiter returns a new Limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+ return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
+// limit is not set (limit <= 0), attr is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Set]V) attribute.Set {
+ if l.aggLimit > 0 {
+ _, exists := measurements[attrs]
+ if !exists && len(measurements) >= l.aggLimit-1 {
+ return overflowSet
+ }
+ }
+
+ return attrs
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
new file mode 100644
index 00000000000..02de2483f3b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -0,0 +1,250 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type sumValue[N int64 | float64] struct {
+ n N
+ res exemplar.Reservoir[N]
+}
+
+// valueMap is the storage for sums.
+type valueMap[N int64 | float64] struct {
+ sync.Mutex
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[sumValue[N]]
+ values map[attribute.Set]sumValue[N]
+}
+
+func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] {
+ return &valueMap[N]{
+ newRes: r,
+ limit: newLimiter[sumValue[N]](limit),
+ values: make(map[attribute.Set]sumValue[N]),
+ }
+}
+
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
+ s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ v, ok := s.values[attr]
+ if !ok {
+ v.res = s.newRes()
+ }
+
+ v.n += value
+ v.res.Offer(ctx, t, value, droppedAttr)
+
+ s.values[attr] = v
+}
+
+// newSum returns an aggregator that summarizes a set of measurements as their
+// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
+// the measurements were made in.
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] {
+ return &sum[N]{
+ valueMap: newValueMap[N](limit, r),
+ monotonic: monotonic,
+ start: now(),
+ }
+}
+
+// sum summarizes a set of measurements made as their arithmetic sum.
+type sum[N int64 | float64] struct {
+ *valueMap[N]
+
+ monotonic bool
+ start time.Time
+}
+
+func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.DeltaTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for attr, val := range s.values {
+ dPts[i].Attributes = attr
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = val.n
+ val.res.Collect(&dPts[i].Exemplars)
+ // Do not report stale values.
+ delete(s.values, attr)
+ i++
+ }
+ // The delta collection cycle resets.
+ s.start = t
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.CumulativeTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for attr, value := range s.values {
+ dPts[i].Attributes = attr
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = value.n
+ value.res.Collect(&dPts[i].Exemplars)
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ i++
+ }
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+// newPrecomputedSum returns an aggregator that summarizes a set of
+// observatrions as their arithmetic sum. Each sum is scoped by attributes and
+// the aggregation cycle the measurements were made in.
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] {
+ return &precomputedSum[N]{
+ valueMap: newValueMap[N](limit, r),
+ monotonic: monotonic,
+ start: now(),
+ }
+}
+
+// precomputedSum summarizes a set of observatrions as their arithmetic sum.
+type precomputedSum[N int64 | float64] struct {
+ *valueMap[N]
+
+ monotonic bool
+ start time.Time
+
+ reported map[attribute.Set]N
+}
+
+func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+ newReported := make(map[attribute.Set]N)
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.DeltaTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for attr, value := range s.values {
+ delta := value.n - s.reported[attr]
+
+ dPts[i].Attributes = attr
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = delta
+ value.res.Collect(&dPts[i].Exemplars)
+
+ newReported[attr] = value.n
+ // Unused attribute sets do not report.
+ delete(s.values, attr)
+ i++
+ }
+ // Unused attribute sets are forgotten.
+ s.reported = newReported
+ // The delta collection cycle resets.
+ s.start = t
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.CumulativeTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for attr, val := range s.values {
+ dPts[i].Attributes = attr
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = val.n
+ val.res.Collect(&dPts[i].Exemplars)
+
+ // Unused attribute sets do not report.
+ delete(s.values, attr)
+ i++
+ }
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
new file mode 100644
index 00000000000..3caeb542c57
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package exemplar provides an implementation of the OpenTelemetry exemplar
+// reservoir to be used in metric collection pipelines.
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
new file mode 100644
index 00000000000..39bf37b9e94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Drop returns a [Reservoir] that drops all measurements it is offered.
+func Drop[N int64 | float64]() Reservoir[N] { return &dropRes[N]{} }
+
+type dropRes[N int64 | float64] struct{}
+
+// Offer does nothing, all measurements offered will be dropped.
+func (r *dropRes[N]) Offer(context.Context, time.Time, N, []attribute.KeyValue) {}
+
+// Collect resets dest. No exemplars will ever be returned.
+func (r *dropRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+ *dest = (*dest)[:0]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
new file mode 100644
index 00000000000..4f5946fb966
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements
+// to r if the passed context associated with the measurement contains a sampled
+// [go.opentelemetry.io/otel/trace.SpanContext].
+func SampledFilter[N int64 | float64](r Reservoir[N]) Reservoir[N] {
+ return filtered[N]{Reservoir: r}
+}
+
+type filtered[N int64 | float64] struct {
+ Reservoir[N]
+}
+
+func (f filtered[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+ if trace.SpanContextFromContext(ctx).IsSampled() {
+ f.Reservoir.Offer(ctx, t, n, a)
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
new file mode 100644
index 00000000000..6f4fe5524b1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "sort"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Histogram returns a [Reservoir] that samples the last measurement that falls
+// within a histogram bucket. The histogram bucket upper-boundaries are defined
+// by bounds.
+//
+// The passed bounds will be sorted by this function.
+func Histogram[N int64 | float64](bounds []float64) Reservoir[N] {
+ sort.Float64s(bounds)
+ return &histRes[N]{
+ bounds: bounds,
+ storage: newStorage[N](len(bounds) + 1),
+ }
+}
+
+type histRes[N int64 | float64] struct {
+ *storage[N]
+
+ // bounds are bucket bounds in ascending order.
+ bounds []float64
+}
+
+func (r *histRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+ r.store[sort.SearchFloat64s(r.bounds, float64(n))] = newMeasurement(ctx, t, n, a)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
new file mode 100644
index 00000000000..7f9fda5b489
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// rng is used to make sampling decisions.
+//
+// Do not use crypto/rand. There is no reason for the decrease in performance
+// given this is not a security sensitive decision.
+var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// random returns, as a float64, a uniform pseudo-random number in the open
+// interval (0.0,1.0).
+func random() float64 {
+ // TODO: This does not return a uniform number. rng.Float64 returns a
+ // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
+ // returns multiples of 2^-53, and not all floating point numbers between 0
+ // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
+ // are always going to be 0).
+ //
+ // An alternative algorithm should be considered that will actually return
+ // a uniform number in the interval (0,1). For example, since the default
+ // rand source provides a uniform distribution for Int63, this can be
+ // converted following the prototypical code of Mersenne Twister 64 (Takuji
+ // Nishimura and Makoto Matsumoto:
+ // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
+ //
+ // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
+ //
+ // There are likely many other methods to explore here as well.
+
+ f := rng.Float64()
+ for f == 0 {
+ f = rng.Float64()
+ }
+ return f
+}
+
+// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
+// are k or less measurements made, the Reservoir will sample each one. If
+// there are more than k, the Reservoir will then randomly sample all
+// additional measurements with a decreasing probability.
+func FixedSize[N int64 | float64](k int) Reservoir[N] {
+ r := &randRes[N]{storage: newStorage[N](k)}
+ r.reset()
+ return r
+}
+
+type randRes[N int64 | float64] struct {
+ *storage[N]
+
+	// count is the number of measurements seen.
+ count int64
+ // next is the next count that will store a measurement at a random index
+ // once the reservoir has been filled.
+ next int64
+ // w is the largest random number in a distribution that is used to compute
+ // the next next.
+ w float64
+}
+
+func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+ // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
+ // 1994). "Reservoir-Sampling Algorithms of Time Complexity
+ // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
+ // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
+ //
+ // A high-level overview of "Algorithm L":
+ // 0) Pre-calculate the random count greater than the storage size when
+ // an exemplar will be replaced.
+ // 1) Accept all measurements offered until the configured storage size is
+ // reached.
+ // 2) Loop:
+	//    a) When the pre-calculated count is reached, replace a random
+ // existing exemplar with the offered measurement.
+ // b) Calculate the next random count greater than the existing one
+	//       which will replace another exemplar
+ //
+ // The way a "replacement" count is computed is by looking at `n` number of
+ // independent random numbers each corresponding to an offered measurement.
+ // Of these numbers the smallest `k` (the same size as the storage
+ // capacity) of them are kept as a subset. The maximum value in this
+ // subset, called `w` is used to weight another random number generation
+ // for the next count that will be considered.
+ //
+ // By weighting the next count computation like described, it is able to
+ // perform a uniformly-weighted sampling algorithm based on the number of
+ // samples the reservoir has seen so far. The sampling will "slow down" as
+ // more and more samples are offered so as to reduce a bias towards those
+ // offered just prior to the end of the collection.
+ //
+ // This algorithm is preferred because of its balance of simplicity and
+ // performance. It will compute three random numbers (the bulk of
+ // computation time) for each item that becomes part of the reservoir, but
+ // it does not spend any time on items that do not. In particular it has an
+	// asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
+ // measurements offered and k is the reservoir size.
+ //
+ // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
+ // this and other reservoir sampling algorithms. See
+ // https://github.com/MrAlias/reservoir-sampling for a performance
+ // comparison of reservoir sampling algorithms.
+
+ if int(r.count) < cap(r.store) {
+ r.store[r.count] = newMeasurement(ctx, t, n, a)
+ } else {
+ if r.count == r.next {
+ // Overwrite a random existing measurement with the one offered.
+ idx := int(rng.Int63n(int64(cap(r.store))))
+ r.store[idx] = newMeasurement(ctx, t, n, a)
+ r.advance()
+ }
+ }
+ r.count++
+}
+
+// reset resets r to the initial state.
+func (r *randRes[N]) reset() {
+ // This resets the number of exemplars known.
+ r.count = 0
+ // Random index inserts should only happen after the storage is full.
+ r.next = int64(cap(r.store))
+
+ // Initial random number in the series used to generate r.next.
+ //
+ // This is set before r.advance to reset or initialize the random number
+ // series. Without doing so it would always be 0 or never restart a new
+ // random number series.
+ //
+ // This maps the uniform random number in (0,1) to a geometric distribution
+ // over the same interval. The mean of the distribution is inversely
+ // proportional to the storage capacity.
+ r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
+
+ r.advance()
+}
+
+// advance updates the count at which the offered measurement will overwrite an
+// existing exemplar.
+func (r *randRes[N]) advance() {
+ // Calculate the next value in the random number series.
+ //
+ // The current value of r.w is based on the max of a distribution of random
+ // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
+ // of the storage and each `u` in the interval (0,w)). To calculate the
+ // next r.w we use the fact that when the next exemplar is selected to be
+ // included in the storage an existing one will be dropped, and the
+ // corresponding random number in the set used to calculate r.w will also
+ // be replaced. The replacement random number will also be within (0,w),
+ // therefore the next r.w will be based on the same distribution (i.e.
+ // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
+ // computing the next random number `u` and take r.w as `w * u^(1/k)`.
+ r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
+ // Use the new random number in the series to calculate the count of the
+ // next measurement that will be stored.
+ //
+ // Given 0 < r.w < 1, each iteration will result in subsequent r.w being
+ // smaller. This translates here into the next next being selected against
+ // a distribution with a higher mean (i.e. the expected value will increase
+ // and replacements become less likely)
+ //
+ // Important to note, the new r.next will always be at least 1 more than
+ // the last r.next.
+ r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
+}
+
+func (r *randRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+ r.storage.Collect(dest)
+ // Call reset here even though it will reset r.count and restart the random
+ // number series. This will persist any old exemplars as long as no new
+ // measurements are offered, but it will also prioritize those new
+ // measurements that are made over the older collection cycle ones.
+ r.reset()
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
new file mode 100644
index 00000000000..7d5276a3411
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
@@ -0,0 +1,44 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Reservoir holds the sampled exemplar of measurements made.
+type Reservoir[N int64 | float64] interface {
+ // Offer accepts the parameters associated with a measurement. The
+ // parameters will be stored as an exemplar if the Reservoir decides to
+ // sample the measurement.
+ //
+ // The passed ctx needs to contain any baggage or span that were active
+ // when the measurement was made. This information may be used by the
+ // Reservoir in making a sampling decision.
+ //
+ // The time t is the time when the measurement was made. The val and attr
+ // parameters are the value and dropped (filtered) attributes of the
+ // measurement respectively.
+ Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue)
+
+ // Collect returns all the held exemplars.
+ //
+ // The Reservoir state is preserved after this call.
+ Collect(dest *[]metricdata.Exemplar[N])
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
new file mode 100644
index 00000000000..e2c2b90a351
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
@@ -0,0 +1,107 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// storage is an exemplar storage for [Reservoir] implementations.
+type storage[N int64 | float64] struct {
+ // store are the measurements sampled.
+ //
+ // This does not use []metricdata.Exemplar because it potentially would
+ // require an allocation for trace and span IDs in the hot path of Offer.
+ store []measurement[N]
+}
+
+func newStorage[N int64 | float64](n int) *storage[N] {
+ return &storage[N]{store: make([]measurement[N], n)}
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+ *dest = reset(*dest, len(r.store), len(r.store))
+ var n int
+ for _, m := range r.store {
+ if !m.valid {
+ continue
+ }
+
+ m.Exemplar(&(*dest)[n])
+ n++
+ }
+ *dest = (*dest)[:n]
+}
+
+// measurement is a measurement made by a telemetry system.
+type measurement[N int64 | float64] struct {
+ // FilteredAttributes are the attributes dropped during the measurement.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was made.
+ Time time.Time
+ // Value is the value of the measurement.
+ Value N
+ // SpanContext is the SpanContext active when a measurement was made.
+ SpanContext trace.SpanContext
+
+ valid bool
+}
+
+// newMeasurement returns a new non-empty Measurement.
+func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, droppedAttr []attribute.KeyValue) measurement[N] {
+ return measurement[N]{
+ FilteredAttributes: droppedAttr,
+ Time: ts,
+ Value: v,
+ SpanContext: trace.SpanContextFromContext(ctx),
+ valid: true,
+ }
+}
+
+// Exemplar returns m as a [metricdata.Exemplar].
+func (m measurement[N]) Exemplar(dest *metricdata.Exemplar[N]) {
+ dest.FilteredAttributes = m.FilteredAttributes
+ dest.Time = m.Time
+ dest.Value = m.Value
+
+ if m.SpanContext.HasTraceID() {
+ traceID := m.SpanContext.TraceID()
+ dest.TraceID = traceID[:]
+ } else {
+ dest.TraceID = dest.TraceID[:0]
+ }
+
+ if m.SpanContext.HasSpanID() {
+ spanID := m.SpanContext.SpanID()
+ dest.SpanID = spanID[:]
+ } else {
+ dest.SpanID = dest.SpanID[:0]
+ }
+}
+
+func reset[T any](s []T, length, capacity int) []T {
+ if cap(s) < capacity {
+ return make([]T, length, capacity)
+ }
+ return s[:length]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
new file mode 100644
index 00000000000..9695492b0d1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+// ReuseSlice returns a zeroed view of slice if its capacity is greater than or
+// equal to n. Otherwise, it returns a new []T with capacity equal to n.
+func ReuseSlice[T any](slice []T, n int) []T {
+ if cap(slice) >= n {
+ return slice[:n]
+ }
+ return make([]T, n)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
new file mode 100644
index 00000000000..aba69d65471
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
@@ -0,0 +1,112 @@
+# Experimental Features
+
+The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Cardinality Limit](#cardinality-limit)
+- [Exemplars](#exemplars)
+
+### Cardinality Limit
+
+The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment value.
+The value must be an integer value.
+All other values are ignored.
+
+If the value set is less than or equal to `0`, no limit will be applied.
+
+#### Examples
+
+Set the cardinality limit to 2000.
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=2000
+```
+
+Set an infinite cardinality limit (functionally equivalent to disabling the feature).
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=-1
+```
+
+Disable the cardinality limit.
+
+```console
+unset OTEL_GO_X_CARDINALITY_LIMIT
+```
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string of `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are supported.
+The exemplar filter applies to all measurements made.
+They filter these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+
+- `"always_on"`: allows all measurements
+- `"always_off"`: denies all measurements
+- `"trace_based"`: allows only sampled measurements
+
+All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
+
+#### Examples
+
+Enable exemplars to be exported.
+
+```console
+export OTEL_GO_X_EXEMPLAR=true
+```
+
+Disable exemplars from being exported.
+
+```console
+unset OTEL_GO_X_EXEMPLAR
+```
+
+Set the exemplar filter to allow all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_on
+```
+
+Set the exemplar filter to deny all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_off
+```
+
+Set the exemplar filter to only allow sampled measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
+```
+
+Revert to the default exemplar filter (`"trace_based"`).
+
+```console
+unset OTEL_METRICS_EXEMPLAR_FILTER
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
new file mode 100644
index 00000000000..541160f9423
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package x contains support for OTel metric SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
+
+import (
+ "os"
+ "strconv"
+ "strings"
+)
+
+var (
+ // Exemplars is an experimental feature flag that defines if exemplars
+ // should be recorded for metric data-points.
+ //
+ // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
+ // to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+ // will also enable this).
+ Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
+ if strings.ToLower(v) == "true" {
+ return v, true
+ }
+ return "", false
+ })
+
+ // CardinalityLimit is an experimental feature flag that defines if
+ // cardinality limits should be applied to the recorded metric data-points.
+ //
+ // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
+ // variable to the integer limit value you want to use.
+ //
+ // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
+ // will disable the cardinality limits.
+ CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
+ n, err := strconv.Atoi(v)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+ })
+)
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
new file mode 100644
index 00000000000..7d524de9ea1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
@@ -0,0 +1,214 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
// ManualReader is a simple Reader that allows an application to
// read metrics on demand.
type ManualReader struct {
	// sdkProducer holds a produceHolder once the SDK registers itself via
	// register; read lock-free by Collect.
	sdkProducer atomic.Value
	// shutdownOnce guarantees Shutdown's side effects happen exactly once.
	shutdownOnce sync.Once

	mu         sync.Mutex
	isShutdown bool // guarded by mu
	// externalProducers holds a []Producer of non-SDK metric producers,
	// stored atomically so Collect can read it without holding mu.
	externalProducers atomic.Value

	temporalitySelector TemporalitySelector
	aggregationSelector AggregationSelector
}

// Compile time check the ManualReader implements Reader and is comparable
// (a map key must be comparable, so this fails to compile otherwise).
var _ = map[Reader]struct{}{&ManualReader{}: {}}
+
+// NewManualReader returns a Reader which is directly called to collect metrics.
+func NewManualReader(opts ...ManualReaderOption) *ManualReader {
+ cfg := newManualReaderConfig(opts)
+ r := &ManualReader{
+ temporalitySelector: cfg.temporalitySelector,
+ aggregationSelector: cfg.aggregationSelector,
+ }
+ r.externalProducers.Store(cfg.producers)
+ return r
+}
+
// register stores the sdkProducer which enables the caller
// to read metrics from the SDK on demand.
func (mr *ManualReader) register(p sdkProducer) {
	// Only register once. If producer is already set, do nothing.
	// CompareAndSwap only succeeds while the atomic.Value has never been
	// stored (still nil), so a second registration is rejected and logged.
	if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
		msg := "did not register manual reader"
		global.Error(errDuplicateRegister, msg)
	}
}
+
// temporality reports the Temporality for the instrument kind provided by
// delegating to the reader's configured TemporalitySelector.
func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
	return mr.temporalitySelector(kind)
}

// aggregation returns what Aggregation to use for kind by delegating to the
// reader's configured AggregationSelector.
func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
	return mr.aggregationSelector(kind)
}
+
// Shutdown closes any connections and frees any resources used by the reader.
//
// The first call returns nil; every subsequent call is a no-op that returns
// ErrReaderShutdown.
//
// This method is safe to call concurrently.
func (mr *ManualReader) Shutdown(context.Context) error {
	err := ErrReaderShutdown
	mr.shutdownOnce.Do(func() {
		// Any future call to Collect will now return ErrReaderShutdown.
		mr.sdkProducer.Store(produceHolder{
			produce: shutdownProducer{}.produce,
		})
		mr.mu.Lock()
		defer mr.mu.Unlock()
		mr.isShutdown = true
		// release references to Producer(s)
		mr.externalProducers.Store([]Producer{})
		// Only the caller that actually performed the shutdown sees nil.
		err = nil
	})
	return err
}
+
// Collect gathers all metric data related to the Reader from
// the SDK and other Producers and stores the result in rm.
//
// Collect will return an error if called after shutdown.
// Collect will return an error if rm is a nil ResourceMetrics.
// Collect will return an error if the context's Done channel is closed.
//
// This method is safe to call concurrently.
func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
	if rm == nil {
		return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
	}
	p := mr.sdkProducer.Load()
	if p == nil {
		return ErrReaderNotRegistered
	}

	ph, ok := p.(produceHolder)
	if !ok {
		// The atomic.Value is entirely in the ManualReader's control so
		// this should never happen. In the unforeseen case that this does
		// happen, return an error instead of panicking so a user's code does
		// not halt in the process.
		err := fmt.Errorf("manual reader: invalid producer: %T", p)
		return err
	}

	err := ph.produce(ctx, rm)
	if err != nil {
		return err
	}
	// Append metrics from external producers; an error from one producer
	// does not stop collection from the others.
	var errs []error
	for _, producer := range mr.externalProducers.Load().([]Producer) {
		externalMetrics, err := producer.Produce(ctx)
		if err != nil {
			errs = append(errs, err)
		}
		rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
	}

	global.Debug("ManualReader collection", "Data", rm)

	return unifyErrors(errs)
}
+
+// MarshalLog returns logging data about the ManualReader.
+func (r *ManualReader) MarshalLog() interface{} {
+ r.mu.Lock()
+ down := r.isShutdown
+ r.mu.Unlock()
+ return struct {
+ Type string
+ Registered bool
+ Shutdown bool
+ }{
+ Type: "ManualReader",
+ Registered: r.sdkProducer.Load() != nil,
+ Shutdown: down,
+ }
+}
+
// manualReaderConfig contains configuration options for a ManualReader.
type manualReaderConfig struct {
	// temporalitySelector chooses the Temporality reported per instrument kind.
	temporalitySelector TemporalitySelector
	// aggregationSelector chooses the default Aggregation per instrument kind.
	aggregationSelector AggregationSelector
	// producers are additional, non-SDK sources of metrics to collect.
	producers []Producer
}
+
+// newManualReaderConfig returns a manualReaderConfig configured with options.
+func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
+ cfg := manualReaderConfig{
+ temporalitySelector: DefaultTemporalitySelector,
+ aggregationSelector: DefaultAggregationSelector,
+ }
+ for _, opt := range opts {
+ cfg = opt.applyManual(cfg)
+ }
+ return cfg
+}
+
// ManualReaderOption applies a configuration option value to a ManualReader.
type ManualReaderOption interface {
	// applyManual returns a copy of the given config with the option applied.
	applyManual(manualReaderConfig) manualReaderConfig
}
+
+// WithTemporalitySelector sets the TemporalitySelector a reader will use to
+// determine the Temporality of an instrument based on its kind. If this
+// option is not used, the reader will use the DefaultTemporalitySelector.
+func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption {
+ return temporalitySelectorOption{selector: selector}
+}
+
+type temporalitySelectorOption struct {
+ selector func(instrument InstrumentKind) metricdata.Temporality
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
+ mrc.temporalitySelector = t.selector
+ return mrc
+}
+
+// WithAggregationSelector sets the AggregationSelector a reader will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// or the aggregation explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector AggregationSelector) ManualReaderOption {
+ return aggregationSelectorOption{selector: selector}
+}
+
+type aggregationSelectorOption struct {
+ selector AggregationSelector
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
+ c.aggregationSelector = t.selector
+ return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
new file mode 100644
index 00000000000..beb7876ec40
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
@@ -0,0 +1,690 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
// ErrInstrumentName indicates the created instrument has an invalid name.
// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter.
// It is returned (wrapped) by the meter's instrument creation methods.
var ErrInstrumentName = errors.New("invalid instrument name")
+
// meter handles the creation and coordination of all metric instruments. A
// meter represents a single instrumentation scope; all metric telemetry
// produced by an instrumentation scope will use metric instruments from a
// single meter.
type meter struct {
	embedded.Meter

	scope instrumentation.Scope
	pipes pipelines

	// Instrument caches keyed by instrument identity, so repeated creation
	// of an identical instrument returns the cached instance.
	int64Insts             *cacheWithErr[instID, *int64Inst]
	float64Insts           *cacheWithErr[instID, *float64Inst]
	int64ObservableInsts   *cacheWithErr[instID, int64Observable]
	float64ObservableInsts *cacheWithErr[instID, float64Observable]

	int64Resolver   resolver[int64]
	float64Resolver resolver[float64]
}
+
+func newMeter(s instrumentation.Scope, p pipelines) *meter {
+ // viewCache ensures instrument conflicts, including number conflicts, this
+ // meter is asked to create are logged to the user.
+ var viewCache cache[string, instID]
+
+ var int64Insts cacheWithErr[instID, *int64Inst]
+ var float64Insts cacheWithErr[instID, *float64Inst]
+ var int64ObservableInsts cacheWithErr[instID, int64Observable]
+ var float64ObservableInsts cacheWithErr[instID, float64Observable]
+
+ return &meter{
+ scope: s,
+ pipes: p,
+ int64Insts: &int64Insts,
+ float64Insts: &float64Insts,
+ int64ObservableInsts: &int64ObservableInsts,
+ float64ObservableInsts: &float64ObservableInsts,
+ int64Resolver: newResolver[int64](p, &viewCache),
+ float64Resolver: newResolver[float64](p, &viewCache),
+ }
+}
+
+// Compile-time check meter implements metric.Meter.
+var _ metric.Meter = (*meter)(nil)
+
+// Int64Counter returns a new instrument identified by name and configured with
+// options. The instrument is used to synchronously record increasing int64
+// measurements during a computational operation.
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ cfg := metric.NewInt64CounterConfig(options...)
+ const kind = InstrumentKindCounter
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// int64 measurements during a computational operation.
+func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ cfg := metric.NewInt64UpDownCounterConfig(options...)
+ const kind = InstrumentKindUpDownCounter
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of int64 measurements during a computational operation.
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ cfg := metric.NewInt64HistogramConfig(options...)
+ p := int64InstProvider{m}
+ i, err := p.lookupHistogram(name, cfg)
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
// int64ObservableInstrument returns a new observable identified by the Instrument.
// It registers callbacks for each reader's pipeline.
//
// If an identical instrument was already created, the cached instance is
// returned and any newly provided callbacks are ignored (with a warning).
func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
	key := instID{
		Name:        id.Name,
		Description: id.Description,
		Unit:        id.Unit,
		Kind:        id.Kind,
	}
	if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
		warnRepeatedObservableCallbacks(id)
	}
	return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
		inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
		for _, insert := range m.int64Resolver.inserters {
			// Connect the measure functions for instruments in this pipeline with the
			// callbacks for this pipeline.
			in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
			if err != nil {
				return inst, err
			}
			// Drop aggregation: no measures means this pipeline's views drop
			// the instrument entirely.
			if len(in) == 0 {
				inst.dropAggregation = true
				continue
			}
			inst.appendMeasures(in)
			for _, cback := range callbacks {
				// Rebind per-iteration values so each closure captures its
				// own observer and callback.
				inst := int64Observer{measures: in}
				fn := cback
				insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
			}
		}
		return inst, validateInstrumentName(id.Name)
	})
}
+
+// Int64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing int64 measurements once per a measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+ cfg := metric.NewInt64ObservableCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableUpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// int64 measurements once per a measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+ cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous int64 measurements once per a measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ cfg := metric.NewInt64ObservableGaugeConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64Counter returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record increasing
+// float64 measurements during a computational operation.
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ cfg := metric.NewFloat64CounterConfig(options...)
+ const kind = InstrumentKindCounter
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// float64 measurements during a computational operation.
+func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ cfg := metric.NewFloat64UpDownCounterConfig(options...)
+ const kind = InstrumentKindUpDownCounter
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of float64 measurements during a computational operation.
+func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ cfg := metric.NewFloat64HistogramConfig(options...)
+ p := float64InstProvider{m}
+ i, err := p.lookupHistogram(name, cfg)
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// float64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+ if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
+ inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.float64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+ for _, cback := range callbacks {
+ inst := float64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
+// Float64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing float64 measurements once per a measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+ cfg := metric.NewFloat64ObservableCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableUpDownCounter returns a new instrument identified by name
+// and configured with options. The instrument is used to asynchronously record
+// float64 measurements once per a measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+ cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous float64 measurements once per a measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+ cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+func validateInstrumentName(name string) error {
+ if len(name) == 0 {
+ return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
+ }
+ if len(name) > 255 {
+ return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name)
+ }
+ if !isAlpha([]rune(name)[0]) {
+ return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name)
+ }
+ if len(name) == 1 {
+ return nil
+ }
+ for _, c := range name[1:] {
+ if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
+ return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
+ }
+ }
+ return nil
+}
+
// isAlpha reports whether c is an ASCII letter (a-z or A-Z).
func isAlpha(c rune) bool {
	switch {
	case c >= 'a' && c <= 'z':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	default:
		return false
	}
}

// isAlphanumeric reports whether c is an ASCII letter or digit.
func isAlphanumeric(c rune) bool {
	if c >= '0' && c <= '9' {
		return true
	}
	return isAlpha(c)
}
+
// warnRepeatedObservableCallbacks logs a warning that the observable
// instrument identified by id was created again with callbacks; the new
// callbacks are ignored in favor of the first registration's.
func warnRepeatedObservableCallbacks(id Instrument) {
	inst := fmt.Sprintf(
		"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
		id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
	)
	global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
		"instrument", inst,
	)
}
+
// RegisterCallback registers f to be called each collection cycle so it will
// make observations for insts during those cycles.
//
// The only instruments f can make observations for are insts. All other
// observations will be dropped and an error will be logged.
//
// Only instruments from this meter can be registered with f, an error is
// returned if other instrument are provided.
//
// Only observations made in the callback will be exported. Unlike synchronous
// instruments, asynchronous callbacks can "forget" attribute sets that are no
// longer relevant by omitting the observation during the callback.
//
// The returned Registration can be used to unregister f.
func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
	if len(insts) == 0 {
		// Don't allocate a observer if not needed.
		return noopRegister{}, nil
	}

	reg := newObserver()
	var errs multierror
	for _, inst := range insts {
		// Unwrap any global.
		if u, ok := inst.(interface {
			Unwrap() metric.Observable
		}); ok {
			inst = u.Unwrap()
		}

		switch o := inst.(type) {
		case int64Observable:
			if err := o.registerable(m); err != nil {
				// errEmptyAgg means the instrument is drop-aggregated; that
				// is not an error, it just produces no data.
				if !errors.Is(err, errEmptyAgg) {
					errs.append(err)
				}
				continue
			}
			reg.registerInt64(o.observablID)
		case float64Observable:
			if err := o.registerable(m); err != nil {
				if !errors.Is(err, errEmptyAgg) {
					errs.append(err)
				}
				continue
			}
			reg.registerFloat64(o.observablID)
		default:
			// Instrument external to the SDK.
			return nil, fmt.Errorf("invalid observable: from different implementation")
		}
	}

	err := errs.errorOrNil()
	if reg.len() == 0 {
		// All insts use drop aggregation or are invalid.
		return noopRegister{}, err
	}

	// Some or all instruments were valid.
	cback := func(ctx context.Context) error { return f(ctx, reg) }
	return m.pipes.registerMultiCallback(cback), err
}
+
// observer tracks which observable instruments a callback Registration is
// allowed to make observations for.
type observer struct {
	embedded.Observer

	// float64 and int64 are the sets of registered instrument IDs, split by
	// number kind.
	float64 map[observablID[float64]]struct{}
	int64   map[observablID[int64]]struct{}
}

// newObserver returns an observer with empty registration sets.
func newObserver() observer {
	return observer{
		float64: make(map[observablID[float64]]struct{}),
		int64:   make(map[observablID[int64]]struct{}),
	}
}

// len returns the total number of registered instruments.
func (r observer) len() int {
	return len(r.float64) + len(r.int64)
}

// registerFloat64 allows observations for the float64 instrument with id.
func (r observer) registerFloat64(id observablID[float64]) {
	r.float64[id] = struct{}{}
}

// registerInt64 allows observations for the int64 instrument with id.
func (r observer) registerInt64(id observablID[int64]) {
	r.int64[id] = struct{}{}
}
+
var (
	// errUnknownObserver is logged when an observable comes from a different
	// SDK implementation than this one.
	errUnknownObserver = errors.New("unknown observable instrument")
	// errUnregObserver is logged when an observation targets an instrument
	// that was not passed to RegisterCallback.
	errUnregObserver = errors.New("observable instrument not registered for callback")
)
+
// ObserveFloat64 records v for o. The observation is dropped, and an error
// logged, unless o is an SDK float64 observable registered with this
// callback's Registration.
func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) {
	var oImpl float64Observable
	switch conv := o.(type) {
	case float64Observable:
		oImpl = conv
	case interface {
		Unwrap() metric.Observable
	}:
		// Unwrap any global.
		async := conv.Unwrap()
		var ok bool
		if oImpl, ok = async.(float64Observable); !ok {
			global.Error(errUnknownObserver, "failed to record asynchronous")
			return
		}
	default:
		global.Error(errUnknownObserver, "failed to record")
		return
	}

	if _, registered := r.float64[oImpl.observablID]; !registered {
		// Drop-aggregated instruments are expectedly unregistered; do not
		// log an error for them.
		if !oImpl.dropAggregation {
			global.Error(errUnregObserver, "failed to record",
				"name", oImpl.name,
				"description", oImpl.description,
				"unit", oImpl.unit,
				"number", fmt.Sprintf("%T", float64(0)),
			)
		}
		return
	}
	c := metric.NewObserveConfig(opts)
	oImpl.observe(v, c.Attributes())
}
+
// ObserveInt64 records v for o. The observation is dropped, and an error
// logged, unless o is an SDK int64 observable registered with this
// callback's Registration.
func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
	var oImpl int64Observable
	switch conv := o.(type) {
	case int64Observable:
		oImpl = conv
	case interface {
		Unwrap() metric.Observable
	}:
		// Unwrap any global.
		async := conv.Unwrap()
		var ok bool
		if oImpl, ok = async.(int64Observable); !ok {
			global.Error(errUnknownObserver, "failed to record asynchronous")
			return
		}
	default:
		global.Error(errUnknownObserver, "failed to record")
		return
	}

	if _, registered := r.int64[oImpl.observablID]; !registered {
		// Drop-aggregated instruments are expectedly unregistered; do not
		// log an error for them.
		if !oImpl.dropAggregation {
			global.Error(errUnregObserver, "failed to record",
				"name", oImpl.name,
				"description", oImpl.description,
				"unit", oImpl.unit,
				"number", fmt.Sprintf("%T", int64(0)),
			)
		}
		return
	}
	c := metric.NewObserveConfig(opts)
	oImpl.observe(v, c.Attributes())
}
+
// noopRegister is a Registration with nothing to unregister. It is returned
// when no valid instruments were registered for a callback.
type noopRegister struct{ embedded.Registration }

// Unregister is a no-op.
func (noopRegister) Unregister() error {
	return nil
}
+
// int64InstProvider provides int64 OpenTelemetry instruments.
type int64InstProvider struct{ *meter }

// aggs returns the aggregation measures resolved across every pipeline for
// an instrument with the given identity.
func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) {
	inst := Instrument{
		Name:        name,
		Description: desc,
		Unit:        u,
		Kind:        kind,
		Scope:       p.scope,
	}
	return p.int64Resolver.Aggregators(inst)
}

// histogramAggs returns the aggregation measures for a histogram instrument,
// applying cfg's explicit bucket boundaries when they are valid.
func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) {
	boundaries := cfg.ExplicitBucketBoundaries()
	aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
	if aggError != nil {
		// If boundaries are invalid, ignore them.
		boundaries = nil
	}
	inst := Instrument{
		Name:        name,
		Description: cfg.Description(),
		Unit:        cfg.Unit(),
		Kind:        InstrumentKindHistogram,
		Scope:       p.scope,
	}
	measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries)
	// The boundary validation error, if any, is still surfaced to the caller.
	return measures, errors.Join(aggError, err)
}

// lookup returns the resolved instrumentImpl, creating and caching it on
// first use.
func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
	return p.meter.int64Insts.Lookup(instID{
		Name:        name,
		Description: desc,
		Unit:        u,
		Kind:        kind,
	}, func() (*int64Inst, error) {
		aggs, err := p.aggs(kind, name, desc, u)
		return &int64Inst{measures: aggs}, err
	})
}

// lookupHistogram returns the resolved instrumentImpl for a histogram,
// creating and caching it on first use.
func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
	return p.meter.int64Insts.Lookup(instID{
		Name:        name,
		Description: cfg.Description(),
		Unit:        cfg.Unit(),
		Kind:        InstrumentKindHistogram,
	}, func() (*int64Inst, error) {
		aggs, err := p.histogramAggs(name, cfg)
		return &int64Inst{measures: aggs}, err
	})
}
+
// float64InstProvider provides float64 OpenTelemetry instruments.
type float64InstProvider struct{ *meter }

// aggs returns the aggregation measures resolved across every pipeline for
// an instrument with the given identity.
func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) {
	inst := Instrument{
		Name:        name,
		Description: desc,
		Unit:        u,
		Kind:        kind,
		Scope:       p.scope,
	}
	return p.float64Resolver.Aggregators(inst)
}

// histogramAggs returns the aggregation measures for a histogram instrument,
// applying cfg's explicit bucket boundaries when they are valid.
func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) {
	boundaries := cfg.ExplicitBucketBoundaries()
	aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
	if aggError != nil {
		// If boundaries are invalid, ignore them.
		boundaries = nil
	}
	inst := Instrument{
		Name:        name,
		Description: cfg.Description(),
		Unit:        cfg.Unit(),
		Kind:        InstrumentKindHistogram,
		Scope:       p.scope,
	}
	measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries)
	// The boundary validation error, if any, is still surfaced to the caller.
	return measures, errors.Join(aggError, err)
}

// lookup returns the resolved instrumentImpl, creating and caching it on
// first use.
func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
	return p.meter.float64Insts.Lookup(instID{
		Name:        name,
		Description: desc,
		Unit:        u,
		Kind:        kind,
	}, func() (*float64Inst, error) {
		aggs, err := p.aggs(kind, name, desc, u)
		return &float64Inst{measures: aggs}, err
	})
}

// lookupHistogram returns the resolved instrumentImpl for a histogram,
// creating and caching it on first use.
func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
	return p.meter.float64Insts.Lookup(instID{
		Name:        name,
		Description: cfg.Description(),
		Unit:        cfg.Unit(),
		Kind:        InstrumentKindHistogram,
	}, func() (*float64Inst, error) {
		aggs, err := p.histogramAggs(name, cfg)
		return &float64Inst{measures: aggs}, err
	})
}
+
// int64Observer bundles the measures of one pipeline so a callback can record
// int64 observations against them.
type int64Observer struct {
	embedded.Int64Observer
	measures[int64]
}

// Observe records val with the attributes from opts.
func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
	c := metric.NewObserveConfig(opts)
	o.observe(val, c.Attributes())
}

// float64Observer bundles the measures of one pipeline so a callback can
// record float64 observations against them.
type float64Observer struct {
	embedded.Float64Observer
	measures[float64]
}

// Observe records val with the attributes from opts.
func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
	c := metric.NewObserveConfig(opts)
	o.observe(val, c.Attributes())
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
new file mode 100644
index 00000000000..32c17934fc4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
@@ -0,0 +1,307 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
+// that created them.
+type ResourceMetrics struct {
+	// Resource represents the entity that collected the metrics.
+	Resource *resource.Resource
+	// ScopeMetrics are the collection of metrics with unique Scopes.
+	ScopeMetrics []ScopeMetrics
+}
+
+// ScopeMetrics is a collection of metrics produced by a Meter.
+type ScopeMetrics struct {
+	// Scope is the Scope that the Meter was created with.
+	Scope instrumentation.Scope
+	// Metrics are a list of aggregations created by the Meter.
+	Metrics []Metrics
+}
+
+// Metrics is a collection of one or more aggregated timeseries from an Instrument.
+type Metrics struct {
+	// Name is the name of the Instrument that created this data.
+	Name string
+	// Description is the description of the Instrument, which can be used in documentation.
+	Description string
+	// Unit is the unit in which the Instrument reports.
+	Unit string
+	// Data is the aggregated data from an Instrument.
+	Data Aggregation
+}
+
+// Aggregation is the store of data reported by an Instrument.
+// It will be one of: Gauge, Sum, Histogram, ExponentialHistogram, or
+// Summary.
+type Aggregation interface {
+	privateAggregation()
+}
+
+// Gauge represents a measurement of the current value of an instrument.
+type Gauge[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []DataPoint[N]
+}
+
+func (Gauge[N]) privateAggregation() {}
+
+// Sum represents the sum of all measurements of values from an instrument.
+type Sum[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []DataPoint[N]
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+	// IsMonotonic represents if this aggregation only increases or decreases.
+	IsMonotonic bool
+}
+
+func (Sum[N]) privateAggregation() {}
+
+// DataPoint is a single data point in a timeseries. It is used by both
+// Gauge and Sum aggregations.
+type DataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started. (optional)
+	StartTime time.Time `json:",omitempty"`
+	// Time is the time when the timeseries was recorded. (optional)
+	Time time.Time `json:",omitempty"`
+	// Value is the value of this data point.
+	Value N
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// Histogram represents the histogram of all measurements of values from an instrument.
+type Histogram[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []HistogramDataPoint[N]
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+}
+
+func (Histogram[N]) privateAggregation() {}
+
+// HistogramDataPoint is a single histogram data point in a timeseries.
+type HistogramDataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this histogram has been calculated with.
+	Count uint64
+	// Bounds are the upper bounds of the buckets of the histogram. Because the
+	// last boundary is +infinity this one is implied.
+	Bounds []float64
+	// BucketCounts is the count of each of the buckets.
+	BucketCounts []uint64
+
+	// Min is the minimum value recorded. (optional)
+	Min Extrema[N]
+	// Max is the maximum value recorded. (optional)
+	Max Extrema[N]
+	// Sum is the sum of the values recorded.
+	Sum N
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialHistogram represents the histogram of all measurements of values from an instrument.
+type ExponentialHistogram[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// attributes.
+	DataPoints []ExponentialHistogramDataPoint[N]
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+}
+
+func (ExponentialHistogram[N]) privateAggregation() {}
+
+// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries.
+type ExponentialHistogramDataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this histogram has been calculated with.
+	Count uint64
+	// Min is the minimum value recorded. (optional)
+	Min Extrema[N]
+	// Max is the maximum value recorded. (optional)
+	Max Extrema[N]
+	// Sum is the sum of the values recorded.
+	Sum N
+
+	// Scale describes the resolution of the histogram. Boundaries are
+	// located at powers of the base, where:
+	//
+	//   base = 2 ^ (2 ^ -Scale)
+	Scale int32
+	// ZeroCount is the number of values whose absolute value
+	// is less than or equal to [ZeroThreshold].
+	// When ZeroThreshold is 0, this is the number of values that
+	// cannot be expressed using the standard exponential formula
+	// as well as values that have been rounded to zero.
+	// ZeroCount represents the special zero count bucket.
+	ZeroCount uint64
+
+	// PositiveBucket is range of positive value bucket counts.
+	PositiveBucket ExponentialBucket
+	// NegativeBucket is range of negative value bucket counts.
+	NegativeBucket ExponentialBucket
+
+	// ZeroThreshold is the width of the zero region. Where the zero region is
+	// defined as the closed interval [-ZeroThreshold, ZeroThreshold].
+	ZeroThreshold float64
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialBucket are a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialBucket struct {
+	// Offset is the bucket index of the first entry in the Counts slice.
+	Offset int32
+	// Counts is a slice where Counts[i] carries the count of the bucket at
+	// index (Offset+i). Counts[i] is the count of values greater than
+	// base^(Offset+i) and less than or equal to base^(Offset+i+1).
+	Counts []uint64
+}
+
+// Extrema is the minimum or maximum value of a dataset. Its zero value
+// represents "not defined"; use NewExtrema to construct a defined value.
+type Extrema[N int64 | float64] struct {
+	value N
+	valid bool
+}
+
+// MarshalText converts the Extrema value to text. An undefined Extrema is
+// encoded as JSON null.
+func (e Extrema[N]) MarshalText() ([]byte, error) {
+	if !e.valid {
+		return json.Marshal(nil)
+	}
+	return json.Marshal(e.value)
+}
+
+// MarshalJSON converts the Extrema value to a JSON number, delegating to
+// MarshalText.
+func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
+	return e.MarshalText()
+}
+
+// NewExtrema returns an Extrema set to v.
+func NewExtrema[N int64 | float64](v N) Extrema[N] {
+	return Extrema[N]{value: v, valid: true}
+}
+
+// Value returns the Extrema value and true if the Extrema is defined.
+// Otherwise, if the Extrema is its zero-value, defined will be false.
+func (e Extrema[N]) Value() (v N, defined bool) {
+	return e.value, e.valid
+}
+
+// Exemplar is a measurement sampled from a timeseries providing a typical
+// example.
+type Exemplar[N int64 | float64] struct {
+	// FilteredAttributes are the attributes recorded with the measurement but
+	// filtered out of the timeseries' aggregated data.
+	FilteredAttributes []attribute.KeyValue
+	// Time is the time when the measurement was recorded.
+	Time time.Time
+	// Value is the measured value.
+	Value N
+	// SpanID is the ID of the span that was active during the measurement. If
+	// no span was active or the span was not sampled this will be empty.
+	SpanID []byte `json:",omitempty"`
+	// TraceID is the ID of the trace the active span belonged to during the
+	// measurement. If no span was active or the span was not sampled this will
+	// be empty.
+	TraceID []byte `json:",omitempty"`
+}
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// data type.
+//
+// These data points cannot always be merged in a meaningful way. The Summary
+// type is only used by bridges from other metrics libraries, and cannot be
+// produced using OpenTelemetry instrumentation.
+type Summary struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// attributes.
+	DataPoints []SummaryDataPoint
+}
+
+func (Summary) privateAggregation() {}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this summary has been calculated with.
+	Count uint64
+
+	// Sum is the sum of the values recorded.
+	Sum float64
+
+	// QuantileValues is an optional list of values at different quantiles of
+	// the distribution calculated from the current snapshot. The quantiles
+	// must be strictly increasing.
+	QuantileValues []QuantileValue
+}
+
+// QuantileValue is the value at a given quantile of a summary.
+type QuantileValue struct {
+	// Quantile is the quantile of this value.
+	//
+	// Must be in the interval [0.0, 1.0].
+	Quantile float64
+
+	// Value is the value at the given quantile of a summary.
+	//
+	// Quantile values must NOT be negative.
+	Value float64
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
new file mode 100644
index 00000000000..9fceb18cba7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate stringer -type=Temporality
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+// Temporality defines the window that an aggregation was calculated over.
+type Temporality uint8
+
+const (
+	// undefinedTemporality represents an unset Temporality.
+	//nolint:deadcode,unused,varcheck
+	undefinedTemporality Temporality = iota
+
+	// CumulativeTemporality defines a measurement interval that continues to
+	// expand forward in time from a starting point. New measurements are
+	// added to all previous measurements since a start time.
+	CumulativeTemporality
+
+	// DeltaTemporality defines a measurement interval that resets each cycle.
+	// Measurements from one cycle are recorded independently, measurements
+	// from other cycles do not affect them.
+	DeltaTemporality
+)
+
+// MarshalText returns the byte encoding of t. It never returns a non-nil
+// error.
+func (t Temporality) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
new file mode 100644
index 00000000000..4da833cdce2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[undefinedTemporality-0]
+	_ = x[CumulativeTemporality-1]
+	_ = x[DeltaTemporality-2]
+}
+
+const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
+
+var _Temporality_index = [...]uint8{0, 20, 41, 57}
+
+// String returns the constant name for i, or a "Temporality(N)"
+// placeholder for values outside the declared range.
+func (i Temporality) String() string {
+	if i >= Temporality(len(_Temporality_index)-1) {
+		return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
new file mode 100644
index 00000000000..ff86999c759
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -0,0 +1,381 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Default periodic reader timing: a 30 s export timeout and a 60 s
+// collection interval, overridable via options or environment variables.
+const (
+	defaultTimeout  = time.Millisecond * 30000
+	defaultInterval = time.Millisecond * 60000
+)
+
+// periodicReaderConfig contains configuration options for a PeriodicReader.
+type periodicReaderConfig struct {
+	interval  time.Duration
+	timeout   time.Duration
+	producers []Producer
+}
+
+// newPeriodicReaderConfig returns a periodicReaderConfig configured with
+// options. Environment variables provide the base values; options applied
+// afterwards take precedence over them.
+func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
+	c := periodicReaderConfig{
+		interval: envDuration(envInterval, defaultInterval),
+		timeout:  envDuration(envTimeout, defaultTimeout),
+	}
+	for _, o := range options {
+		c = o.applyPeriodic(c)
+	}
+	return c
+}
+
+// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
+type PeriodicReaderOption interface {
+	applyPeriodic(periodicReaderConfig) periodicReaderConfig
+}
+
+// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
+type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
+
+// applyPeriodic returns a periodicReaderConfig with option(s) applied.
+func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig {
+	return o(conf)
+}
+
+// WithTimeout configures the time a PeriodicReader waits for an export to
+// complete before canceling it. This includes an export which occurs as part
+// of Shutdown or ForceFlush if the user passed context does not have a
+// deadline. If the user passed context does have a deadline, it will be used
+// instead.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_TIMEOUT environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 30 seconds
+// is used as the default.
+func WithTimeout(d time.Duration) PeriodicReaderOption {
+	return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+		// Non-positive durations are ignored, leaving the prior value intact.
+		if d <= 0 {
+			return conf
+		}
+		conf.timeout = d
+		return conf
+	})
+}
+
+// WithInterval configures the intervening time between exports for a
+// PeriodicReader.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_INTERVAL environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 60 seconds
+// is used as the default.
+func WithInterval(d time.Duration) PeriodicReaderOption {
+	return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+		// Non-positive durations are ignored, leaving the prior value intact.
+		if d <= 0 {
+			return conf
+		}
+		conf.interval = d
+		return conf
+	})
+}
+
+// NewPeriodicReader returns a Reader that collects and exports metric data to
+// the exporter at a defined interval. By default, the returned Reader will
+// collect and export data every 60 seconds, and will cancel any attempts that
+// exceed 30 seconds, collect and export combined. The collect and export time
+// are not counted towards the interval between attempts.
+//
+// The Collect method of the returned Reader continues to gather and return
+// metric data to the user. It will not automatically send that data to the
+// exporter. That is left to the user to accomplish.
+func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader {
+	conf := newPeriodicReaderConfig(options)
+	ctx, cancel := context.WithCancel(context.Background())
+	r := &PeriodicReader{
+		interval: conf.interval,
+		timeout:  conf.timeout,
+		exporter: exporter,
+		flushCh:  make(chan chan error),
+		cancel:   cancel,
+		done:     make(chan struct{}),
+		// rmPool recycles ResourceMetrics between collection cycles to
+		// avoid a fresh allocation per export.
+		rmPool: sync.Pool{
+			New: func() interface{} {
+				return &metricdata.ResourceMetrics{}
+			},
+		},
+	}
+	r.externalProducers.Store(conf.producers)
+
+	// The export loop runs until r.cancel is called (see Shutdown); done is
+	// closed when the loop goroutine exits.
+	go func() {
+		defer func() { close(r.done) }()
+		r.run(ctx, conf.interval)
+	}()
+
+	return r
+}
+
+// PeriodicReader is a Reader that continuously collects and exports metric
+// data at a set interval.
+type PeriodicReader struct {
+	sdkProducer atomic.Value
+
+	mu                sync.Mutex
+	isShutdown        bool
+	externalProducers atomic.Value
+
+	interval time.Duration
+	timeout  time.Duration
+	exporter Exporter
+	flushCh  chan chan error
+
+	done         chan struct{}
+	cancel       context.CancelFunc
+	shutdownOnce sync.Once
+
+	rmPool sync.Pool
+}
+
+// Compile time check the periodicReader implements Reader and is comparable
+// (using it as a map key fails to compile if it is not).
+var _ = map[Reader]struct{}{&PeriodicReader{}: {}}
+
+// newTicker allows testing override.
+var newTicker = time.NewTicker
+
+// run continuously collects and exports metric data at the specified
+// interval. This will run until ctx is canceled or times out.
+func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) {
+	ticker := newTicker(interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			err := r.collectAndExport(ctx)
+			if err != nil {
+				otel.Handle(err)
+			}
+		case errCh := <-r.flushCh:
+			// A ForceFlush request: report the result back and restart the
+			// interval so the flush counts as this cycle's export.
+			errCh <- r.collectAndExport(ctx)
+			ticker.Reset(interval)
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// register registers p as the producer of this reader.
+func (r *PeriodicReader) register(p sdkProducer) {
+	// Only register once. If producer is already set, do nothing.
+	if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+		msg := "did not register periodic reader"
+		global.Error(errDuplicateRegister, msg)
+	}
+}
+
+// temporality reports the Temporality for the instrument kind provided,
+// delegating to the configured exporter.
+func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality {
+	return r.exporter.Temporality(kind)
+}
+
+// aggregation returns what Aggregation to use for kind, delegating to the
+// configured exporter.
+func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
+	return r.exporter.Aggregation(kind)
+}
+
+// collectAndExport gathers all metric data related to the periodicReader r
+// from the SDK and exports it with r's exporter, bounded by r.timeout.
+func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
+	defer cancel()
+
+	// rm is pooled (see rmPool) so repeated collection cycles reuse the
+	// same ResourceMetrics storage instead of allocating each time.
+	rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
+	err := r.Collect(ctx, rm)
+	if err == nil {
+		err = r.export(ctx, rm)
+	}
+	r.rmPool.Put(rm)
+	return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm. The metric
+// data is not exported to the configured exporter, it is left to the caller to
+// handle that if desired.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	if rm == nil {
+		return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
+	}
+	return r.collect(ctx, r.sdkProducer.Load(), rm)
+}
+
+// collect unwraps p as a produceHolder and stores its produce results in rm,
+// then appends the output of any registered external Producers.
+func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
+	if p == nil {
+		return ErrReaderNotRegistered
+	}
+
+	ph, ok := p.(produceHolder)
+	if !ok {
+		// The atomic.Value is entirely in the periodicReader's control so
+		// this should never happen. In the unforeseen case that this does
+		// happen, return an error instead of panicking so a users code does
+		// not halt in the processes.
+		err := fmt.Errorf("periodic reader: invalid producer: %T", p)
+		return err
+	}
+
+	err := ph.produce(ctx, rm)
+	if err != nil {
+		return err
+	}
+	var errs []error
+	for _, producer := range r.externalProducers.Load().([]Producer) {
+		// A failing external producer does not abort collection; its error
+		// is accumulated and any metrics it did return are still appended.
+		externalMetrics, err := producer.Produce(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+	}
+
+	global.Debug("PeriodicReader collection", "Data", rm)
+
+	return unifyErrors(errs)
+}
+
+// export exports metric data m using r's exporter.
+func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error {
+	return r.exporter.Export(ctx, m)
+}
+
+// ForceFlush flushes pending telemetry.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
+	// Prioritize the ctx timeout if it is set.
+	if _, ok := ctx.Deadline(); !ok {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, r.timeout)
+		defer cancel()
+	}
+
+	// Buffered (cap 1) so the run loop's send cannot block even if this
+	// caller gives up waiting due to ctx cancellation.
+	errCh := make(chan error, 1)
+	select {
+	case r.flushCh <- errCh:
+		select {
+		case err := <-errCh:
+			if err != nil {
+				return err
+			}
+			close(errCh)
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	case <-r.done:
+		// The run loop has exited, meaning the reader was shut down.
+		return ErrReaderShutdown
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+	return r.exporter.ForceFlush(ctx)
+}
+
+// Shutdown flushes pending telemetry and then stops the export pipeline.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Shutdown(ctx context.Context) error {
+	// err defaults to ErrReaderShutdown so every call after the first (the
+	// sync.Once body never reruns) reports the reader as already shut down.
+	err := ErrReaderShutdown
+	r.shutdownOnce.Do(func() {
+		// Prioritize the ctx timeout if it is set.
+		if _, ok := ctx.Deadline(); !ok {
+			var cancel context.CancelFunc
+			ctx, cancel = context.WithTimeout(ctx, r.timeout)
+			defer cancel()
+		}
+
+		// Stop the run loop.
+		r.cancel()
+		<-r.done
+
+		// Any future call to Collect will now return ErrReaderShutdown.
+		ph := r.sdkProducer.Swap(produceHolder{
+			produce: shutdownProducer{}.produce,
+		})
+
+		if ph != nil { // Reader was registered.
+			// Flush pending telemetry.
+			m := r.rmPool.Get().(*metricdata.ResourceMetrics)
+			err = r.collect(ctx, ph, m)
+			if err == nil {
+				err = r.export(ctx, m)
+			}
+			r.rmPool.Put(m)
+		}
+
+		// Shut the exporter down; its error takes over unless the final
+		// flush above already failed with something meaningful.
+		sErr := r.exporter.Shutdown(ctx)
+		if err == nil || err == ErrReaderShutdown {
+			err = sErr
+		}
+
+		r.mu.Lock()
+		defer r.mu.Unlock()
+		r.isShutdown = true
+		// release references to Producer(s)
+		r.externalProducers.Store([]Producer{})
+	})
+	return err
+}
+
+// MarshalLog returns logging data about the PeriodicReader, suitable for
+// structured log output.
+func (r *PeriodicReader) MarshalLog() interface{} {
+	r.mu.Lock()
+	down := r.isShutdown
+	r.mu.Unlock()
+	return struct {
+		Type       string
+		Exporter   Exporter
+		Registered bool
+		Shutdown   bool
+		Interval   time.Duration
+		Timeout    time.Duration
+	}{
+		Type:       "PeriodicReader",
+		Exporter:   r.exporter,
+		Registered: r.sdkProducer.Load() != nil,
+		Shutdown:   down,
+		Interval:   r.interval,
+		Timeout:    r.timeout,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
new file mode 100644
index 00000000000..da39ab961c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -0,0 +1,658 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "container/list"
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/internal"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// Sentinel errors returned while building aggregate functions for a
+// pipeline.
+var (
+	errCreatingAggregators     = errors.New("could not create all aggregators")
+	errIncompatibleAggregation = errors.New("incompatible aggregation")
+	errUnknownAggregation      = errors.New("unrecognized aggregation")
+)
+
+// instrumentSync is a synchronization point between a pipeline and an
+// instrument's aggregate function.
+type instrumentSync struct {
+	name        string
+	description string
+	unit        string
+	compAgg     aggregate.ComputeAggregation
+}
+
+// newPipeline returns a pipeline connected to reader that applies views and
+// reports metrics for res. A nil res is replaced with the empty Resource.
+func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
+	if res == nil {
+		res = resource.Empty()
+	}
+	return &pipeline{
+		resource: res,
+		reader:   reader,
+		views:    views,
+		// aggregations is lazy allocated when needed.
+	}
+}
+
+// pipeline connects all of the instruments created by a meter provider to a Reader.
+// This is the object that will be registered via `Reader.register()` when a
+// meter provider is created.
+//
+// As instruments are created the instrument should be checked if it exists in
+// the views of the Reader, and if so each aggregate function should be added
+// to the pipeline.
+type pipeline struct {
+	resource *resource.Resource
+
+	reader Reader
+	views  []View
+
+	sync.Mutex
+	aggregations   map[instrumentation.Scope][]instrumentSync
+	callbacks      []func(context.Context) error
+	multiCallbacks list.List
+}
+
+// addSync adds the instrumentSync to pipeline p with scope. This method is not
+// idempotent. Duplicate calls will result in duplicate additions, it is the
+// callers responsibility to ensure this is called with unique values.
+func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
+	p.Lock()
+	defer p.Unlock()
+	if p.aggregations == nil {
+		p.aggregations = map[instrumentation.Scope][]instrumentSync{
+			scope: {iSync},
+		}
+		return
+	}
+	p.aggregations[scope] = append(p.aggregations[scope], iSync)
+}
+
+// multiCallback is the signature of a multi-instrument observation callback.
+type multiCallback func(context.Context) error
+
+// addMultiCallback registers a multi-instrument callback to be run when
+// `produce()` is called. The returned function removes the callback from
+// the pipeline when invoked.
+func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
+	p.Lock()
+	defer p.Unlock()
+	e := p.multiCallbacks.PushBack(c)
+	return func() {
+		p.Lock()
+		p.multiCallbacks.Remove(e)
+		p.Unlock()
+	}
+}
+
+// produce returns aggregated metrics from a single collection.
+//
+// Callbacks (single- and multi-instrument) are run first so observations are
+// recorded, then each registered aggregate function is asked to compute its
+// output into rm. rm's slices are reused in place and compacted so only
+// scopes and metrics with data remain.
+//
+// This method is safe to call concurrently.
+func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	p.Lock()
+	defer p.Unlock()
+
+	var errs multierror
+	for _, c := range p.callbacks {
+		// TODO make the callbacks parallel. ( #3034 )
+		if err := c(ctx); err != nil {
+			errs.append(err)
+		}
+		if err := ctx.Err(); err != nil {
+			// Context expired mid-collection: clear partial output so the
+			// caller does not see an inconsistent snapshot.
+			rm.Resource = nil
+			rm.ScopeMetrics = rm.ScopeMetrics[:0]
+			return err
+		}
+	}
+	for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
+		// TODO make the callbacks parallel. ( #3034 )
+		f := e.Value.(multiCallback)
+		if err := f(ctx); err != nil {
+			errs.append(err)
+		}
+		if err := ctx.Err(); err != nil {
+			// This means the context expired before we finished running callbacks.
+			rm.Resource = nil
+			rm.ScopeMetrics = rm.ScopeMetrics[:0]
+			return err
+		}
+	}
+
+	rm.Resource = p.resource
+	rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations))
+
+	// i indexes the next ScopeMetrics slot to fill; j does the same for
+	// Metrics within a scope. Slots are only advanced when an aggregation
+	// produced data, compacting empty entries out of the reused slices.
+	i := 0
+	for scope, instruments := range p.aggregations {
+		rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments))
+		j := 0
+		for _, inst := range instruments {
+			data := rm.ScopeMetrics[i].Metrics[j].Data
+			if n := inst.compAgg(&data); n > 0 {
+				rm.ScopeMetrics[i].Metrics[j].Name = inst.name
+				rm.ScopeMetrics[i].Metrics[j].Description = inst.description
+				rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit
+				rm.ScopeMetrics[i].Metrics[j].Data = data
+				j++
+			}
+		}
+		rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j]
+		if len(rm.ScopeMetrics[i].Metrics) > 0 {
+			rm.ScopeMetrics[i].Scope = scope
+			i++
+		}
+	}
+
+	rm.ScopeMetrics = rm.ScopeMetrics[:i]
+
+	return errs.errorOrNil()
+}
+
+// inserter facilitates inserting of new instruments from a single scope into a
+// pipeline.
+type inserter[N int64 | float64] struct {
+	// aggregators is a cache that holds aggregate function inputs whose
+	// outputs have been inserted into the underlying reader pipeline. This
+	// cache ensures no duplicate aggregate functions are inserted into the
+	// reader pipeline and if a new request during an instrument creation asks
+	// for the same aggregate function input the same instance is returned.
+	aggregators *cache[instID, aggVal[N]]
+
+	// views is a cache that holds instrument identifiers for all the
+	// instruments a Meter has created, it is provided from the Meter that owns
+	// this inserter. This cache ensures during the creation of instruments
+	// with the same name but different options (e.g. description, unit) a
+	// warning message is logged.
+	views *cache[string, instID]
+
+	pipeline *pipeline
+}
+
+// newInserter returns an inserter for pipeline p that records created
+// instrument identifiers in vc. A nil vc is replaced with a fresh, private
+// cache.
+func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] {
+	if vc == nil {
+		vc = &cache[string, instID]{}
+	}
+	return &inserter[N]{
+		aggregators: &cache[instID, aggVal[N]]{},
+		views:       vc,
+		pipeline:    p,
+	}
+}
+
+// Instrument inserts the instrument inst with instUnit into a pipeline. All
+// views the pipeline contains are matched against, and any matching view that
+// creates a unique aggregate function will have its output inserted into the
+// pipeline and its input included in the returned slice.
+//
+// The returned aggregate function inputs are ensured to be deduplicated and
+// unique. If another view in another pipeline that is cached by this
+// inserter's cache has already inserted the same aggregate function for the
+// same instrument, that function's input instance is returned.
+//
+// If another instrument has already been inserted by this inserter, or any
+// other using the same cache, and it conflicts with the instrument being
+// inserted in this call, an aggregate function input matching the arguments
+// will still be returned but an Info level log message will also be logged to
+// the OTel global logger.
+//
+// If the passed instrument would result in an incompatible aggregate function,
+// an error is returned and that aggregate function output is not inserted nor
+// is its input returned.
+//
+// If an instrument is determined to use a Drop aggregation, that instrument is
+// not inserted nor returned.
+func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) {
+	var (
+		matched  bool
+		measures []aggregate.Measure[N]
+	)
+
+	errs := &multierror{wrapped: errCreatingAggregators}
+	// seen tracks aggregate function IDs already collected so overlapping
+	// views do not produce duplicate measures.
+	seen := make(map[uint64]struct{})
+	for _, v := range i.pipeline.views {
+		stream, match := v(inst)
+		if !match {
+			continue
+		}
+		matched = true
+		in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+		if err != nil {
+			errs.append(err)
+		}
+		if in == nil { // Drop aggregation.
+			continue
+		}
+		if _, ok := seen[id]; ok {
+			// This aggregate function has already been added.
+			continue
+		}
+		seen[id] = struct{}{}
+		measures = append(measures, in)
+	}
+
+	if matched {
+		return measures, errs.errorOrNil()
+	}
+
+	// Apply the implicit default view if no explicit view matched.
+	stream := Stream{
+		Name:        inst.Name,
+		Description: inst.Description,
+		Unit:        inst.Unit,
+	}
+	in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+	if err != nil {
+		errs.append(err)
+	}
+	if in != nil {
+		// Ensured to have not seen given matched was false.
+		measures = append(measures, in)
+	}
+	return measures, errs.errorOrNil()
+}
+
+// addCallback registers a single instrument callback to be run when
+// `produce()` is called.
+func (i *inserter[N]) addCallback(cback func(context.Context) error) {
+	i.pipeline.Lock()
+	defer i.pipeline.Unlock()
+	i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
+}
+
+// aggIDCount is a monotonically increasing counter used to assign each
+// created aggregate function a unique ID. It must only be accessed with
+// sync/atomic operations.
+var aggIDCount uint64
+
+// aggVal is the cached value in an aggregators cache.
+type aggVal[N int64 | float64] struct {
+	ID      uint64
+	Measure aggregate.Measure[N]
+	Err     error
+}
+
+// readerDefaultAggregation returns the default aggregation for the instrument
+// kind based on the reader's aggregation preferences. This is used unless the
+// aggregation is overridden with a view.
+func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation {
+	aggregation := i.pipeline.reader.aggregation(kind)
+	switch aggregation.(type) {
+	case nil, AggregationDefault:
+		// If the reader returns default or nil use the default selector.
+		aggregation = DefaultAggregationSelector(kind)
+	default:
+		// Deep copy and validate before using.
+		aggregation = aggregation.copy()
+		if err := aggregation.err(); err != nil {
+			// Fall back to the default selector, but log the invalid
+			// aggregation so the misconfiguration is visible.
+			orig := aggregation
+			aggregation = DefaultAggregationSelector(kind)
+			global.Error(
+				err, "using default aggregation instead",
+				"aggregation", orig,
+				"replacement", aggregation,
+			)
+		}
+	}
+	return aggregation
+}
+
+// cachedAggregator returns the appropriate aggregate input and output
+// functions for an instrument configuration. If the exact instrument has been
+// created within the inst.Scope, those aggregate function instances will be
+// returned. Otherwise, new computed aggregate functions will be cached and
+// returned.
+//
+// If the instrument configuration conflicts with an instrument that has
+// already been created (e.g. description, unit, data type) a warning will be
+// logged at the "Info" level with the global OTel logger. Valid new aggregate
+// functions for the instrument configuration will still be returned without an
+// error.
+//
+// If the instrument defines an unknown or incompatible aggregation, an error
+// is returned.
+func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
+	switch stream.Aggregation.(type) {
+	case nil:
+		// The aggregation was not overridden with a view. Use the aggregation
+		// provided by the reader.
+		stream.Aggregation = readerAggregation
+	case AggregationDefault:
+		// The view explicitly requested the default aggregation.
+		stream.Aggregation = DefaultAggregationSelector(kind)
+	}
+
+	if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
+		return nil, 0, fmt.Errorf(
+			"creating aggregator with instrumentKind: %d, aggregation %v: %w",
+			kind, stream.Aggregation, err,
+		)
+	}
+
+	id := i.instID(kind, stream)
+	// If there is a conflict, the specification says the view should
+	// still be applied and a warning should be logged.
+	i.logConflict(id)
+
+	// If there are requests for the same instrument with different name
+	// casing, the first-seen needs to be returned. Use a normalized ID for the
+	// cache lookup to ensure the correct comparison.
+	normID := id.normalize()
+	cv := i.aggregators.Lookup(normID, func() aggVal[N] {
+		b := aggregate.Builder[N]{
+			Temporality:   i.pipeline.reader.temporality(kind),
+			ReservoirFunc: reservoirFunc[N](stream.Aggregation),
+		}
+		b.Filter = stream.AttributeFilter
+		// A value less than or equal to zero will disable the aggregation
+		// limits for the builder (and all the created aggregates).
+		// CardinalityLimit.Lookup returns 0 by default if unset (or
+		// unrecognized input). Use that value directly.
+		b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
+
+		in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
+		if err != nil {
+			return aggVal[N]{0, nil, err}
+		}
+		if in == nil { // Drop aggregator.
+			return aggVal[N]{0, nil, nil}
+		}
+		i.pipeline.addSync(scope, instrumentSync{
+			// Use the first-seen name casing for this and all subsequent
+			// requests of this instrument.
+			name:        stream.Name,
+			description: stream.Description,
+			unit:        stream.Unit,
+			compAgg:     out,
+		})
+		id := atomic.AddUint64(&aggIDCount, 1)
+		return aggVal[N]{id, in, err}
+	})
+	return cv.Measure, cv.ID, cv.Err
+}
+
+// logConflict validates if an instrument with the same case-insensitive name
+// as id has already been created. If that instrument conflicts with id, a
+// warning is logged.
+func (i *inserter[N]) logConflict(id instID) {
+	// The API specification defines names as case-insensitive. If there is a
+	// different casing of a name it needs to be a conflict.
+	name := id.normalize().Name
+	existing := i.views.Lookup(name, func() instID { return id })
+	if id == existing {
+		// Either the first registration of this name, or an exact duplicate;
+		// neither is a conflict.
+		return
+	}
+
+	const msg = "duplicate metric stream definitions"
+	args := []interface{}{
+		"names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
+		"descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
+		"kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),
+		"units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit),
+		"numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number),
+	}
+
+	// The specification recommends logging a suggested view to resolve
+	// conflicts if possible.
+	//
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration
+	if id.Unit != existing.Unit || id.Number != existing.Number {
+		// There is no view resolution for these, don't make a suggestion.
+		global.Warn(msg, args...)
+		return
+	}
+
+	var stream string
+	if id.Name != existing.Name || id.Kind != existing.Kind {
+		stream = `Stream{Name: "{{NEW_NAME}}"}`
+	} else if id.Description != existing.Description {
+		stream = fmt.Sprintf("Stream{Description: %q}", existing.Description)
+	}
+
+	inst := fmt.Sprintf(
+		"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+		id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+	)
+	args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream))
+
+	global.Warn(msg, args...)
+}
+
+// instID returns the instrument identifier for the given kind and stream,
+// with Number set to the string form of this inserter's numeric type N
+// (e.g. "int64" or "float64").
+func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
+	var zero N
+	return instID{
+		Name:        stream.Name,
+		Description: stream.Description,
+		Unit:        stream.Unit,
+		Kind:        kind,
+		Number:      fmt.Sprintf("%T", zero),
+	}
+}
+
+// aggregateFunc returns new aggregate functions matching agg and kind. If the
+// agg is unknown, an error is returned. For an AggregationDrop both returned
+// functions are nil with a nil error, signifying the drop aggregator.
+func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
+	switch a := agg.(type) {
+	case AggregationDefault:
+		return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind)
+	case AggregationDrop:
+		// Return nil in and out to signify the drop aggregator.
+	case AggregationLastValue:
+		meas, comp = b.LastValue()
+	case AggregationSum:
+		switch kind {
+		case InstrumentKindObservableCounter:
+			meas, comp = b.PrecomputedSum(true)
+		case InstrumentKindObservableUpDownCounter:
+			meas, comp = b.PrecomputedSum(false)
+		case InstrumentKindCounter, InstrumentKindHistogram:
+			meas, comp = b.Sum(true)
+		default:
+			// InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and
+			// instrumentKindUndefined or other invalid instrument kinds.
+			meas, comp = b.Sum(false)
+		}
+	case AggregationExplicitBucketHistogram:
+		var noSum bool
+		switch kind {
+		case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge:
+			// The sum should not be collected for any instrument that can make
+			// negative measurements:
+			// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+			noSum = true
+		}
+		meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum)
+	case AggregationBase2ExponentialHistogram:
+		var noSum bool
+		switch kind {
+		case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge:
+			// The sum should not be collected for any instrument that can make
+			// negative measurements:
+			// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+			noSum = true
+		}
+		meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum)
+
+	default:
+		err = errUnknownAggregation
+	}
+
+	return meas, comp, err
+}
+
+// isAggregatorCompatible checks if the aggregation can be used by the instrument.
+// Current compatibility:
+//
+// | Instrument Kind          | Drop | LastValue | Sum | Histogram | Exponential Histogram |
+// |--------------------------|------|-----------|-----|-----------|-----------------------|
+// | Counter                  | ✓    |           | ✓   | ✓         | ✓                     |
+// | UpDownCounter            | ✓    |           | ✓   | ✓         | ✓                     |
+// | Histogram                | ✓    |           | ✓   | ✓         | ✓                     |
+// | Observable Counter       | ✓    |           | ✓   | ✓         | ✓                     |
+// | Observable UpDownCounter | ✓    |           | ✓   | ✓         | ✓                     |
+// | Observable Gauge         | ✓    | ✓         |     | ✓         | ✓                     |.
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
+	switch agg.(type) {
+	case AggregationDefault:
+		return nil
+	case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram:
+		switch kind {
+		case InstrumentKindCounter,
+			InstrumentKindUpDownCounter,
+			InstrumentKindHistogram,
+			InstrumentKindObservableCounter,
+			InstrumentKindObservableUpDownCounter,
+			InstrumentKindObservableGauge:
+			return nil
+		default:
+			return errIncompatibleAggregation
+		}
+	case AggregationSum:
+		switch kind {
+		case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
+			return nil
+		default:
+			// TODO: review need for aggregation check after
+			// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+			return errIncompatibleAggregation
+		}
+	case AggregationLastValue:
+		if kind == InstrumentKindObservableGauge {
+			return nil
+		}
+		// TODO: review need for aggregation check after
+		// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+		return errIncompatibleAggregation
+	case AggregationDrop:
+		return nil
+	default:
+		// Reached only after the AggregationDefault case above, so an
+		// unrecognized aggregation type is an error at this point.
+		return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
+	}
+}
+
+// pipelines is the group of pipelines connecting Readers with instrument
+// measurement.
+type pipelines []*pipeline
+
+// newPipelines returns one pipeline per reader, each configured with res and
+// views and registered as the sdkProducer of its reader.
+func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines {
+	pipes := make([]*pipeline, 0, len(readers))
+	for _, r := range readers {
+		p := newPipeline(res, r, views)
+		r.register(p)
+		pipes = append(pipes, p)
+	}
+	return pipes
+}
+
+// registerMultiCallback registers c with every pipeline in p and returns a
+// Registration whose Unregister removes c from all of them.
+func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
+	unregs := make([]func(), len(p))
+	for i, pipe := range p {
+		unregs[i] = pipe.addMultiCallback(c)
+	}
+	return unregisterFuncs{f: unregs}
+}
+
+// unregisterFuncs is a metric.Registration that bundles the per-pipeline
+// unregister functions of a multi-instrument callback.
+type unregisterFuncs struct {
+	embedded.Registration
+	f []func()
+}
+
+// Unregister invokes every stored unregister function. It always returns nil.
+func (u unregisterFuncs) Unregister() error {
+	for _, f := range u.f {
+		f()
+	}
+	return nil
+}
+
+// resolver facilitates resolving aggregate functions an instrument calls to
+// aggregate measurements with while updating all pipelines that need to pull
+// from those aggregations.
+type resolver[N int64 | float64] struct {
+	inserters []*inserter[N]
+}
+
+// newResolver returns a resolver with one inserter per pipeline in p, all
+// sharing the view cache vc.
+func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] {
+	in := make([]*inserter[N], len(p))
+	for i := range in {
+		in[i] = newInserter[N](p[i], vc)
+	}
+	return resolver[N]{in}
+}
+
+// Aggregators returns the Aggregators that must be updated by the instrument
+// defined by key.
+func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
+	var measures []aggregate.Measure[N]
+
+	errs := &multierror{}
+	for _, i := range r.inserters {
+		in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
+		if err != nil {
+			errs.append(err)
+		}
+		measures = append(measures, in...)
+	}
+	return measures, errs.errorOrNil()
+}
+
+// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
+// defined by key. If boundaries were provided on instrument instantiation, those take precedence
+// over boundaries provided by the reader.
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
+	var measures []aggregate.Measure[N]
+
+	errs := &multierror{}
+	for _, i := range r.inserters {
+		agg := i.readerDefaultAggregation(id.Kind)
+		if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
+			// Instrument-level boundaries override the reader's defaults.
+			histAgg.Boundaries = boundaries
+			agg = histAgg
+		}
+		in, err := i.Instrument(id, agg)
+		if err != nil {
+			errs.append(err)
+		}
+		measures = append(measures, in...)
+	}
+	return measures, errs.errorOrNil()
+}
+
+// multierror collects error messages, optionally prefixing them with a
+// wrapped sentinel error when combined.
+type multierror struct {
+	wrapped error
+	errors  []string
+}
+
+// errorOrNil returns nil if no errors were appended. Otherwise it returns a
+// single error joining all collected messages with "; ", wrapping the
+// sentinel error if one was set.
+func (m *multierror) errorOrNil() error {
+	if len(m.errors) == 0 {
+		return nil
+	}
+	if m.wrapped == nil {
+		return errors.New(strings.Join(m.errors, "; "))
+	}
+	return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; "))
+}
+
+// append records err's message for later combination by errorOrNil.
+func (m *multierror) append(err error) {
+	m.errors = append(m.errors, err.Error())
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
new file mode 100644
index 00000000000..7d1a9183ce1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
@@ -0,0 +1,154 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+)
+
+// MeterProvider handles the creation and coordination of Meters. All Meters
+// created by a MeterProvider will be associated with the same Resource, have
+// the same Views applied to them, and have their produced metric telemetry
+// passed to the configured Readers.
+type MeterProvider struct {
+	embedded.MeterProvider
+
+	pipes  pipelines
+	meters cache[instrumentation.Scope, *meter]
+
+	// forceFlush and shutdown fan out to the configured readers; either may
+	// be nil when no reader supports the corresponding signal.
+	forceFlush, shutdown func(context.Context) error
+	// stopped records whether Shutdown has been called.
+	stopped atomic.Bool
+}
+
+// Compile-time check MeterProvider implements metric.MeterProvider.
+var _ metric.MeterProvider = (*MeterProvider)(nil)
+
+// NewMeterProvider returns a new and configured MeterProvider.
+//
+// By default, the returned MeterProvider is configured with the default
+// Resource and no Readers. Readers cannot be added after a MeterProvider is
+// created. This means the returned MeterProvider, one created with no
+// Readers, will perform no operations.
+func NewMeterProvider(options ...Option) *MeterProvider {
+	conf := newConfig(options)
+	flush, sdown := conf.readerSignals()
+
+	mp := &MeterProvider{
+		pipes:      newPipelines(conf.res, conf.readers, conf.views),
+		forceFlush: flush,
+		shutdown:   sdown,
+	}
+	// Log after creation so all readers show correctly they are registered.
+	global.Info("MeterProvider created",
+		"Resource", conf.res,
+		"Readers", conf.readers,
+		"Views", len(conf.views),
+	)
+	return mp
+}
+
+// Meter returns a Meter with the given name and configured with options.
+//
+// The name should be the name of the instrumentation scope creating
+// telemetry. This name may be the same as the instrumented code only if that
+// code provides built-in instrumentation.
+//
+// Calls to the Meter method after Shutdown has been called will return Meters
+// that perform no operations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
+	if name == "" {
+		global.Warn("Invalid Meter name.", "name", name)
+	}
+
+	if mp.stopped.Load() {
+		// After Shutdown, hand out no-op Meters.
+		return noop.Meter{}
+	}
+
+	c := metric.NewMeterConfig(options...)
+	s := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	global.Info("Meter created",
+		"Name", s.Name,
+		"Version", s.Version,
+		"SchemaURL", s.SchemaURL,
+	)
+
+	// The cache ensures repeated requests for the same scope return the same
+	// *meter instance.
+	return mp.meters.Lookup(s, func() *meter {
+		return newMeter(s, mp.pipes)
+	})
+}
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources have been released in these
+// situations.
+//
+// ForceFlush calls ForceFlush(context.Context) error
+// on all Readers that implement this method.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+	if mp.forceFlush != nil {
+		return mp.forceFlush(ctx)
+	}
+	return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources have been released in these
+// situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+	// Even though it may seem like there is a synchronization issue between the
+	// call to `Store` and checking `shutdown`, the Go concurrency model ensures
+	// that is not the case, as all the atomic operations executed in a program
+	// behave as though executed in some sequentially consistent order. This
+	// definition provides the same semantics as C++'s sequentially consistent
+	// atomics and Java's volatile variables.
+	// See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
+
+	mp.stopped.Store(true)
+	if mp.shutdown != nil {
+		return mp.shutdown(ctx)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
new file mode 100644
index 00000000000..65cedaf3c07
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// errDuplicateRegister is logged by a Reader when an attempt to register it
+// more than once occurs.
+var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
+
+// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
+// the reader is registered with a MeterProvider.
+var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
+
+// ErrReaderShutdown is returned if Collect or Shutdown are called after a
+// reader has been Shutdown once.
+var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
+
+// errNonPositiveDuration is logged when an environment variable
+// has a non-positive value.
+var errNonPositiveDuration = fmt.Errorf("non-positive duration")
+
+// Reader is the interface used between the SDK and an
+// exporter. Control flow is bi-directional through the
+// Reader, since the SDK initiates ForceFlush and Shutdown
+// while the exporter initiates collection. The Register() method here
+// informs the Reader that it can begin reading, signaling the
+// start of bi-directional control flow.
+//
+// Typically, push-based exporters that are periodic will
+// implement PeriodicExporter themselves and construct a
+// PeriodicReader to satisfy this interface.
+//
+// Pull-based exporters will typically implement Register
+// themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Reader interface {
+	// register registers a Reader with a MeterProvider.
+	// The producer argument allows the Reader to signal the sdk to collect
+	// and send aggregated metric measurements.
+	register(sdkProducer)
+
+	// temporality reports the Temporality for the instrument kind provided.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	temporality(InstrumentKind) metricdata.Temporality
+
+	// aggregation returns what Aggregation to use for an instrument kind.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	aggregation(InstrumentKind) Aggregation // nolint:revive  // import-shadow for method scoped by type.
+
+	// Collect gathers and returns all metric data related to the Reader from
+	// the SDK and stores it in out. An error is returned if this is called
+	// after Shutdown or if out is nil.
+	//
+	// This method needs to be concurrent safe, and the cancellation of the
+	// passed context is expected to be honored.
+	Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown flushes all metric measurements held in an export pipeline and releases any
+	// held computational resources.
+	//
+	// This deadline or cancellation of the passed context are honored. An appropriate
+	// error will be returned in these situations. There is no guarantee that all
+	// telemetry will be flushed or all resources have been released in these
+	// situations.
+	//
+	// After Shutdown is called, calls to Collect will perform no operation and instead will return
+	// an error indicating the shutdown state.
+	//
+	// This method needs to be concurrent safe.
+	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// sdkProducer produces metrics for a Reader.
+type sdkProducer interface {
+	// produce returns aggregated metrics from a single collection.
+	//
+	// This method is safe to call concurrently.
+	produce(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// Producer produces metrics for a Reader from an external source.
+type Producer interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Produce returns aggregated metrics from an external source.
+	//
+	// This method should be safe to call concurrently.
+	Produce(context.Context) ([]metricdata.ScopeMetrics, error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// produceHolder is used as an atomic.Value to wrap the non-concrete producer
+// type.
+type produceHolder struct {
+	produce func(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// shutdownProducer produces an ErrReaderShutdown error always.
+type shutdownProducer struct{}
+
+// produce returns an ErrReaderShutdown error.
+func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
+	return ErrReaderShutdown
+}
+
+// TemporalitySelector selects the temporality to use based on the InstrumentKind.
+type TemporalitySelector func(InstrumentKind) metricdata.Temporality
+
+// DefaultTemporalitySelector is the default TemporalitySelector used if
+// WithTemporalitySelector is not provided. CumulativeTemporality will be used
+// for all instrument kinds if this TemporalitySelector is used.
+func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
+	return metricdata.CumulativeTemporality
+}
+
+// AggregationSelector selects the aggregation and the parameters to use for
+// that aggregation based on the InstrumentKind.
+//
+// If the Aggregation returned is nil or DefaultAggregation, the selection from
+// DefaultAggregationSelector will be used.
+type AggregationSelector func(InstrumentKind) Aggregation
+
+// DefaultAggregationSelector returns the default aggregation and parameters
+// that will be used to summarize measurement made from an instrument of
+// InstrumentKind. This AggregationSelector using the following selection
+// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
+// Observable UpDownCounter ⇨ Sum, Observable Gauge ⇨ LastValue,
+// Histogram ⇨ ExplicitBucketHistogram.
+//
+// It panics if ik is not one of the kinds listed above.
+func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
+	switch ik {
+	case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
+		return AggregationSum{}
+	case InstrumentKindObservableGauge:
+		return AggregationLastValue{}
+	case InstrumentKindHistogram:
+		return AggregationExplicitBucketHistogram{
+			Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+			NoMinMax:   false,
+		}
+	}
+	panic("unknown instrument kind")
+}
+
+// ReaderOption is an option which can be applied to manual or Periodic
+// readers.
+type ReaderOption interface {
+	PeriodicReaderOption
+	ManualReaderOption
+}
+
+// WithProducer registers p as an external Producer of metric data
+// for this Reader.
+func WithProducer(p Producer) ReaderOption {
+	return producerOption{p: p}
+}
+
+// producerOption is the ReaderOption returned by WithProducer.
+type producerOption struct {
+	p Producer
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
+
+// applyPeriodic returns a periodicReaderConfig with option applied.
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
new file mode 100644
index 00000000000..310fa5a5309
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+// version is the current release version of the metric SDK in use.
+func version() string {
+	return "1.24.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
new file mode 100644
index 00000000000..65f243befed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+ errMultiInst = errors.New("name replacement for multiple instruments")
+ errEmptyView = errors.New("no criteria provided for view")
+
+ emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
+)
+
+// View is an override to the default behavior of the SDK. It defines how data
+// should be collected for certain instruments. It returns true and the exact
+// Stream to use for matching Instruments. Otherwise, if the view does not
+// match, false is returned.
+type View func(Instrument) (Stream, bool)
+
+// NewView returns a View that applies the Stream mask for all instruments that
+// match criteria. The returned View will only apply mask if all non-zero-value
+// fields of criteria match the corresponding Instrument passed to the view. If
+// no criteria are provided, i.e. all fields of criteria are their zero-values,
+// a view that matches no instruments is returned. If you need to match a
+// zero-value field, create a View directly.
+//
+// The Name field of criteria supports wildcard pattern matching. The "*"
+// wildcard is recognized as matching zero or more characters, and "?" is
+// recognized as matching exactly one character. For example, a pattern of "*"
+// matches all instrument names.
+//
+// The Stream mask only applies updates for non-zero-value fields. By default,
+// the Instrument the View matches against will be used for the Name,
+// Description, and Unit of the returned Stream and no Aggregation or
+// AttributeFilter are set. All non-zero-value fields of mask are used instead
+// of the default. If you need to zero out a Stream field returned from a
+// View, create a View directly.
+func NewView(criteria Instrument, mask Stream) View {
+ if criteria.empty() {
+ global.Error(
+ errEmptyView, "dropping view",
+ "mask", mask,
+ )
+ return emptyView
+ }
+
+ var matchFunc func(Instrument) bool
+ if strings.ContainsAny(criteria.Name, "*?") {
+ if mask.Name != "" {
+ global.Error(
+ errMultiInst, "dropping view",
+ "criteria", criteria,
+ "mask", mask,
+ )
+ return emptyView
+ }
+
+ // Handle branching here in NewView instead of criteria.matches so
+ // criteria.matches remains inlinable for the simple case.
+ pattern := regexp.QuoteMeta(criteria.Name)
+ pattern = "^" + pattern + "$"
+ pattern = strings.ReplaceAll(pattern, `\?`, ".")
+ pattern = strings.ReplaceAll(pattern, `\*`, ".*")
+ re := regexp.MustCompile(pattern)
+ matchFunc = func(i Instrument) bool {
+ return re.MatchString(i.Name) &&
+ criteria.matchesDescription(i) &&
+ criteria.matchesKind(i) &&
+ criteria.matchesUnit(i) &&
+ criteria.matchesScope(i)
+ }
+ } else {
+ matchFunc = criteria.matches
+ }
+
+ var agg Aggregation
+ if mask.Aggregation != nil {
+ agg = mask.Aggregation.copy()
+ if err := agg.err(); err != nil {
+ global.Error(
+ err, "not using aggregation with view",
+ "criteria", criteria,
+ "mask", mask,
+ )
+ agg = nil
+ }
+ }
+
+ return func(i Instrument) (Stream, bool) {
+ if matchFunc(i) {
+ return Stream{
+ Name: nonZero(mask.Name, i.Name),
+ Description: nonZero(mask.Description, i.Description),
+ Unit: nonZero(mask.Unit, i.Unit),
+ Aggregation: agg,
+ AttributeFilter: mask.AttributeFilter,
+ }, true
+ }
+ return Stream{}, false
+ }
+}
+
+// nonZero returns v if it is non-zero-valued, otherwise alt.
+func nonZero[T comparable](v, alt T) T {
+ var zero T
+ if v != zero {
+ return v
+ }
+ return alt
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go
new file mode 100644
index 00000000000..8e13d157c2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go
@@ -0,0 +1,371 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+
+package v1
+
+import (
+ v1 "go.opentelemetry.io/proto/otlp/metrics/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportMetricsServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An array of ResourceMetrics.
+ // For data coming from a single resource this array will typically contain one
+ // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+ // data from multiple origins typically batch the data before forwarding further and
+ // in that case this array will contain multiple elements.
+ ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
+}
+
+func (x *ExportMetricsServiceRequest) Reset() {
+ *x = ExportMetricsServiceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportMetricsServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsServiceRequest) ProtoMessage() {}
+
+func (x *ExportMetricsServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics {
+ if x != nil {
+ return x.ResourceMetrics
+ }
+ return nil
+}
+
+type ExportMetricsServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The details of a partially successful export request.
+ //
+ // If the request is only partially accepted
+ // (i.e. when the server accepts only parts of the data and rejects the rest)
+ // the server MUST initialize the `partial_success` field and MUST
+ // set the `rejected_` with the number of items it rejected.
+ //
+ // Servers MAY also make use of the `partial_success` field to convey
+ // warnings/suggestions to senders even when the request was fully accepted.
+ // In such cases, the `rejected_` MUST have a value of `0` and
+ // the `error_message` MUST be non-empty.
+ //
+ // A `partial_success` message with an empty value (rejected_ = 0 and
+ // `error_message` = "") is equivalent to it not being set/present. Senders
+ // SHOULD interpret it the same way as in the full success case.
+ PartialSuccess *ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportMetricsServiceResponse) Reset() {
+ *x = ExportMetricsServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportMetricsServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsServiceResponse) ProtoMessage() {}
+
+func (x *ExportMetricsServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportMetricsServiceResponse) GetPartialSuccess() *ExportMetricsPartialSuccess {
+ if x != nil {
+ return x.PartialSuccess
+ }
+ return nil
+}
+
+type ExportMetricsPartialSuccess struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of rejected data points.
+ //
+ // A `rejected_` field holding a `0` value indicates that the
+ // request was fully accepted.
+ RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"`
+ // A developer-facing human-readable message in English. It should be used
+ // either to explain why the server rejected parts of the data during a partial
+ // success or to convey warnings/suggestions during a full success. The message
+ // should offer guidance on how users can address such issues.
+ //
+ // error_message is an optional field. An error_message with an empty value
+ // is equivalent to it not being set.
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportMetricsPartialSuccess) Reset() {
+ *x = ExportMetricsPartialSuccess{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportMetricsPartialSuccess) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsPartialSuccess) ProtoMessage() {}
+
+func (x *ExportMetricsPartialSuccess) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsPartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 {
+ if x != nil {
+ return x.RejectedDataPoints
+ }
+ return 0
+}
+
+func (x *ExportMetricsPartialSuccess) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+var File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = []byte{
+ 0x0a, 0x3e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2c, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f,
+ 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f,
+ 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f,
+ 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x12, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x50,
+ 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xac, 0x01, 0x0a, 0x0e, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x99, 0x01,
+ 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x46, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72,
+ 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xa4, 0x01, 0x0a, 0x2b, 0x69, 0x6f,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x33, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70,
+ 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x28, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc
+)
+
+func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData
+}
+
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = []interface{}{
+ (*ExportMetricsServiceRequest)(nil), // 0: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest
+ (*ExportMetricsServiceResponse)(nil), // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse
+ (*ExportMetricsPartialSuccess)(nil), // 2: opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess
+ (*v1.ResourceMetrics)(nil), // 3: opentelemetry.proto.metrics.v1.ResourceMetrics
+}
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = []int32{
+ 3, // 0: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest.resource_metrics:type_name -> opentelemetry.proto.metrics.v1.ResourceMetrics
+ 2, // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess
+ 0, // 2: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:input_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest
+ 1, // 3: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:output_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse
+ 3, // [3:4] is the sub-list for method output_type
+ 2, // [2:3] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() }
+func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() {
+ if File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportMetricsServiceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportMetricsServiceResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportMetricsPartialSuccess); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs,
+ MessageInfos: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto = out.File
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = nil
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = nil
+ file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go
new file mode 100644
index 00000000000..c42f1454ed2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ExportMetricsServiceRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ExportMetricsServiceRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.Export(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux".
+// UnaryRPC :call MetricsServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead.
+func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error {
+
+ mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_MetricsService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterMetricsServiceHandler(ctx, mux, conn)
+}
+
+// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn))
+}
+
+// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "MetricsServiceClient" to call the correct interceptors.
+func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error {
+
+ mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_MetricsService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, ""))
+)
+
+var (
+ forward_MetricsService_Export_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
new file mode 100644
index 00000000000..31d25fc15ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.1.0
+// - protoc v3.21.6
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+
+package v1
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// MetricsServiceClient is the client API for MetricsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type MetricsServiceClient interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error)
+}
+
+type metricsServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient {
+ return &metricsServiceClient{cc}
+}
+
+func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) {
+ out := new(ExportMetricsServiceResponse)
+ err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MetricsServiceServer is the server API for MetricsService service.
+// All implementations must embed UnimplementedMetricsServiceServer
+// for forward compatibility
+type MetricsServiceServer interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error)
+ mustEmbedUnimplementedMetricsServiceServer()
+}
+
+// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedMetricsServiceServer struct {
+}
+
+func (UnimplementedMetricsServiceServer) Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+func (UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {}
+
+// UnsafeMetricsServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to MetricsServiceServer will
+// result in compilation errors.
+type UnsafeMetricsServiceServer interface {
+ mustEmbedUnimplementedMetricsServiceServer()
+}
+
+func RegisterMetricsServiceServer(s grpc.ServiceRegistrar, srv MetricsServiceServer) {
+ s.RegisterService(&MetricsService_ServiceDesc, srv)
+}
+
+func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ExportMetricsServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricsServiceServer).Export(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// MetricsService_ServiceDesc is the grpc.ServiceDesc for MetricsService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var MetricsService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
+ HandlerType: (*MetricsServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Export",
+ Handler: _MetricsService_Export_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
new file mode 100644
index 00000000000..1ef2656e94d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
@@ -0,0 +1,2495 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/metrics/v1/metrics.proto
+
+package v1
+
+import (
+ v11 "go.opentelemetry.io/proto/otlp/common/v1"
+ v1 "go.opentelemetry.io/proto/otlp/resource/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// AggregationTemporality defines how a metric aggregator reports aggregated
+// values. It describes how those values relate to the time interval over
+// which they are aggregated.
+type AggregationTemporality int32
+
+const (
+ // UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
+ AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
+ // DELTA is an AggregationTemporality for a metric aggregator which reports
+ // changes since last report time. Successive metrics contain aggregation of
+ // values from continuous and non-overlapping intervals.
+ //
+ // The values for a DELTA metric are based only on the time interval
+ // associated with one measurement cycle. There is no dependency on
+ // previous measurements like is the case for CUMULATIVE metrics.
+ //
+ // For example, consider a system measuring the number of requests that
+ // it receives and reports the sum of these requests every second as a
+ // DELTA metric:
+ //
+ // 1. The system starts receiving at time=t_0.
+ // 2. A request is received, the system measures 1 request.
+ // 3. A request is received, the system measures 1 request.
+ // 4. A request is received, the system measures 1 request.
+ // 5. The 1 second collection cycle ends. A metric is exported for the
+ // number of requests received over the interval of time t_0 to
+ // t_0+1 with a value of 3.
+ // 6. A request is received, the system measures 1 request.
+ // 7. A request is received, the system measures 1 request.
+ // 8. The 1 second collection cycle ends. A metric is exported for the
+ // number of requests received over the interval of time t_0+1 to
+ // t_0+2 with a value of 2.
+ AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
+ // CUMULATIVE is an AggregationTemporality for a metric aggregator which
+ // reports changes since a fixed start time. This means that current values
+ // of a CUMULATIVE metric depend on all previous measurements since the
+ // start time. Because of this, the sender is required to retain this state
+ // in some form. If this state is lost or invalidated, the CUMULATIVE metric
+ // values MUST be reset and a new fixed start time following the last
+ // reported measurement time sent MUST be used.
+ //
+ // For example, consider a system measuring the number of requests that
+ // it receives and reports the sum of these requests every second as a
+ // CUMULATIVE metric:
+ //
+ // 1. The system starts receiving at time=t_0.
+ // 2. A request is received, the system measures 1 request.
+ // 3. A request is received, the system measures 1 request.
+ // 4. A request is received, the system measures 1 request.
+ // 5. The 1 second collection cycle ends. A metric is exported for the
+ // number of requests received over the interval of time t_0 to
+ // t_0+1 with a value of 3.
+ // 6. A request is received, the system measures 1 request.
+ // 7. A request is received, the system measures 1 request.
+ // 8. The 1 second collection cycle ends. A metric is exported for the
+ // number of requests received over the interval of time t_0 to
+ // t_0+2 with a value of 5.
+ // 9. The system experiences a fault and loses state.
+ // 10. The system recovers and resumes receiving at time=t_1.
+ // 11. A request is received, the system measures 1 request.
+ // 12. The 1 second collection cycle ends. A metric is exported for the
+ // number of requests received over the interval of time t_1 to
+ // t_0+1 with a value of 1.
+ //
+ // Note: Even though, when reporting changes since last report time, using
+ // CUMULATIVE is valid, it is not recommended. This may cause problems for
+ // systems that do not use start_time to determine when the aggregation
+ // value was reset (e.g. Prometheus).
+ AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
+)
+
+// Enum value maps for AggregationTemporality.
+var (
+ AggregationTemporality_name = map[int32]string{
+ 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
+ 1: "AGGREGATION_TEMPORALITY_DELTA",
+ 2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ }
+ AggregationTemporality_value = map[string]int32{
+ "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
+ "AGGREGATION_TEMPORALITY_DELTA": 1,
+ "AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
+ }
+)
+
+func (x AggregationTemporality) Enum() *AggregationTemporality {
+ p := new(AggregationTemporality)
+ *p = x
+ return p
+}
+
+func (x AggregationTemporality) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AggregationTemporality) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (AggregationTemporality) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[0]
+}
+
+func (x AggregationTemporality) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AggregationTemporality.Descriptor instead.
+func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
+// bit-field representing 32 distinct boolean flags. Each flag defined in this
+// enum is a bit-mask. To test the presence of a single flag in the flags of
+// a data point, for example, use an expression like:
+//
+// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
+//
+type DataPointFlags int32
+
+const (
+ // The zero value for the enum. Should not be used for comparisons.
+ // Instead use bitwise "and" with the appropriate mask as shown above.
+ DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0
+ // This DataPoint is valid but has no recorded value. This value
+ // SHOULD be used to reflect explicitly missing data in a series, as
+ // for an equivalent to the Prometheus "staleness marker".
+ DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1
+)
+
+// Enum value maps for DataPointFlags.
+var (
+ DataPointFlags_name = map[int32]string{
+ 0: "DATA_POINT_FLAGS_DO_NOT_USE",
+ 1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK",
+ }
+ DataPointFlags_value = map[string]int32{
+ "DATA_POINT_FLAGS_DO_NOT_USE": 0,
+ "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1,
+ }
+)
+
+func (x DataPointFlags) Enum() *DataPointFlags {
+ p := new(DataPointFlags)
+ *p = x
+ return p
+}
+
+func (x DataPointFlags) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (DataPointFlags) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[1].Descriptor()
+}
+
+func (DataPointFlags) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[1]
+}
+
+func (x DataPointFlags) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use DataPointFlags.Descriptor instead.
+func (DataPointFlags) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+// MetricsData represents the metrics data that can be stored in a persistent
+// storage, OR can be embedded by other protocols that transfer OTLP metrics
+// data but do not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type MetricsData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An array of ResourceMetrics.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
+}
+
+func (x *MetricsData) Reset() {
+ *x = MetricsData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricsData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricsData) ProtoMessage() {}
+
+func (x *MetricsData) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricsData.ProtoReflect.Descriptor instead.
+func (*MetricsData) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MetricsData) GetResourceMetrics() []*ResourceMetrics {
+ if x != nil {
+ return x.ResourceMetrics
+ }
+ return nil
+}
+
+// A collection of ScopeMetrics from a Resource.
+type ResourceMetrics struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource for the metrics in this message.
+ // If this field is not set then no resource info is known.
+ Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ // A list of metrics that originate from a resource.
+ ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the resource data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_metrics" field which have their own schema_url field.
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ResourceMetrics) Reset() {
+ *x = ResourceMetrics{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceMetrics) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceMetrics) ProtoMessage() {}
+
+func (x *ResourceMetrics) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceMetrics.ProtoReflect.Descriptor instead.
+func (*ResourceMetrics) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceMetrics) GetResource() *v1.Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *ResourceMetrics) GetScopeMetrics() []*ScopeMetrics {
+ if x != nil {
+ return x.ScopeMetrics
+ }
+ return nil
+}
+
+func (x *ResourceMetrics) GetSchemaUrl() string {
+ if x != nil {
+ return x.SchemaUrl
+ }
+ return ""
+}
+
+// A collection of Metrics produced by an Scope.
+type ScopeMetrics struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The instrumentation scope information for the metrics in this message.
+ // Semantically when InstrumentationScope isn't set, it is equivalent with
+ // an empty instrumentation scope name (unknown).
+ Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
+ // A list of metrics that originate from an instrumentation library.
+ Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the metric data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all metrics in the "metrics" field.
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ScopeMetrics) Reset() {
+ *x = ScopeMetrics{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopeMetrics) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopeMetrics) ProtoMessage() {}
+
+func (x *ScopeMetrics) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopeMetrics.ProtoReflect.Descriptor instead.
+func (*ScopeMetrics) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ScopeMetrics) GetScope() *v11.InstrumentationScope {
+ if x != nil {
+ return x.Scope
+ }
+ return nil
+}
+
+func (x *ScopeMetrics) GetMetrics() []*Metric {
+ if x != nil {
+ return x.Metrics
+ }
+ return nil
+}
+
+func (x *ScopeMetrics) GetSchemaUrl() string {
+ if x != nil {
+ return x.SchemaUrl
+ }
+ return ""
+}
+
+// Defines a Metric which has one or more timeseries. The following is a
+// brief summary of the Metric data model. For more details, see:
+//
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
+//
+//
+// The data model and relation between entities is shown in the
+// diagram below. Here, "DataPoint" is the term used to refer to any
+// one of the specific data point value types, and "points" is the term used
+// to refer to any one of the lists of points contained in the Metric.
+//
+// - Metric is composed of a metadata and data.
+// - Metadata part contains a name, description, unit.
+// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
+// - DataPoint contains timestamps, attributes, and one of the possible value type
+// fields.
+//
+// Metric
+// +------------+
+// |name |
+// |description |
+// |unit | +------------------------------------+
+// |data |---> |Gauge, Sum, Histogram, Summary, ... |
+// +------------+ +------------------------------------+
+//
+// Data [One of Gauge, Sum, Histogram, Summary, ...]
+// +-----------+
+// |... | // Metadata about the Data.
+// |points |--+
+// +-----------+ |
+// | +---------------------------+
+// | |DataPoint 1 |
+// v |+------+------+ +------+ |
+// +-----+ ||label |label |...|label | |
+// | 1 |-->||value1|value2|...|valueN| |
+// +-----+ |+------+------+ +------+ |
+// | . | |+-----+ |
+// | . | ||value| |
+// | . | |+-----+ |
+// | . | +---------------------------+
+// | . | .
+// | . | .
+// | . | .
+// | . | +---------------------------+
+// | . | |DataPoint M |
+// +-----+ |+------+------+ +------+ |
+// | M |-->||label |label |...|label | |
+// +-----+ ||value1|value2|...|valueN| |
+// |+------+------+ +------+ |
+// |+-----+ |
+// ||value| |
+// |+-----+ |
+// +---------------------------+
+//
+// Each distinct type of DataPoint represents the output of a specific
+// aggregation function, the result of applying the DataPoint's
+// associated function of to one or more measurements.
+//
+// All DataPoint types have three common fields:
+// - Attributes includes key-value pairs associated with the data point
+// - TimeUnixNano is required, set to the end time of the aggregation
+// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
+// having an AggregationTemporality field, as discussed below.
+//
+// Both TimeUnixNano and StartTimeUnixNano values are expressed as
+// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+//
+// # TimeUnixNano
+//
+// This field is required, having consistent interpretation across
+// DataPoint types. TimeUnixNano is the moment corresponding to when
+// the data point's aggregate value was captured.
+//
+// Data points with the 0 value for TimeUnixNano SHOULD be rejected
+// by consumers.
+//
+// # StartTimeUnixNano
+//
+// StartTimeUnixNano in general allows detecting when a sequence of
+// observations is unbroken. This field indicates to consumers the
+// start time for points with cumulative and delta
+// AggregationTemporality, and it should be included whenever possible
+// to support correct rate calculation. Although it may be omitted
+// when the start time is truly unknown, setting StartTimeUnixNano is
+// strongly encouraged.
+type Metric struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // name of the metric.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // description of the metric, which can be used in documentation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // unit in which the metric value is reported. Follows the format
+ // described by http://unitsofmeasure.org/ucum.html.
+ Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
+ // Data determines the aggregation type (if any) of the metric, what is the
+ // reported value type for the data points, as well as the relatationship to
+ // the time interval over which they are reported.
+ //
+ // Types that are assignable to Data:
+ // *Metric_Gauge
+ // *Metric_Sum
+ // *Metric_Histogram
+ // *Metric_ExponentialHistogram
+ // *Metric_Summary
+ Data isMetric_Data `protobuf_oneof:"data"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Metric) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Metric) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Metric) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+func (m *Metric) GetData() isMetric_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (x *Metric) GetGauge() *Gauge {
+ if x, ok := x.GetData().(*Metric_Gauge); ok {
+ return x.Gauge
+ }
+ return nil
+}
+
+func (x *Metric) GetSum() *Sum {
+ if x, ok := x.GetData().(*Metric_Sum); ok {
+ return x.Sum
+ }
+ return nil
+}
+
+func (x *Metric) GetHistogram() *Histogram {
+ if x, ok := x.GetData().(*Metric_Histogram); ok {
+ return x.Histogram
+ }
+ return nil
+}
+
+func (x *Metric) GetExponentialHistogram() *ExponentialHistogram {
+ if x, ok := x.GetData().(*Metric_ExponentialHistogram); ok {
+ return x.ExponentialHistogram
+ }
+ return nil
+}
+
+func (x *Metric) GetSummary() *Summary {
+ if x, ok := x.GetData().(*Metric_Summary); ok {
+ return x.Summary
+ }
+ return nil
+}
+
+type isMetric_Data interface {
+ isMetric_Data()
+}
+
+type Metric_Gauge struct {
+ Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"`
+}
+
+type Metric_Sum struct {
+ Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"`
+}
+
+type Metric_Histogram struct {
+ Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"`
+}
+
+type Metric_ExponentialHistogram struct {
+ ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"`
+}
+
+type Metric_Summary struct {
+ Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"`
+}
+
+func (*Metric_Gauge) isMetric_Data() {}
+
+func (*Metric_Sum) isMetric_Data() {}
+
+func (*Metric_Histogram) isMetric_Data() {}
+
+func (*Metric_ExponentialHistogram) isMetric_Data() {}
+
+func (*Metric_Summary) isMetric_Data() {}
+
+// Gauge represents the type of a scalar metric that always exports the
+// "current value" for every data point. It should be used for an "unknown"
+// aggregation.
+//
+// A Gauge does not support different aggregation temporalities. Given the
+// aggregation is unknown, points cannot be combined using the same
+// aggregation, regardless of aggregation temporalities. Therefore,
+// AggregationTemporality is not included. Consequently, this also means
+// "StartTimeUnixNano" is ignored for all data points.
+type Gauge struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Gauge) GetDataPoints() []*NumberDataPoint {
+ if x != nil {
+ return x.DataPoints
+ }
+ return nil
+}
+
+// Sum represents the type of a scalar metric that is calculated as a sum of all
+// reported measurements over a time interval.
+type Sum struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+ // aggregation_temporality describes if the aggregator reports delta changes
+ // since last report time, or cumulative changes since a fixed start time.
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+ // If "true" means that the sum is monotonic.
+ IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
+}
+
+func (x *Sum) Reset() {
+ *x = Sum{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Sum) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Sum) ProtoMessage() {}
+
+func (x *Sum) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Sum.ProtoReflect.Descriptor instead.
+func (*Sum) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Sum) GetDataPoints() []*NumberDataPoint {
+ if x != nil {
+ return x.DataPoints
+ }
+ return nil
+}
+
+func (x *Sum) GetAggregationTemporality() AggregationTemporality {
+ if x != nil {
+ return x.AggregationTemporality
+ }
+ return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+func (x *Sum) GetIsMonotonic() bool {
+ if x != nil {
+ return x.IsMonotonic
+ }
+ return false
+}
+
+// Histogram represents the type of a metric that is calculated by aggregating
+// as a Histogram of all reported measurements over a time interval.
+type Histogram struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+ // aggregation_temporality describes if the aggregator reports delta changes
+ // since last report time, or cumulative changes since a fixed start time.
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Histogram) GetDataPoints() []*HistogramDataPoint {
+ if x != nil {
+ return x.DataPoints
+ }
+ return nil
+}
+
+func (x *Histogram) GetAggregationTemporality() AggregationTemporality {
+ if x != nil {
+ return x.AggregationTemporality
+ }
+ return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+// ExponentialHistogram represents the type of a metric that is calculated by aggregating
+// as a ExponentialHistogram of all reported double measurements over a time interval.
+type ExponentialHistogram struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+ // aggregation_temporality describes if the aggregator reports delta changes
+ // since last report time, or cumulative changes since a fixed start time.
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+func (x *ExponentialHistogram) Reset() {
+ *x = ExponentialHistogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExponentialHistogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExponentialHistogram) ProtoMessage() {}
+
+func (x *ExponentialHistogram) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExponentialHistogram.ProtoReflect.Descriptor instead.
+func (*ExponentialHistogram) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint {
+ if x != nil {
+ return x.DataPoints
+ }
+ return nil
+}
+
+func (x *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality {
+ if x != nil {
+ return x.AggregationTemporality
+ }
+ return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+// data type. These data points cannot always be merged in a meaningful way.
+// While they can be useful in some applications, histogram data points are
+// recommended for new applications.
+type Summary struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Summary) GetDataPoints() []*SummaryDataPoint {
+ if x != nil {
+ return x.DataPoints
+ }
+ return nil
+}
+
+// NumberDataPoint is a single data point in a timeseries that describes the
+// time-varying scalar value of a metric.
+type NumberDataPoint struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of key/value pairs that uniquely identify the timeseries from
+ // where this point belongs. The list may be empty (may contain 0 elements).
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // StartTimeUnixNano is optional but strongly encouraged, see the
+ // the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+ // TimeUnixNano is required, see the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+ // The value itself. A point is considered invalid when one of the recognized
+ // value fields is not present inside this oneof.
+ //
+ // Types that are assignable to Value:
+ // *NumberDataPoint_AsDouble
+ // *NumberDataPoint_AsInt
+ Value isNumberDataPoint_Value `protobuf_oneof:"value"`
+ // (Optional) List of exemplars collected from
+ // measurements that were used to form the data point
+ Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+ // Flags that apply to this specific data point. See DataPointFlags
+ // for the available flags and their meaning.
+ Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *NumberDataPoint) Reset() {
+ *x = NumberDataPoint{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NumberDataPoint) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NumberDataPoint) ProtoMessage() {}
+
+func (x *NumberDataPoint) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NumberDataPoint.ProtoReflect.Descriptor instead.
+func (*NumberDataPoint) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *NumberDataPoint) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *NumberDataPoint) GetStartTimeUnixNano() uint64 {
+ if x != nil {
+ return x.StartTimeUnixNano
+ }
+ return 0
+}
+
+func (x *NumberDataPoint) GetTimeUnixNano() uint64 {
+ if x != nil {
+ return x.TimeUnixNano
+ }
+ return 0
+}
+
+func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *NumberDataPoint) GetAsDouble() float64 {
+ if x, ok := x.GetValue().(*NumberDataPoint_AsDouble); ok {
+ return x.AsDouble
+ }
+ return 0
+}
+
+func (x *NumberDataPoint) GetAsInt() int64 {
+ if x, ok := x.GetValue().(*NumberDataPoint_AsInt); ok {
+ return x.AsInt
+ }
+ return 0
+}
+
+func (x *NumberDataPoint) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
+ }
+ return nil
+}
+
+func (x *NumberDataPoint) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+type isNumberDataPoint_Value interface {
+ isNumberDataPoint_Value()
+}
+
+type NumberDataPoint_AsDouble struct {
+ AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"`
+}
+
+type NumberDataPoint_AsInt struct {
+ AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
+}
+
+func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
+
+func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
+
+// HistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Histogram. A Histogram contains summary statistics
+// for a population of values, it may optionally contain the distribution of
+// those values across a set of buckets.
+//
+// If the histogram contains the distribution of values, then both
+// "explicit_bounds" and "bucket counts" fields must be defined.
+// If the histogram does not contain the distribution of values, then both
+// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
+// "sum" are known.
+type HistogramDataPoint struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of key/value pairs that uniquely identify the timeseries from
+ // where this point belongs. The list may be empty (may contain 0 elements).
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // StartTimeUnixNano is optional but strongly encouraged, see the
+ // the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+ // TimeUnixNano is required, see the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+ // count is the number of values in the population. Must be non-negative. This
+ // value must be equal to the sum of the "count" fields in buckets if a
+ // histogram is provided.
+ Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+ // sum of the values in the population. If count is zero then this field
+ // must be zero.
+ //
+ // Note: Sum should only be filled out when measuring non-negative discrete
+ // events, and is assumed to be monotonic over the values of these events.
+ // Negative events *can* be recorded, but sum should not be filled out when
+ // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+ // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+ Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+ // bucket_counts is an optional field contains the count values of histogram
+ // for each bucket.
+ //
+ // The sum of the bucket_counts must equal the value in the count field.
+ //
+ // The number of elements in bucket_counts array must be by one greater than
+ // the number of elements in explicit_bounds array.
+ BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+ // explicit_bounds specifies buckets with explicitly defined bounds for values.
+ //
+ // The boundaries for bucket at index i are:
+ //
+ // (-infinity, explicit_bounds[i]] for i == 0
+ // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
+ // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
+ //
+ // The values in the explicit_bounds array must be strictly increasing.
+ //
+ // Histogram buckets are inclusive of their upper boundary, except the last
+ // bucket where the boundary is at infinity. This format is intentionally
+ // compatible with the OpenMetrics histogram definition.
+ ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
+ // (Optional) List of exemplars collected from
+ // measurements that were used to form the data point
+ Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+ // Flags that apply to this specific data point. See DataPointFlags
+ // for the available flags and their meaning.
+ Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+ // min is the minimum value over (start_time, end_time].
+ Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
+ // max is the maximum value over (start_time, end_time].
+ Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
+}
+
+func (x *HistogramDataPoint) Reset() {
+ *x = HistogramDataPoint{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HistogramDataPoint) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HistogramDataPoint) ProtoMessage() {}
+
+func (x *HistogramDataPoint) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HistogramDataPoint.ProtoReflect.Descriptor instead.
+func (*HistogramDataPoint) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *HistogramDataPoint) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *HistogramDataPoint) GetStartTimeUnixNano() uint64 {
+ if x != nil {
+ return x.StartTimeUnixNano
+ }
+ return 0
+}
+
+func (x *HistogramDataPoint) GetTimeUnixNano() uint64 {
+ if x != nil {
+ return x.TimeUnixNano
+ }
+ return 0
+}
+
+func (x *HistogramDataPoint) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *HistogramDataPoint) GetSum() float64 {
+ if x != nil && x.Sum != nil {
+ return *x.Sum
+ }
+ return 0
+}
+
+func (x *HistogramDataPoint) GetBucketCounts() []uint64 {
+ if x != nil {
+ return x.BucketCounts
+ }
+ return nil
+}
+
+func (x *HistogramDataPoint) GetExplicitBounds() []float64 {
+ if x != nil {
+ return x.ExplicitBounds
+ }
+ return nil
+}
+
+func (x *HistogramDataPoint) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
+ }
+ return nil
+}
+
+func (x *HistogramDataPoint) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+func (x *HistogramDataPoint) GetMin() float64 {
+ if x != nil && x.Min != nil {
+ return *x.Min
+ }
+ return 0
+}
+
+func (x *HistogramDataPoint) GetMax() float64 {
+ if x != nil && x.Max != nil {
+ return *x.Max
+ }
+ return 0
+}
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains
+// summary statistics for a population of values, it may optionally contain the
+// distribution of those values across a set of buckets.
+//
+type ExponentialHistogramDataPoint struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of key/value pairs that uniquely identify the timeseries from
+ // where this point belongs. The list may be empty (may contain 0 elements).
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // StartTimeUnixNano is optional but strongly encouraged, see the
+ // the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+ // TimeUnixNano is required, see the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+ // count is the number of values in the population. Must be
+ // non-negative. This value must be equal to the sum of the "bucket_counts"
+ // values in the positive and negative Buckets plus the "zero_count" field.
+ Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+ // sum of the values in the population. If count is zero then this field
+ // must be zero.
+ //
+ // Note: Sum should only be filled out when measuring non-negative discrete
+ // events, and is assumed to be monotonic over the values of these events.
+ // Negative events *can* be recorded, but sum should not be filled out when
+ // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+ // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+ Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+ // scale describes the resolution of the histogram. Boundaries are
+ // located at powers of the base, where:
+ //
+ // base = (2^(2^-scale))
+ //
+ // The histogram bucket identified by `index`, a signed integer,
+ // contains values that are greater than (base^index) and
+ // less than or equal to (base^(index+1)).
+ //
+ // The positive and negative ranges of the histogram are expressed
+ // separately. Negative values are mapped by their absolute value
+ // into the negative range using the same scale as the positive range.
+ //
+ // scale is not restricted by the protocol, as the permissible
+ // values depend on the range of the data.
+ Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
+ // zero_count is the count of values that are either exactly zero or
+ // within the region considered zero by the instrumentation at the
+ // tolerated degree of precision. This bucket stores values that
+ // cannot be expressed using the standard exponential formula as
+ // well as values that have been rounded to zero.
+ //
+ // Implementations MAY consider the zero bucket to have probability
+ // mass equal to (zero_count / count).
+ ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
+ // positive carries the positive range of exponential bucket counts.
+ Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"`
+ // negative carries the negative range of exponential bucket counts.
+ Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"`
+ // Flags that apply to this specific data point. See DataPointFlags
+ // for the available flags and their meaning.
+ Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+ // (Optional) List of exemplars collected from
+ // measurements that were used to form the data point
+ Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+ // min is the minimum value over (start_time, end_time].
+ Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
+ // max is the maximum value over (start_time, end_time].
+ Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
+ // ZeroThreshold may be optionally set to convey the width of the zero
+ // region. Where the zero region is defined as the closed interval
+ // [-ZeroThreshold, ZeroThreshold].
+ // When ZeroThreshold is 0, zero count bucket stores values that cannot be
+ // expressed using the standard exponential formula as well as values that
+ // have been rounded to zero.
+ ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
+}
+
+func (x *ExponentialHistogramDataPoint) Reset() {
+ *x = ExponentialHistogramDataPoint{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExponentialHistogramDataPoint) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExponentialHistogramDataPoint) ProtoMessage() {}
+
+func (x *ExponentialHistogramDataPoint) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExponentialHistogramDataPoint.ProtoReflect.Descriptor instead.
+func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *ExponentialHistogramDataPoint) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 {
+ if x != nil {
+ return x.StartTimeUnixNano
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 {
+ if x != nil {
+ return x.TimeUnixNano
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetSum() float64 {
+ if x != nil && x.Sum != nil {
+ return *x.Sum
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetScale() int32 {
+ if x != nil {
+ return x.Scale
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetZeroCount() uint64 {
+ if x != nil {
+ return x.ZeroCount
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetPositive() *ExponentialHistogramDataPoint_Buckets {
+ if x != nil {
+ return x.Positive
+ }
+ return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetNegative() *ExponentialHistogramDataPoint_Buckets {
+ if x != nil {
+ return x.Negative
+ }
+ return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
+ }
+ return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetMin() float64 {
+ if x != nil && x.Min != nil {
+ return *x.Min
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetMax() float64 {
+ if x != nil && x.Max != nil {
+ return *x.Max
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetZeroThreshold() float64 {
+ if x != nil {
+ return x.ZeroThreshold
+ }
+ return 0
+}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of key/value pairs that uniquely identify the timeseries from
+ // where this point belongs. The list may be empty (may contain 0 elements).
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // StartTimeUnixNano is optional but strongly encouraged, see the
+ // the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+ // TimeUnixNano is required, see the detailed comments above Metric.
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+ // count is the number of values in the population. Must be non-negative.
+ Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+ // sum of the values in the population. If count is zero then this field
+ // must be zero.
+ //
+ // Note: Sum should only be filled out when measuring non-negative discrete
+ // events, and is assumed to be monotonic over the values of these events.
+ // Negative events *can* be recorded, but sum should not be filled out when
+ // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+ // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
+ Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
+ // (Optional) list of values at different quantiles of the distribution calculated
+ // from the current snapshot. The quantiles must be strictly increasing.
+ QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
+ // Flags that apply to this specific data point. See DataPointFlags
+ // for the available flags and their meaning.
+ Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *SummaryDataPoint) Reset() {
+ *x = SummaryDataPoint{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SummaryDataPoint) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryDataPoint) ProtoMessage() {}
+
+func (x *SummaryDataPoint) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryDataPoint.ProtoReflect.Descriptor instead.
+func (*SummaryDataPoint) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SummaryDataPoint) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *SummaryDataPoint) GetStartTimeUnixNano() uint64 {
+ if x != nil {
+ return x.StartTimeUnixNano
+ }
+ return 0
+}
+
+func (x *SummaryDataPoint) GetTimeUnixNano() uint64 {
+ if x != nil {
+ return x.TimeUnixNano
+ }
+ return 0
+}
+
+func (x *SummaryDataPoint) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *SummaryDataPoint) GetSum() float64 {
+ if x != nil {
+ return x.Sum
+ }
+ return 0
+}
+
+func (x *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile {
+ if x != nil {
+ return x.QuantileValues
+ }
+ return nil
+}
+
+func (x *SummaryDataPoint) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+// A representation of an exemplar, which is a sample input measurement.
+// Exemplars also hold information about the environment when the measurement
+// was recorded, for example the span and trace ID of the active span when the
+// exemplar was recorded.
+type Exemplar struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of key/value pairs that were filtered out by the aggregator, but
+ // recorded alongside the original measurement. Only key/value pairs that were
+ // filtered out by the aggregator should be included
+ FilteredAttributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"`
+ // time_unix_nano is the exact time when this exemplar was recorded
+ //
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+ // 1970.
+ TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+ // The value of the measurement that was recorded. An exemplar is
+ // considered invalid when one of the recognized value fields is not present
+ // inside this oneof.
+ //
+ // Types that are assignable to Value:
+ // *Exemplar_AsDouble
+ // *Exemplar_AsInt
+ Value isExemplar_Value `protobuf_oneof:"value"`
+ // (Optional) Span ID of the exemplar trace.
+ // span_id may be missing if the measurement is not recorded inside a trace
+ // or if the trace is not sampled.
+ SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // (Optional) Trace ID of the exemplar trace.
+ // trace_id may be missing if the measurement is not recorded inside a trace
+ // or if the trace is not sampled.
+ TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+}
+
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *Exemplar) GetFilteredAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.FilteredAttributes
+ }
+ return nil
+}
+
+func (x *Exemplar) GetTimeUnixNano() uint64 {
+ if x != nil {
+ return x.TimeUnixNano
+ }
+ return 0
+}
+
+func (m *Exemplar) GetValue() isExemplar_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *Exemplar) GetAsDouble() float64 {
+ if x, ok := x.GetValue().(*Exemplar_AsDouble); ok {
+ return x.AsDouble
+ }
+ return 0
+}
+
+func (x *Exemplar) GetAsInt() int64 {
+ if x, ok := x.GetValue().(*Exemplar_AsInt); ok {
+ return x.AsInt
+ }
+ return 0
+}
+
+func (x *Exemplar) GetSpanId() []byte {
+ if x != nil {
+ return x.SpanId
+ }
+ return nil
+}
+
+func (x *Exemplar) GetTraceId() []byte {
+ if x != nil {
+ return x.TraceId
+ }
+ return nil
+}
+
+type isExemplar_Value interface {
+ isExemplar_Value()
+}
+
+type Exemplar_AsDouble struct {
+ AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"`
+}
+
+type Exemplar_AsInt struct {
+ AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
+}
+
+func (*Exemplar_AsDouble) isExemplar_Value() {}
+
+func (*Exemplar_AsInt) isExemplar_Value() {}
+
+// Buckets are a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialHistogramDataPoint_Buckets struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Offset is the bucket index of the first entry in the bucket_counts array.
+ //
+ // Note: This uses a varint encoding as a simple form of compression.
+ Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
+ // bucket_counts is an array of count values, where bucket_counts[i] carries
+ // the count of the bucket at index (offset+i). bucket_counts[i] is the count
+ // of values greater than base^(offset+i) and less than or equal to
+ // base^(offset+i+1).
+ //
+ // Note: By contrast, the explicit HistogramDataPoint uses
+ // fixed64. This field is expected to have many buckets,
+ // especially zeros, so uint64 has been selected to ensure
+ // varint encoding.
+ BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) Reset() {
+ *x = ExponentialHistogramDataPoint_Buckets{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {}
+
+func (x *ExponentialHistogramDataPoint_Buckets) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExponentialHistogramDataPoint_Buckets.ProtoReflect.Descriptor instead.
+func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{11, 0}
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 {
+ if x != nil {
+ return x.Offset
+ }
+ return 0
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 {
+ if x != nil {
+ return x.BucketCounts
+ }
+ return nil
+}
+
+// Represents the value at a given quantile of a distribution.
+//
+// To record Min and Max values following conventions are used:
+// - The 1.0 quantile is equivalent to the maximum value observed.
+// - The 0.0 quantile is equivalent to the minimum value observed.
+//
+// See the following issue for more context:
+// https://github.com/open-telemetry/opentelemetry-proto/issues/125
+type SummaryDataPoint_ValueAtQuantile struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The quantile of a distribution. Must be in the interval
+ // [0.0, 1.0].
+ Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
+ // The value at the given quantile of a distribution.
+ //
+ // Quantile values must NOT be negative.
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) Reset() {
+ *x = SummaryDataPoint_ValueAtQuantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {}
+
+func (x *SummaryDataPoint_ValueAtQuantile) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryDataPoint_ValueAtQuantile.ProtoReflect.Descriptor instead.
+func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 {
+ if x != nil {
+ return x.Quantile
+ }
+ return 0
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) GetValue() float64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_opentelemetry_proto_metrics_v1_metrics_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31,
+ 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2a,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x69, 0x0a, 0x0b, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0xd2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x12, 0x51, 0x0a, 0x0d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72,
+ 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55,
+ 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x22, 0xba, 0x01, 0x0a, 0x0c, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x49, 0x0a, 0x05, 0x73,
+ 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72,
+ 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x52,
+ 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52,
+ 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xe1, 0x03, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x3d, 0x0a, 0x05,
+ 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x61, 0x75,
+ 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x03, 0x73,
+ 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x48, 0x00, 0x52,
+ 0x03, 0x73, 0x75, 0x6d, 0x12, 0x49, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72,
+ 0x61, 0x6d, 0x48, 0x00, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12,
+ 0x6b, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x68,
+ 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f,
+ 0x67, 0x72, 0x61, 0x6d, 0x48, 0x00, 0x52, 0x14, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x43, 0x0a, 0x07,
+ 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53,
+ 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72,
+ 0x79, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a,
+ 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x59, 0x0a, 0x05, 0x47,
+ 0x61, 0x75, 0x67, 0x65, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61,
+ 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xeb, 0x01, 0x0a, 0x03, 0x53, 0x75, 0x6d, 0x12, 0x50,
+ 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x50,
+ 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73,
+ 0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65,
+ 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x16, 0x61, 0x67, 0x67, 0x72, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74,
+ 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x6d, 0x6f, 0x6e, 0x6f, 0x74, 0x6f, 0x6e, 0x69,
+ 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4d, 0x6f, 0x6e, 0x6f, 0x74,
+ 0x6f, 0x6e, 0x69, 0x63, 0x22, 0xd1, 0x01, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72,
+ 0x61, 0x6d, 0x12, 0x53, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72,
+ 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74,
+ 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69,
+ 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79,
+ 0x52, 0x16, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d,
+ 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0xe7, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70,
+ 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x12, 0x5e, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+ 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61,
+ 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+ 0x73, 0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54,
+ 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x16, 0x61, 0x67, 0x67, 0x72,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69,
+ 0x74, 0x79, 0x22, 0x5c, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a,
+ 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50,
+ 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73,
+ 0x22, 0xd6, 0x02, 0x0a, 0x0f, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x50,
+ 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x0a,
+ 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78,
+ 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x24,
+ 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78,
+ 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x1d, 0x0a, 0x09, 0x61, 0x73, 0x5f, 0x64, 0x6f, 0x75, 0x62, 0x6c,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x73, 0x44, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x61, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x10, 0x48, 0x00, 0x52, 0x05, 0x61, 0x73, 0x49, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x09,
+ 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70,
+ 0x6c, 0x61, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x48, 0x69,
+ 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+ 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f,
+ 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52,
+ 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x15, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a,
+ 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x06,
+ 0x20, 0x03, 0x28, 0x06, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x62,
+ 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0e, 0x65, 0x78, 0x70,
+ 0x6c, 0x69, 0x63, 0x69, 0x74, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x12, 0x46, 0x0a, 0x09, 0x65,
+ 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
+ 0x61, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x69, 0x6e,
+ 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x88, 0x01, 0x01,
+ 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x48, 0x02, 0x52,
+ 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x73, 0x75, 0x6d, 0x42,
+ 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x61, 0x78, 0x4a,
+ 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xfa, 0x05, 0x0a, 0x1d, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65,
+ 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61,
+ 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
+ 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75,
+ 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+ 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e,
+ 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55,
+ 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x15, 0x0a,
+ 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75,
+ 0x6d, 0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x11, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65,
+ 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x09,
+ 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x08, 0x70, 0x6f, 0x73,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
+ 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x73, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x61, 0x0a, 0x08,
+ 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f,
+ 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x08, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x12,
+ 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05,
+ 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x46, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61,
+ 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c,
+ 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x15, 0x0a,
+ 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x03, 0x6d, 0x69,
+ 0x6e, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0d, 0x20, 0x01, 0x28,
+ 0x01, 0x48, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x7a,
+ 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f,
+ 0x6c, 0x64, 0x1a, 0x46, 0x0a, 0x07, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a,
+ 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f,
+ 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0c, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x73,
+ 0x75, 0x6d, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d,
+ 0x61, 0x78, 0x22, 0xa6, 0x03, 0x0a, 0x10, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61,
+ 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
+ 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75,
+ 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+ 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e,
+ 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55,
+ 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x10, 0x0a,
+ 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12,
+ 0x69, 0x0a, 0x0f, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72,
+ 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x0e, 0x71, 0x75, 0x61, 0x6e,
+ 0x74, 0x69, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73,
+ 0x1a, 0x43, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74,
+ 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x85, 0x02, 0x0a, 0x08,
+ 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x58, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+ 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12,
+ 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f,
+ 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65,
+ 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x1d, 0x0a, 0x09, 0x61, 0x73, 0x5f, 0x64,
+ 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61,
+ 0x73, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x61, 0x73, 0x5f, 0x69, 0x6e,
+ 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x10, 0x48, 0x00, 0x52, 0x05, 0x61, 0x73, 0x49, 0x6e, 0x74,
+ 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x49, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08,
+ 0x01, 0x10, 0x02, 0x2a, 0x8c, 0x01, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x27,
+ 0x0a, 0x23, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45,
+ 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x41, 0x47, 0x47, 0x52, 0x45,
+ 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49,
+ 0x54, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22, 0x41, 0x47,
+ 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52,
+ 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45,
+ 0x10, 0x02, 0x2a, 0x5e, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x46,
+ 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x50, 0x4f, 0x49,
+ 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f,
+ 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x27, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x50, 0x4f,
+ 0x49, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x4e, 0x4f, 0x5f, 0x52, 0x45, 0x43,
+ 0x4f, 0x52, 0x44, 0x45, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b,
+ 0x10, 0x01, 0x42, 0x7f, 0x0a, 0x21, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+ 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f,
+ 0x76, 0x31, 0xaa, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData = file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc
+)
+
+func file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData
+}
+
+var file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes = []interface{}{
+ (AggregationTemporality)(0), // 0: opentelemetry.proto.metrics.v1.AggregationTemporality
+ (DataPointFlags)(0), // 1: opentelemetry.proto.metrics.v1.DataPointFlags
+ (*MetricsData)(nil), // 2: opentelemetry.proto.metrics.v1.MetricsData
+ (*ResourceMetrics)(nil), // 3: opentelemetry.proto.metrics.v1.ResourceMetrics
+ (*ScopeMetrics)(nil), // 4: opentelemetry.proto.metrics.v1.ScopeMetrics
+ (*Metric)(nil), // 5: opentelemetry.proto.metrics.v1.Metric
+ (*Gauge)(nil), // 6: opentelemetry.proto.metrics.v1.Gauge
+ (*Sum)(nil), // 7: opentelemetry.proto.metrics.v1.Sum
+ (*Histogram)(nil), // 8: opentelemetry.proto.metrics.v1.Histogram
+ (*ExponentialHistogram)(nil), // 9: opentelemetry.proto.metrics.v1.ExponentialHistogram
+ (*Summary)(nil), // 10: opentelemetry.proto.metrics.v1.Summary
+ (*NumberDataPoint)(nil), // 11: opentelemetry.proto.metrics.v1.NumberDataPoint
+ (*HistogramDataPoint)(nil), // 12: opentelemetry.proto.metrics.v1.HistogramDataPoint
+ (*ExponentialHistogramDataPoint)(nil), // 13: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint
+ (*SummaryDataPoint)(nil), // 14: opentelemetry.proto.metrics.v1.SummaryDataPoint
+ (*Exemplar)(nil), // 15: opentelemetry.proto.metrics.v1.Exemplar
+ (*ExponentialHistogramDataPoint_Buckets)(nil), // 16: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets
+ (*SummaryDataPoint_ValueAtQuantile)(nil), // 17: opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile
+ (*v1.Resource)(nil), // 18: opentelemetry.proto.resource.v1.Resource
+ (*v11.InstrumentationScope)(nil), // 19: opentelemetry.proto.common.v1.InstrumentationScope
+ (*v11.KeyValue)(nil), // 20: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs = []int32{
+ 3, // 0: opentelemetry.proto.metrics.v1.MetricsData.resource_metrics:type_name -> opentelemetry.proto.metrics.v1.ResourceMetrics
+ 18, // 1: opentelemetry.proto.metrics.v1.ResourceMetrics.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+ 4, // 2: opentelemetry.proto.metrics.v1.ResourceMetrics.scope_metrics:type_name -> opentelemetry.proto.metrics.v1.ScopeMetrics
+ 19, // 3: opentelemetry.proto.metrics.v1.ScopeMetrics.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+ 5, // 4: opentelemetry.proto.metrics.v1.ScopeMetrics.metrics:type_name -> opentelemetry.proto.metrics.v1.Metric
+ 6, // 5: opentelemetry.proto.metrics.v1.Metric.gauge:type_name -> opentelemetry.proto.metrics.v1.Gauge
+ 7, // 6: opentelemetry.proto.metrics.v1.Metric.sum:type_name -> opentelemetry.proto.metrics.v1.Sum
+ 8, // 7: opentelemetry.proto.metrics.v1.Metric.histogram:type_name -> opentelemetry.proto.metrics.v1.Histogram
+ 9, // 8: opentelemetry.proto.metrics.v1.Metric.exponential_histogram:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogram
+ 10, // 9: opentelemetry.proto.metrics.v1.Metric.summary:type_name -> opentelemetry.proto.metrics.v1.Summary
+ 11, // 10: opentelemetry.proto.metrics.v1.Gauge.data_points:type_name -> opentelemetry.proto.metrics.v1.NumberDataPoint
+ 11, // 11: opentelemetry.proto.metrics.v1.Sum.data_points:type_name -> opentelemetry.proto.metrics.v1.NumberDataPoint
+ 0, // 12: opentelemetry.proto.metrics.v1.Sum.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality
+ 12, // 13: opentelemetry.proto.metrics.v1.Histogram.data_points:type_name -> opentelemetry.proto.metrics.v1.HistogramDataPoint
+ 0, // 14: opentelemetry.proto.metrics.v1.Histogram.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality
+ 13, // 15: opentelemetry.proto.metrics.v1.ExponentialHistogram.data_points:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint
+ 0, // 16: opentelemetry.proto.metrics.v1.ExponentialHistogram.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality
+ 14, // 17: opentelemetry.proto.metrics.v1.Summary.data_points:type_name -> opentelemetry.proto.metrics.v1.SummaryDataPoint
+ 20, // 18: opentelemetry.proto.metrics.v1.NumberDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 15, // 19: opentelemetry.proto.metrics.v1.NumberDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar
+ 20, // 20: opentelemetry.proto.metrics.v1.HistogramDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 15, // 21: opentelemetry.proto.metrics.v1.HistogramDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar
+ 20, // 22: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 16, // 23: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.positive:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets
+ 16, // 24: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.negative:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets
+ 15, // 25: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar
+ 20, // 26: opentelemetry.proto.metrics.v1.SummaryDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 17, // 27: opentelemetry.proto.metrics.v1.SummaryDataPoint.quantile_values:type_name -> opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile
+ 20, // 28: opentelemetry.proto.metrics.v1.Exemplar.filtered_attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 29, // [29:29] is the sub-list for method output_type
+ 29, // [29:29] is the sub-list for method input_type
+ 29, // [29:29] is the sub-list for extension type_name
+ 29, // [29:29] is the sub-list for extension extendee
+ 0, // [0:29] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_metrics_v1_metrics_proto_init() }
+func file_opentelemetry_proto_metrics_v1_metrics_proto_init() {
+ if File_opentelemetry_proto_metrics_v1_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricsData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceMetrics); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopeMetrics); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Sum); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExponentialHistogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NumberDataPoint); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HistogramDataPoint); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExponentialHistogramDataPoint); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SummaryDataPoint); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExponentialHistogramDataPoint_Buckets); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SummaryDataPoint_ValueAtQuantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*Metric_Gauge)(nil),
+ (*Metric_Sum)(nil),
+ (*Metric_Histogram)(nil),
+ (*Metric_ExponentialHistogram)(nil),
+ (*Metric_Summary)(nil),
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9].OneofWrappers = []interface{}{
+ (*NumberDataPoint_AsDouble)(nil),
+ (*NumberDataPoint_AsInt)(nil),
+ }
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10].OneofWrappers = []interface{}{}
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11].OneofWrappers = []interface{}{}
+ file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13].OneofWrappers = []interface{}{
+ (*Exemplar_AsDouble)(nil),
+ (*Exemplar_AsInt)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs,
+ EnumInfos: file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes,
+ MessageInfos: file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_metrics_v1_metrics_proto = out.File
+ file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc = nil
+ file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes = nil
+ file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1e687431b66..346762c81ba 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -38,7 +38,7 @@ github.com/bufbuild/protocompile/protoutil
github.com/bufbuild/protocompile/reporter
github.com/bufbuild/protocompile/sourceinfo
github.com/bufbuild/protocompile/walk
-# github.com/cenkalti/backoff/v4 v4.2.1
+# github.com/cenkalti/backoff/v4 v4.3.0
## explicit; go 1.18
github.com/cenkalti/backoff/v4
# github.com/cespare/xxhash/v2 v2.2.0
@@ -137,7 +137,7 @@ github.com/evanw/esbuild/pkg/api
github.com/fatih/color
# github.com/fsnotify/fsnotify v1.5.4
## explicit; go 1.16
-# github.com/go-logr/logr v1.4.1
+# github.com/go-logr/logr v1.4.2
## explicit; go 1.18
github.com/go-logr/logr
github.com/go-logr/logr/funcr
@@ -189,6 +189,9 @@ github.com/grafana/xk6-browser/trace
# github.com/grafana/xk6-dashboard v0.7.5
## explicit; go 1.20
github.com/grafana/xk6-dashboard/dashboard
+# github.com/grafana/xk6-output-opentelemetry v0.1.1
+## explicit; go 1.20
+github.com/grafana/xk6-output-opentelemetry/pkg/opentelemetry
# github.com/grafana/xk6-output-prometheus-remote v0.4.0
## explicit; go 1.20
github.com/grafana/xk6-output-prometheus-remote/pkg/remote
@@ -209,7 +212,7 @@ github.com/grafana/xk6-websockets/websockets/events
github.com/grpc-ecosystem/go-grpc-middleware/retry
github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
github.com/grpc-ecosystem/go-grpc-middleware/util/metautils
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1
## explicit; go 1.19
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
@@ -359,6 +362,22 @@ go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.24.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform
# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -381,6 +400,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
## explicit; go 1.20
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
+go.opentelemetry.io/otel/metric/noop
# go.opentelemetry.io/otel/sdk v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/sdk
@@ -389,6 +409,14 @@ go.opentelemetry.io/otel/sdk/internal
go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
+# go.opentelemetry.io/otel/sdk/metric v1.24.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/sdk/metric
+go.opentelemetry.io/otel/sdk/metric/internal
+go.opentelemetry.io/otel/sdk/metric/internal/aggregate
+go.opentelemetry.io/otel/sdk/metric/internal/exemplar
+go.opentelemetry.io/otel/sdk/metric/internal/x
+go.opentelemetry.io/otel/sdk/metric/metricdata
# go.opentelemetry.io/otel/trace v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/trace
@@ -396,8 +424,10 @@ go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/noop
# go.opentelemetry.io/proto/otlp v1.1.0
## explicit; go 1.17
+go.opentelemetry.io/proto/otlp/collector/metrics/v1
go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
+go.opentelemetry.io/proto/otlp/metrics/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
# go.uber.org/goleak v1.3.0
@@ -458,7 +488,7 @@ golang.org/x/time/rate
# google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237
## explicit; go 1.19
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status