diff --git a/go.mod b/go.mod index acdf36a5f8..526d1e7da8 100644 --- a/go.mod +++ b/go.mod @@ -34,10 +34,10 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/lib/pq v1.10.9 - github.com/minio/minio-go/v7 v7.0.79 + github.com/minio/minio-go/v7 v7.0.80 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 - github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e + github.com/opentracing-contrib/go-grpc v0.1.0 github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 @@ -46,31 +46,31 @@ require ( github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.60.1 // Prometheus maps version 2.x.y to tags v0.x.y. - github.com/prometheus/prometheus v0.55.0 + github.com/prometheus/prometheus v0.55.1 github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.9.0 github.com/thanos-io/objstore v0.0.0-20240913074259-63feed0da069 github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31 - github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290 + github.com/thanos-io/thanos v0.36.1 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.16 go.etcd.io/etcd/client/pkg/v3 v3.5.16 go.etcd.io/etcd/client/v3 v3.5.16 - go.opentelemetry.io/contrib/propagators/aws v1.31.0 - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/bridge/opentracing v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/contrib/propagators/aws v1.32.0 + go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/otel/bridge/opentracing v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/atomic v1.11.0 - golang.org/x/net v0.30.0 - golang.org/x/sync v0.8.0 - golang.org/x/time v0.7.0 - google.golang.org/grpc v1.67.1 + golang.org/x/net v0.31.0 + golang.org/x/sync v0.9.0 + golang.org/x/time v0.8.0 + google.golang.org/grpc v1.68.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -81,7 +81,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/google/go-cmp v0.6.0 github.com/sercand/kuberesolver/v4 v4.0.0 - go.opentelemetry.io/collector/pdata v1.18.0 + go.opentelemetry.io/collector/pdata v1.19.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 google.golang.org/protobuf v1.35.1 ) @@ -121,7 +121,6 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.5.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect @@ -153,7 +152,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -200,7 +199,6 @@ require ( github.com/rs/cors v1.11.0 // indirect github.com/rs/xid v1.6.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect - github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 // indirect github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -220,24 +218,24 @@ require ( go.opentelemetry.io/contrib/propagators/b3 v1.29.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.29.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.29.0 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/tools v0.24.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/api v0.195.0 // indirect google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect k8s.io/apimachinery v0.31.1 // indirect diff --git a/go.sum b/go.sum index a1b95fa256..3ef5a15165 100644 --- a/go.sum +++ b/go.sum @@ -953,8 +953,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 h1:BS21ZUJ/B5X2UVUbczfmdWH7GapPWAhxcMsDnjJTU1E= -github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dhui/dktest v0.4.3 h1:wquqUxAFdcUgabAVLvSCOKOlag5cIZuaOjYIBOWdsR0= @@ -1268,9 +1266,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= -github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= @@ -1459,8 +1456,8 @@ github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcs github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.79 h1:SvJZpj3hT0RN+4KiuX/FxLfPZdsuegy6d/2PiemM/bM= -github.com/minio/minio-go/v7 v7.0.79/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= +github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= +github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -1505,15 +1502,15 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= +github.com/opentracing-contrib/go-grpc v0.1.0 
h1:9JHDtQXv6UL0tFF8KJB/4ApJgeOcaHp1h07d0PJjESc= +github.com/opentracing-contrib/go-grpc v0.1.0/go.mod h1:i3/jx/TvJZ/HKidtT4XGIi/NosUEpzS9xjVJctbKZzI= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= @@ -1600,8 +1597,8 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.55.0 h1:ITinOi1zr3HemoVWHf679PfRRmpxZOcR4nEvsze6eB0= -github.com/prometheus/prometheus v0.55.0/go.mod h1:GGS7QlWKCqCbcEzWsVahYIfQwiGhcExkarHyLJTsv6I= +github.com/prometheus/prometheus v0.55.1 h1:+NM9V/h4A+wRkOyQzGewzgPPgq/iX2LUQoISNvmjZmI= +github.com/prometheus/prometheus v0.55.1/go.mod h1:GGS7QlWKCqCbcEzWsVahYIfQwiGhcExkarHyLJTsv6I= github.com/redis/rueidis v1.0.45-alpha.1 h1:69Bu1l7gVC/qDYuGGwMwGg2rjOjSyxESol/Zila62gY= github.com/redis/rueidis v1.0.45-alpha.1/go.mod h1:q7BfhDaPt7xxwy2nv2RqQO12/mmHflDjebpcNwWFjms= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1626,8 +1623,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 h1:emzAzMZ1L9iaKCTxdy3Em8Wv4ChIAGnfiz18Cda70g4= -github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771/go.mod h1:bR6DqgcAl1zTcOX8/pE2Qkj9XO00eCNqmKb7lXP8EAg= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= @@ -1688,8 +1683,8 @@ github.com/thanos-io/objstore v0.0.0-20240913074259-63feed0da069 h1:TUPZ6euAh8I6 github.com/thanos-io/objstore v0.0.0-20240913074259-63feed0da069/go.mod h1:Cba80S8NbVBBdyZKzra7San/jXvpAxArbpFymWzIZhg= github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31 h1:xPaP58g+3EPohdw4cv+6jv5+LcX6LynhHvQcYwTAMxQ= github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31/go.mod h1:wx0JlRZtsB2S10JYUgeg5GqLfMxw31SzArP+28yyE00= -github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290 h1:d58OLbcIC6F3TviRFK85Ucdxbs/3cPI1fcLRBWiNThA= -github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290/go.mod h1:kqF1rQspIAL+rktXL3OSKfiDjJmsCRezQblsR56degY= +github.com/thanos-io/thanos v0.36.1 h1:NsUBsWkJcZ6Uo2VuEr06mZZ9YNMLGVA2sIGVu+LsrNU= +github.com/thanos-io/thanos v0.36.1/go.mod h1:f7LiW4+/xvV5+gkseMuVbQnrbFTFnCPv5+X1M6mXkn4= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod 
h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= @@ -1740,8 +1735,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg= -go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= +go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE= +go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= @@ -1750,28 +1745,28 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+n go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 h1:h/O1OcNbqrFilsMKfG6MJWWpx8gzCDfn9D+1W7lU3lE= go.opentelemetry.io/contrib/propagators/autoprop v0.54.0/go.mod h1:VIaPlErTgbng1UhrMA4N6Yy+f94PLA/qRPOCMATdoCs= -go.opentelemetry.io/contrib/propagators/aws v1.31.0 h1:OJHDboLd4zH1j0UrxoQbSDPEykmBJ/epVa/v+fRCRi0= -go.opentelemetry.io/contrib/propagators/aws v1.31.0/go.mod h1:mtT7x7gY+jL4fH34l8dkZeo6Jvf+3Fy002rjuEdRnTM= +go.opentelemetry.io/contrib/propagators/aws v1.32.0 h1:NELzr8bW7a7aHVZj5gaep1PfkvoSCGx+1qNGZx/uhhU= +go.opentelemetry.io/contrib/propagators/aws v1.32.0/go.mod h1:XKMrzHNka3eOA+nGEcNKYVL9s77TAhkwQEynYuaRFnQ= go.opentelemetry.io/contrib/propagators/b3 v1.29.0 h1:hNjyoRsAACnhoOLWupItUjABzeYmX3GTTZLzwJluJlk= go.opentelemetry.io/contrib/propagators/b3 v1.29.0/go.mod h1:E76MTitU1Niwo5NSN+mVxkyLu4h4h7Dp/yh38F2WuIU= go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 h1:+YPiqF5rR6PqHBlmEFLPumbSP0gY0WmCGFayXRcCLvs= go.opentelemetry.io/contrib/propagators/jaeger v1.29.0/go.mod h1:6PD7q7qquWSp3Z4HeM3e/2ipRubaY1rXZO8NIHVDZjs= go.opentelemetry.io/contrib/propagators/ot v1.29.0 h1:CaJU78FvXrA6ajjp1dOdcABBEjh529+hl396RTqc2LQ= go.opentelemetry.io/contrib/propagators/ot v1.29.0/go.mod h1:Sc0omwLb4eptUhwOAfYXfmPmErHPu2HV6vkeDge/3sY= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/bridge/opentracing v1.31.0 h1:S6SA1IQdNHgfZfgkaWBKQqNIlMNiPoyQDACii2uKQ9k= -go.opentelemetry.io/otel/bridge/opentracing v1.31.0/go.mod h1:DnEoPjq3eNCtnB41TqlUQYtarWe8PqJNWRt37daADe4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= 
-go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/bridge/opentracing v1.32.0 h1:eZ3h8PniTg1rZ5pgBCodc8zvLees+FOKI6KLtlSIZCE= +go.opentelemetry.io/otel/bridge/opentracing v1.32.0/go.mod h1:WnzpYWji96dHm4ol0DYYU7wOcwcoj0ySPLw6wBjAMgk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1822,8 +1817,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1900,7 +1895,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1961,8 +1955,8 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2015,8 +2009,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2126,8 +2120,8 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2145,8 +2139,8 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term 
v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2167,16 +2161,16 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2495,8 +2489,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -2510,8 +2504,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2563,7 +2557,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/vendor/github.com/dgryski/go-metro/LICENSE b/vendor/github.com/dgryski/go-metro/LICENSE deleted file mode 100644 index 6243b617cf..0000000000 --- 
a/vendor/github.com/dgryski/go-metro/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This package is a mechanical translation of the reference C++ code for -MetroHash, available at https://github.com/jandrewrogers/MetroHash - -The MIT License (MIT) - -Copyright (c) 2016 Damian Gryski - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/dgryski/go-metro/README b/vendor/github.com/dgryski/go-metro/README deleted file mode 100644 index 5ecebb3853..0000000000 --- a/vendor/github.com/dgryski/go-metro/README +++ /dev/null @@ -1,6 +0,0 @@ -MetroHash - -This package is a mechanical translation of the reference C++ code for -MetroHash, available at https://github.com/jandrewrogers/MetroHash - -I claim no additional copyright over the original implementation. diff --git a/vendor/github.com/dgryski/go-metro/metro.py b/vendor/github.com/dgryski/go-metro/metro.py deleted file mode 100644 index 8dd4d26e6a..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro.py +++ /dev/null @@ -1,199 +0,0 @@ -import peachpy.x86_64 - -k0 = 0xD6D018F5 -k1 = 0xA2AA033B -k2 = 0x62992FC1 -k3 = 0x30BC5B29 - -def advance(p,l,c): - ADD(p,c) - SUB(l,c) - -def imul(r,k): - t = GeneralPurposeRegister64() - MOV(t, k) - IMUL(r, t) - -def update32(v, p,idx, k, vadd): - r = GeneralPurposeRegister64() - MOV(r, [p + idx]) - imul(r, k) - ADD(v, r) - ROR(v, 29) - ADD(v, vadd) - -def final32(v, regs, keys): - r = GeneralPurposeRegister64() - MOV(r, v[regs[1]]) - ADD(r, v[regs[2]]) - imul(r, keys[0]) - ADD(r, v[regs[3]]) - ROR(r, 37) - imul(r, keys[1]) - XOR(v[regs[0]], r) - -seed = Argument(uint64_t) -buffer_base = Argument(ptr()) -buffer_len = Argument(int64_t) -buffer_cap = Argument(int64_t) - -def makeHash(name, args): - with Function(name, args, uint64_t) as function: - - reg_ptr = GeneralPurposeRegister64() - reg_ptr_len = GeneralPurposeRegister64() - reg_hash = GeneralPurposeRegister64() - - LOAD.ARGUMENT(reg_hash, seed) - LOAD.ARGUMENT(reg_ptr, buffer_base) - LOAD.ARGUMENT(reg_ptr_len, buffer_len) - - imul(reg_hash, k0) - r = GeneralPurposeRegister64() - MOV(r, k2*k0) - ADD(reg_hash, r) - - after32 = Label("after32") - - CMP(reg_ptr_len, 32) - JL(after32) - v = [GeneralPurposeRegister64() for _ in range(4)] - for i in range(4): - MOV(v[i], reg_hash) - - with Loop() as loop: - update32(v[0], reg_ptr, 0, k0, v[2]) - update32(v[1], reg_ptr, 8, k1, v[3]) - update32(v[2], reg_ptr, 16, k2, v[0]) - update32(v[3], reg_ptr, 24, k3, v[1]) - - ADD(reg_ptr, 32) - SUB(reg_ptr_len, 32) - CMP(reg_ptr_len, 32) - JGE(loop.begin) - - final32(v, 
[2,0,3,1], [k0, k1]) - final32(v, [3,1,2,0], [k1, k0]) - final32(v, [0,0,2,3], [k0, k1]) - final32(v, [1,1,3,2], [k1, k0]) - - XOR(v[0], v[1]) - ADD(reg_hash, v[0]) - - LABEL(after32) - - after16 = Label("after16") - CMP(reg_ptr_len, 16) - JL(after16) - - for i in range(2): - MOV(v[i], [reg_ptr]) - imul(v[i], k2) - ADD(v[i], reg_hash) - - advance(reg_ptr, reg_ptr_len, 8) - - ROR(v[i], 29) - imul(v[i], k3) - - r = GeneralPurposeRegister64() - MOV(r, v[0]) - imul(r, k0) - ROR(r, 21) - ADD(r, v[1]) - XOR(v[0], r) - - MOV(r, v[1]) - imul(r, k3) - ROR(r, 21) - ADD(r, v[0]) - XOR(v[1], r) - - ADD(reg_hash, v[1]) - - LABEL(after16) - - after8 = Label("after8") - CMP(reg_ptr_len, 8) - JL(after8) - - r = GeneralPurposeRegister64() - MOV(r, [reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 8) - - MOV(r, reg_hash) - ROR(r, 55) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after8) - - after4 = Label("after4") - CMP(reg_ptr_len, 4) - JL(after4) - - r = GeneralPurposeRegister64() - XOR(r, r) - MOV(r.as_dword, dword[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 4) - - MOV(r, reg_hash) - ROR(r, 26) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after4) - - after2 = Label("after2") - CMP(reg_ptr_len, 2) - JL(after2) - - r = GeneralPurposeRegister64() - XOR(r,r) - MOV(r.as_word, word[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 2) - - MOV(r, reg_hash) - ROR(r, 48) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after2) - - after1 = Label("after1") - CMP(reg_ptr_len, 1) - JL(after1) - - r = GeneralPurposeRegister64() - MOVZX(r, byte[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - - MOV(r, reg_hash) - ROR(r, 37) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after1) - - r = GeneralPurposeRegister64() - MOV(r, reg_hash) - ROR(r, 28) - XOR(reg_hash, r) - - imul(reg_hash, k0) - - MOV(r, reg_hash) - ROR(r, 29) - XOR(reg_hash, r) - - RETURN(reg_hash) - -makeHash("Hash64", (buffer_base, buffer_len, buffer_cap, seed)) -makeHash("Hash64Str", (buffer_base, buffer_len, seed)) \ No newline at end of file diff --git a/vendor/github.com/dgryski/go-metro/metro128.go b/vendor/github.com/dgryski/go-metro/metro128.go deleted file mode 100644 index e8dd8ddbf5..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro128.go +++ /dev/null @@ -1,94 +0,0 @@ -package metro - -import "encoding/binary" - -func rotate_right(v uint64, k uint) uint64 { - return (v >> k) | (v << (64 - k)) -} - -func Hash128(buffer []byte, seed uint64) (uint64, uint64) { - - const ( - k0 = 0xC83A91E1 - k1 = 0x8648DBDB - k2 = 0x7BDEC03B - k3 = 0x2F5870A5 - ) - - ptr := buffer - - var v [4]uint64 - - v[0] = (seed - k0) * k3 - v[1] = (seed + k1) * k2 - - if len(ptr) >= 32 { - v[2] = (seed + k0) * k2 - v[3] = (seed - k1) * k3 - - for len(ptr) >= 32 { - v[0] += binary.LittleEndian.Uint64(ptr) * k0 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 29) + v[2] - v[1] += binary.LittleEndian.Uint64(ptr) * k1 - ptr = ptr[8:] - v[1] = rotate_right(v[1], 29) + v[3] - v[2] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[2] = rotate_right(v[2], 29) + v[0] - v[3] += binary.LittleEndian.Uint64(ptr) * k3 - ptr = ptr[8:] - v[3] = rotate_right(v[3], 29) + v[1] - } - - v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 21) * k1 - v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 21) * k0 - v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 21) * k1 - v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 21) * k0 - } - - if len(ptr) >= 16 { - v[0] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 33) * 
k3 - v[1] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[1] = rotate_right(v[1], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 45) * k1 - v[1] ^= rotate_right((v[1]*k3)+v[0], 45) * k0 - } - - if len(ptr) >= 8 { - v[0] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 27) * k1 - } - - if len(ptr) >= 4 { - v[1] += uint64(binary.LittleEndian.Uint32(ptr)) * k2 - ptr = ptr[4:] - v[1] = rotate_right(v[1], 33) * k3 - v[1] ^= rotate_right((v[1]*k3)+v[0], 46) * k0 - } - - if len(ptr) >= 2 { - v[0] += uint64(binary.LittleEndian.Uint16(ptr)) * k2 - ptr = ptr[2:] - v[0] = rotate_right(v[0], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 22) * k1 - } - - if len(ptr) >= 1 { - v[1] += uint64(ptr[0]) * k2 - v[1] = rotate_right(v[1], 33) * k3 - v[1] ^= rotate_right((v[1]*k3)+v[0], 58) * k0 - } - - v[0] += rotate_right((v[0]*k0)+v[1], 13) - v[1] += rotate_right((v[1]*k1)+v[0], 37) - v[0] += rotate_right((v[0]*k2)+v[1], 13) - v[1] += rotate_right((v[1]*k3)+v[0], 37) - - return v[0], v[1] -} diff --git a/vendor/github.com/dgryski/go-metro/metro64.go b/vendor/github.com/dgryski/go-metro/metro64.go deleted file mode 100644 index 1c04228a0b..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro64.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build noasm !amd64 gccgo - -package metro - -import ( - "encoding/binary" - "math/bits" -) - -func Hash64(buffer []byte, seed uint64) uint64 { - - const ( - k0 = 0xD6D018F5 - k1 = 0xA2AA033B - k2 = 0x62992FC1 - k3 = 0x30BC5B29 - ) - - ptr := buffer - - hash := (seed + k2) * k0 - - if len(ptr) >= 32 { - v0, v1, v2, v3 := hash, hash, hash, hash - - for len(ptr) >= 32 { - v0 += binary.LittleEndian.Uint64(ptr[:8]) * k0 - v0 = bits.RotateLeft64(v0, -29) + v2 - v1 += binary.LittleEndian.Uint64(ptr[8:16]) * k1 - v1 = bits.RotateLeft64(v1, -29) + v3 - v2 += binary.LittleEndian.Uint64(ptr[16:24]) * k2 - v2 = bits.RotateLeft64(v2, -29) + v0 - v3 += binary.LittleEndian.Uint64(ptr[24:32]) * k3 - v3 = bits.RotateLeft64(v3, -29) + v1 - ptr = ptr[32:] - } - - v2 ^= bits.RotateLeft64(((v0+v3)*k0)+v1, -37) * k1 - v3 ^= bits.RotateLeft64(((v1+v2)*k1)+v0, -37) * k0 - v0 ^= bits.RotateLeft64(((v0+v2)*k0)+v3, -37) * k1 - v1 ^= bits.RotateLeft64(((v1+v3)*k1)+v2, -37) * k0 - hash += v0 ^ v1 - } - - if len(ptr) >= 16 { - v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2) - v0 = bits.RotateLeft64(v0, -29) * k3 - v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2) - v1 = bits.RotateLeft64(v1, -29) * k3 - v0 ^= bits.RotateLeft64(v0*k0, -21) + v1 - v1 ^= bits.RotateLeft64(v1*k3, -21) + v0 - hash += v1 - ptr = ptr[16:] - } - - if len(ptr) >= 8 { - hash += binary.LittleEndian.Uint64(ptr[:8]) * k3 - ptr = ptr[8:] - hash ^= bits.RotateLeft64(hash, -55) * k1 - } - - if len(ptr) >= 4 { - hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3 - hash ^= bits.RotateLeft64(hash, -26) * k1 - ptr = ptr[4:] - } - - if len(ptr) >= 2 { - hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3 - ptr = ptr[2:] - hash ^= bits.RotateLeft64(hash, -48) * k1 - } - - if len(ptr) >= 1 { - hash += uint64(ptr[0]) * k3 - hash ^= bits.RotateLeft64(hash, -37) * k1 - } - - hash ^= bits.RotateLeft64(hash, -28) - hash *= k0 - hash ^= bits.RotateLeft64(hash, -29) - - return hash -} - -func Hash64Str(buffer string, seed uint64) uint64 { - return Hash64([]byte(buffer), seed) -} diff --git a/vendor/github.com/dgryski/go-metro/metro_amd64.s b/vendor/github.com/dgryski/go-metro/metro_amd64.s deleted file mode 100644 index 
7fa4730a48..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro_amd64.s +++ /dev/null @@ -1,372 +0,0 @@ -// +build !noasm -// +build !gccgo - -// Generated by PeachPy 0.2.0 from metro.py - -// func Hash64(buffer_base uintptr, buffer_len int64, buffer_cap int64, seed uint64) uint64 -TEXT ·Hash64(SB),4,$0-40 - MOVQ seed+24(FP), AX - MOVQ buffer_base+0(FP), BX - MOVQ buffer_len+8(FP), CX - MOVQ $3603962101, DX - IMULQ DX, AX - MOVQ $5961697176435608501, DX - ADDQ DX, AX - CMPQ CX, $32 - JLT after32 - MOVQ AX, DX - MOVQ AX, DI - MOVQ AX, SI - MOVQ AX, BP -loop_begin: - MOVQ 0(BX), R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ R8, DX - RORQ $29, DX - ADDQ SI, DX - MOVQ 8(BX), R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ R8, DI - RORQ $29, DI - ADDQ BP, DI - MOVQ 16(BX), R8 - MOVQ $1654206401, R9 - IMULQ R9, R8 - ADDQ R8, SI - RORQ $29, SI - ADDQ DX, SI - MOVQ 24(BX), R8 - MOVQ $817650473, R9 - IMULQ R9, R8 - ADDQ R8, BP - RORQ $29, BP - ADDQ DI, BP - ADDQ $32, BX - SUBQ $32, CX - CMPQ CX, $32 - JGE loop_begin - MOVQ DX, R8 - ADDQ BP, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ DI, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, SI - MOVQ DI, R8 - ADDQ SI, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ DX, R8 - RORQ $37, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - XORQ R8, BP - MOVQ DX, R8 - ADDQ SI, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ BP, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, DX - MOVQ DI, R8 - ADDQ BP, R8 - MOVQ $2729050939, BP - IMULQ BP, R8 - ADDQ SI, R8 - RORQ $37, R8 - MOVQ $3603962101, SI - IMULQ SI, R8 - XORQ R8, DI - XORQ DI, DX - ADDQ DX, AX -after32: - CMPQ CX, $16 - JLT after16 - MOVQ 0(BX), DX - MOVQ $1654206401, DI - IMULQ DI, DX - ADDQ AX, DX - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DX - MOVQ $817650473, DI - IMULQ DI, DX - MOVQ 0(BX), DI - MOVQ $1654206401, SI - IMULQ SI, DI - ADDQ AX, DI - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DI - MOVQ $817650473, SI - IMULQ SI, DI - MOVQ DX, SI - MOVQ $3603962101, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DI, SI - XORQ SI, DX - MOVQ DI, SI - MOVQ $817650473, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DX, SI - XORQ SI, DI - ADDQ DI, AX -after16: - CMPQ CX, $8 - JLT after8 - MOVQ 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $8, BX - SUBQ $8, CX - MOVQ AX, DX - RORQ $55, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after8: - CMPQ CX, $4 - JLT after4 - XORQ DX, DX - MOVL 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $4, BX - SUBQ $4, CX - MOVQ AX, DX - RORQ $26, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after4: - CMPQ CX, $2 - JLT after2 - XORQ DX, DX - MOVW 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $2, BX - SUBQ $2, CX - MOVQ AX, DX - RORQ $48, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after2: - CMPQ CX, $1 - JLT after1 - MOVBQZX 0(BX), BX - MOVQ $817650473, CX - IMULQ CX, BX - ADDQ BX, AX - MOVQ AX, BX - RORQ $37, BX - MOVQ $2729050939, CX - IMULQ CX, BX - XORQ BX, AX -after1: - MOVQ AX, BX - RORQ $28, BX - XORQ BX, AX - MOVQ $3603962101, BX - IMULQ BX, AX - MOVQ AX, BX - RORQ $29, BX - XORQ BX, AX - MOVQ AX, ret+32(FP) - RET - -// func Hash64Str(buffer_base uintptr, buffer_len int64, seed uint64) uint64 -TEXT ·Hash64Str(SB),4,$0-32 - MOVQ seed+16(FP), AX - MOVQ buffer_base+0(FP), BX - MOVQ buffer_len+8(FP), CX - MOVQ $3603962101, DX - IMULQ DX, AX - MOVQ $5961697176435608501, DX - ADDQ DX, AX - CMPQ CX, $32 - JLT after32 - MOVQ AX, DX - MOVQ AX, DI - MOVQ AX, SI - MOVQ 
AX, BP -loop_begin: - MOVQ 0(BX), R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ R8, DX - RORQ $29, DX - ADDQ SI, DX - MOVQ 8(BX), R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ R8, DI - RORQ $29, DI - ADDQ BP, DI - MOVQ 16(BX), R8 - MOVQ $1654206401, R9 - IMULQ R9, R8 - ADDQ R8, SI - RORQ $29, SI - ADDQ DX, SI - MOVQ 24(BX), R8 - MOVQ $817650473, R9 - IMULQ R9, R8 - ADDQ R8, BP - RORQ $29, BP - ADDQ DI, BP - ADDQ $32, BX - SUBQ $32, CX - CMPQ CX, $32 - JGE loop_begin - MOVQ DX, R8 - ADDQ BP, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ DI, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, SI - MOVQ DI, R8 - ADDQ SI, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ DX, R8 - RORQ $37, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - XORQ R8, BP - MOVQ DX, R8 - ADDQ SI, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ BP, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, DX - MOVQ DI, R8 - ADDQ BP, R8 - MOVQ $2729050939, BP - IMULQ BP, R8 - ADDQ SI, R8 - RORQ $37, R8 - MOVQ $3603962101, SI - IMULQ SI, R8 - XORQ R8, DI - XORQ DI, DX - ADDQ DX, AX -after32: - CMPQ CX, $16 - JLT after16 - MOVQ 0(BX), DX - MOVQ $1654206401, DI - IMULQ DI, DX - ADDQ AX, DX - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DX - MOVQ $817650473, DI - IMULQ DI, DX - MOVQ 0(BX), DI - MOVQ $1654206401, SI - IMULQ SI, DI - ADDQ AX, DI - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DI - MOVQ $817650473, SI - IMULQ SI, DI - MOVQ DX, SI - MOVQ $3603962101, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DI, SI - XORQ SI, DX - MOVQ DI, SI - MOVQ $817650473, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DX, SI - XORQ SI, DI - ADDQ DI, AX -after16: - CMPQ CX, $8 - JLT after8 - MOVQ 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $8, BX - SUBQ $8, CX - MOVQ AX, DX - RORQ $55, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after8: - CMPQ CX, $4 - JLT after4 - XORQ DX, DX - MOVL 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $4, BX - SUBQ $4, CX - MOVQ AX, DX - RORQ $26, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after4: - CMPQ CX, $2 - JLT after2 - XORQ DX, DX - MOVW 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $2, BX - SUBQ $2, CX - MOVQ AX, DX - RORQ $48, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after2: - CMPQ CX, $1 - JLT after1 - MOVBQZX 0(BX), BX - MOVQ $817650473, CX - IMULQ CX, BX - ADDQ BX, AX - MOVQ AX, BX - RORQ $37, BX - MOVQ $2729050939, CX - IMULQ CX, BX - XORQ BX, AX -after1: - MOVQ AX, BX - RORQ $28, BX - XORQ BX, AX - MOVQ $3603962101, BX - IMULQ BX, AX - MOVQ AX, BX - RORQ $29, BX - XORQ BX, AX - MOVQ AX, ret+24(FP) - RET diff --git a/vendor/github.com/dgryski/go-metro/metro_stub.go b/vendor/github.com/dgryski/go-metro/metro_stub.go deleted file mode 100644 index 86ddcb4705..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro_stub.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !noasm,amd64 -// +build !gccgo - -package metro - -//go:generate python -m peachpy.x86_64 metro.py -S -o metro_amd64.s -mabi=goasm -//go:noescape - -func Hash64(buffer []byte, seed uint64) uint64 -func Hash64Str(buffer string, seed uint64) uint64 diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb3..2e50082ad1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } 
// Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 01f5734191..41cd4f5030 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. 
This is diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0bf..2fcd7af3c4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e89..07c28112c8 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f20..f710036b35 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de4864..38ca39cc53 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. 
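The grpc-gateway v2.23.0 bump above vendors a new `runtime.HTTPStreamError` helper, which routes a mid-stream failure through the mux-configured stream error handler without closing the connection. A hedged sketch of how custom stream-forwarding code might call it — the wrapper name is made up for illustration, and `MarshalerForRequest` is assumed to be the usual way to pick the outbound marshaler:

```go
package gatewayutil

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

// notifyStreamError is a hypothetical wrapper: it resolves the outbound
// marshaler for the request and uses HTTPStreamError to write an error
// chunk into an already-started streaming response.
func notifyStreamError(ctx context.Context, mux *runtime.ServeMux, w http.ResponseWriter, r *http.Request, err error) {
	_, outbound := runtime.MarshalerForRequest(mux, r)
	runtime.HTTPStreamError(ctx, mux, outbound, w, r, err)
}
```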
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776c..66aa5f2dcc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 54a20e7116..380ec4fdef 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -99,6 +99,7 @@ type Client struct { healthStatus int32 trailingHeaderSupport bool + maxRetries int } // Options for New method @@ -123,12 +124,16 @@ type Options struct { // Custom hash routines. Leave nil to use standard. CustomMD5 func() md5simd.Hasher CustomSHA256 func() md5simd.Hasher + + // Number of times a request is retried. Defaults to 10 retries if this option is not configured. + // Set to 1 to disable retries. + MaxRetries int } // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.79" + libraryVersion = "v7.0.80" ) // User Agent should always following the below style. @@ -278,6 +283,11 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { // healthcheck is not initialized clnt.healthStatus = unknown + clnt.maxRetries = MaxRetry + if opts.MaxRetries > 0 { + clnt.maxRetries = opts.MaxRetries + } + // Return. return clnt, nil } @@ -590,9 +600,9 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, errors.New(c.endpointURL.String() + " is offline.") } - var retryable bool // Indicates if request can be retried. - var bodySeeker io.Seeker // Extracted seeker from io.Reader. - reqRetry := MaxRetry // Indicates how many times we can retry the request + var retryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + var reqRetry = c.maxRetries // Indicates how many times we can retry the request if metadata.contentBody != nil { // Check if body is seekable then it is retryable. 
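The api.go hunk above shows minio-go v7.0.80 adding a `MaxRetries` field to `minio.Options` and threading it into `executeMethod` as `c.maxRetries`. A minimal sketch of opting into it, assuming the usual `minio.New` and `credentials.NewStaticV4` constructors (endpoint and keys are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// MaxRetries: 1 disables retries entirely; leaving it at zero keeps
	// the library default of 10 attempts (MaxRetry).
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:      credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure:     true,
		MaxRetries: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```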
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index e706b57de6..344af2b780 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -434,12 +434,34 @@ func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElemen return enc.EncodeElement(delMarkerExp(de), start) } +// AllVersionsExpiration represents AllVersionsExpiration actions element in an ILM policy +type AllVersionsExpiration struct { + XMLName xml.Name `xml:"AllVersionsExpiration" json:"-"` + Days int `xml:"Days,omitempty" json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"DeleteMarker,omitempty" json:"DeleteMarker,omitempty"` +} + +// IsNull returns true if days field is 0 +func (e AllVersionsExpiration) IsNull() bool { + return e.Days == 0 +} + +// MarshalXML satisfies xml.Marshaler to provide custom encoding +func (e AllVersionsExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if e.IsNull() { + return nil + } + type allVersionsExp AllVersionsExpiration + return enc.EncodeElement(allVersionsExp(e), start) +} + // MarshalJSON customizes json encoding by omitting empty values func (r Rule) MarshalJSON() ([]byte, error) { type rule struct { AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` Expiration *Expiration `json:"Expiration,omitempty"` DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"` + AllVersionsExpiration *AllVersionsExpiration `json:"AllVersionsExpiration,omitempty"` ID string `json:"ID"` RuleFilter *Filter `json:"Filter,omitempty"` NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` @@ -475,6 +497,9 @@ func (r Rule) MarshalJSON() ([]byte, error) { if !r.NoncurrentVersionTransition.isNull() { newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition } + if !r.AllVersionsExpiration.IsNull() { + newr.AllVersionsExpiration = &r.AllVersionsExpiration + } return json.Marshal(newr) } @@ -485,6 +510,7 @@ type Rule struct { AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"` + AllVersionsExpiration AllVersionsExpiration `xml:"AllVersionsExpiration,omitempty" json:"AllVersionsExpiration,omitempty"` ID string `xml:"ID" json:"ID"` RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` diff --git a/vendor/github.com/opentracing-contrib/go-grpc/.editorconfig b/vendor/github.com/opentracing-contrib/go-grpc/.editorconfig new file mode 100644 index 0000000000..302cfc4277 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-grpc/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +indent_size = 4 +indent_style = tab + +[*.{md,yml,yaml}] +indent_size = 2 +indent_style = space diff --git a/vendor/github.com/opentracing-contrib/go-grpc/.golangci.yml 
b/vendor/github.com/opentracing-contrib/go-grpc/.golangci.yml new file mode 100644 index 0000000000..30c2b21b02 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-grpc/.golangci.yml @@ -0,0 +1,91 @@ +linters-settings: + misspell: + locale: US + revive: + ignore-generated-header: true + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: empty-block + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: increment-decrement + - name: indent-error-flow + - name: package-comments + - name: range + - name: receiver-naming + - name: redefines-builtin-id + - name: superfluous-else + - name: time-naming + - name: unexported-return + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + govet: + enable-all: true +linters: + enable: + - asasalint + - asciicheck + - bidichk + # - contextcheck + - copyloopvar + - dupword + - durationcheck + - errcheck + - errchkjson + - errname + - errorlint + - fatcontext + - forcetypeassert + - gocheckcompilerdirectives + - gochecksumtype + - gocritic + - godot + - gofmt + - gofumpt + - goimports + - gosec + - gosimple + - gosmopolitan + - govet + - ineffassign + - intrange + - makezero + - misspell + - musttag + - nilerr + - noctx + - nolintlint + - paralleltest + - perfsprint + - prealloc + - predeclared + - reassign + - revive + - staticcheck + - stylecheck + - tagalign + - tenv + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + - wastedassign + - whitespace + # - wrapcheck + disable-all: true +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +run: + timeout: 5m diff --git a/vendor/github.com/opentracing-contrib/go-grpc/README.md b/vendor/github.com/opentracing-contrib/go-grpc/README.md index e6b1065808..b6981f1f9b 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/README.md +++ b/vendor/github.com/opentracing-contrib/go-grpc/README.md @@ -1,11 +1,16 @@ # OpenTracing support for gRPC in Go +[![CI](https://github.com/opentracing-contrib/go-grpc/actions/workflows/ci.yml/badge.svg)](https://github.com/opentracing-contrib/go-grpc/actions/workflows/ci.yml) +[![Go Report Card](https://goreportcard.com/badge/github.com/opentracing-contrib/go-grpc)](https://goreportcard.com/report/github.com/opentracing-contrib/go-grpc) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/opentracing-contrib/go-grpc) +[![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/opentracing-contrib/go-grpc?logo=github&sort=semver)](https://github.com/opentracing-contrib/go-grpc/releases/latest) + The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based systems in Go. ## Installation -``` +```shell go get github.com/opentracing-contrib/go-grpc ``` @@ -54,4 +59,3 @@ s := grpc.NewServer( // All future RPC activity involving `s` will be automatically traced. 
``` - diff --git a/vendor/github.com/opentracing-contrib/go-grpc/client.go b/vendor/github.com/opentracing-contrib/go-grpc/client.go index c9496c85af..2f7cdf7611 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/client.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/client.go @@ -1,12 +1,13 @@ package otgrpc import ( + "context" + "errors" "io" "runtime" "sync/atomic" opentracing "github.com/opentracing/opentracing-go" - "context" "github.com/opentracing/opentracing-go/ext" "github.com/opentracing/opentracing-go/log" "google.golang.org/grpc" @@ -18,10 +19,10 @@ import ( // // For example: // -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) // // All gRPC client spans will inject the OpenTracing SpanContext into the gRPC // metadata; they will also look in the context.Context for an active @@ -80,10 +81,10 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) // // For example: // -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) // // All gRPC client spans will inject the OpenTracing SpanContext into the gRPC // metadata; they will also look in the context.Context for an active @@ -206,7 +207,7 @@ func (cs *openTracingClientStream) SendMsg(m interface{}) error { func (cs *openTracingClientStream) RecvMsg(m interface{}) error { err := cs.ClientStream.RecvMsg(m) - if err == io.EOF { + if errors.Is(err, io.EOF) { cs.finishFunc(nil) return err } else if err != nil { diff --git a/vendor/github.com/opentracing-contrib/go-grpc/errors.go b/vendor/github.com/opentracing-contrib/go-grpc/errors.go index 2f202af726..b8212b2ace 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/errors.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/errors.go @@ -21,7 +21,7 @@ const ( ServerError Class = "5xx" ) -// ErrorClass returns the class of the given error +// ErrorClass returns the class of the given error. func ErrorClass(err error) Class { if s, ok := status.FromError(err); ok { switch s.Code() { diff --git a/vendor/github.com/opentracing-contrib/go-grpc/options.go b/vendor/github.com/opentracing-contrib/go-grpc/options.go index 5fac41e761..1fdbe24a81 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/options.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/options.go @@ -32,7 +32,7 @@ type SpanInclusionFunc func( method string, req, resp interface{}) bool -// IncludingSpans binds a IncludeSpanFunc to the options +// IncludingSpans binds a IncludeSpanFunc to the options. func IncludingSpans(inclusionFunc SpanInclusionFunc) Option { return func(o *options) { o.inclusionFunc = inclusionFunc @@ -60,10 +60,9 @@ func SpanDecorator(decorator SpanDecoratorFunc) Option { // scale well as production use dictates other configuration and tuning // parameters. type options struct { - logPayloads bool - decorator SpanDecoratorFunc - // May be nil. + decorator SpanDecoratorFunc inclusionFunc SpanInclusionFunc + logPayloads bool } // newOptions returns the default options. 
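The interceptor doc comments above show the intended client-side wiring. A self-contained sketch under the assumption of a no-op global tracer and a placeholder address; an insecure connection is used only to keep the example short:

```go
package main

import (
	"log"

	otgrpc "github.com/opentracing-contrib/go-grpc"
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	tracer := opentracing.GlobalTracer() // a real tracer would be registered elsewhere

	conn, err := grpc.Dial(
		"localhost:9095", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)),
		grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// RPCs issued on conn now carry the OpenTracing SpanContext in gRPC metadata.
}
```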
diff --git a/vendor/github.com/opentracing-contrib/go-grpc/renovate.json b/vendor/github.com/opentracing-contrib/go-grpc/renovate.json new file mode 100644 index 0000000000..321957144b --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-grpc/renovate.json @@ -0,0 +1,7 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "github>opentracing-contrib/common", + "schedule:daily" + ] +} diff --git a/vendor/github.com/opentracing-contrib/go-grpc/server.go b/vendor/github.com/opentracing-contrib/go-grpc/server.go index c79d511bc5..71f7e1a841 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/server.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/server.go @@ -1,8 +1,9 @@ package otgrpc import ( - opentracing "github.com/opentracing/opentracing-go" "context" + + opentracing "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" "github.com/opentracing/opentracing-go/log" "google.golang.org/grpc" @@ -14,9 +15,9 @@ import ( // // For example: // -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) // // All gRPC server spans will look for an OpenTracing SpanContext in the gRPC // metadata; if found, the server span will act as the ChildOf that RPC @@ -33,12 +34,12 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (resp interface{}, err error) { - spanContext, err := extractSpanContext(ctx, tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } + spanContext, _ := extractSpanContext(ctx, tracer) + // if err != nil && !errors.Is(err, opentracing.ErrSpanContextNotFound) { + // // TODO: establish some sort of error reporting mechanism here. We + // // don't know where to put such an error and must rely on Tracer + // // implementations to do something appropriate for the time being. + // } if otgrpcOpts.inclusionFunc != nil && !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) { return handler(ctx, req) @@ -76,9 +77,9 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) // // For example: // -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) // // All gRPC server spans will look for an OpenTracing SpanContext in the gRPC // metadata; if found, the server span will act as the ChildOf that RPC @@ -90,12 +91,12 @@ func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...O otgrpcOpts := newOptions() otgrpcOpts.apply(optFuncs...) return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - spanContext, err := extractSpanContext(ss.Context(), tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. 
We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } + spanContext, _ := extractSpanContext(ss.Context(), tracer) + // if err != nil && !errors.Is(err, opentracing.ErrSpanContextNotFound) { + // // TODO: establish some sort of error reporting mechanism here. We + // // don't know where to put such an error and must rely on Tracer + // // implementations to do something appropriate for the time being. + // } if otgrpcOpts.inclusionFunc != nil && !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { return handler(srv, ss) @@ -111,7 +112,7 @@ func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...O ServerStream: ss, ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), } - err = handler(srv, ss) + err := handler(srv, ss) if err != nil { SetSpanTags(serverSpan, err, false) serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) diff --git a/vendor/github.com/opentracing-contrib/go-grpc/shared.go b/vendor/github.com/opentracing-contrib/go-grpc/shared.go index 9abd5eaa62..88f9cd13e6 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/shared.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/shared.go @@ -8,10 +8,8 @@ import ( "google.golang.org/grpc/metadata" ) -var ( - // Morally a const: - gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} -) +// Morally a const:. +var gRPCComponentTag = opentracing.Tag{Key: string(ext.Component), Value: "gRPC"} // metadataReaderWriter satisfies both the opentracing.TextMapReader and // opentracing.TextMapWriter interfaces. diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index cc5f19dae7..a509f783fa 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -534,6 +534,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper for _, el := range vec { f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ Metric: el.Metric, F: f, diff --git a/vendor/github.com/seiflotfy/cuckoofilter/.gitignore b/vendor/github.com/seiflotfy/cuckoofilter/.gitignore deleted file mode 100644 index 11b90db8d9..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea diff --git a/vendor/github.com/seiflotfy/cuckoofilter/LICENSE b/vendor/github.com/seiflotfy/cuckoofilter/LICENSE deleted file mode 100644 index 58393c98c1..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Seif Lotfy - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is 
-furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/seiflotfy/cuckoofilter/README.md b/vendor/github.com/seiflotfy/cuckoofilter/README.md deleted file mode 100644 index 2a77fb393f..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Cuckoo Filter - -[![GoDoc](https://godoc.org/github.com/seiflotfy/cuckoofilter?status.svg)](https://godoc.org/github.com/seiflotfy/cuckoofilter) [![CodeHunt.io](https://img.shields.io/badge/vote-codehunt.io-02AFD1.svg)](http://codehunt.io/sub/cuckoo-filter/?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) - -Cuckoo filter is a Bloom filter replacement for approximated set-membership queries. While Bloom filters are well-known space-efficient data structures to serve queries like "if item x is in a set?", they do not support deletion. Their variances to enable deletion (like counting Bloom filters) usually require much more space. - -Cuckoo filters provide the flexibility to add and remove items dynamically. A cuckoo filter is based on cuckoo hashing (and therefore named as cuckoo filter). It is essentially a cuckoo hash table storing each key's fingerprint. Cuckoo hash tables can be highly compact, thus a cuckoo filter could use less space than conventional Bloom filters, for applications that require low false positive rates (< 3%). - -For details about the algorithm and citations please use this article for now - -["Cuckoo Filter: Better Than Bloom" by Bin Fan, Dave Andersen and Michael Kaminsky](https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf) - -## Implementation details - -The paper cited above leaves several parameters to choose. In this implementation - -1. Every element has 2 possible bucket indices -2. Buckets have a static size of 4 fingerprints -3. Fingerprints have a static size of 8 bits - -1 and 2 are suggested to be the optimum by the authors. The choice of 3 comes down to the desired false positive rate. Given a target false positive rate of `r` and a bucket size `b`, they suggest choosing the fingerprint size `f` using - - f >= log2(2b/r) bits - -With the 8 bit fingerprint size in this repository, you can expect `r ~= 0.03`. -[Other implementations](https://github.com/panmari/cuckoofilter) use 16 bit, which correspond to a false positive rate of `r ~= 0.0001`. 
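As a quick check of the fingerprint-size rule quoted in the removed README, with bucket size b = 4 and a target false-positive rate r of about 3% (values taken from that README, not from this change):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	b := 4.0  // fingerprints per bucket
	r := 0.03 // target false-positive rate
	f := math.Log2(2 * b / r)
	// Prints roughly 8.06, which is why the removed package settled on 8-bit fingerprints.
	fmt.Printf("f >= %.2f bits\n", f)
}
```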
- -## Example usage: -```go -package main - -import "fmt" -import cuckoo "github.com/seiflotfy/cuckoofilter" - -func main() { - cf := cuckoo.NewFilter(1000) - cf.InsertUnique([]byte("geeky ogre")) - - // Lookup a string (and it a miss) if it exists in the cuckoofilter - cf.Lookup([]byte("hello")) - - count := cf.Count() - fmt.Println(count) // count == 1 - - // Delete a string (and it a miss) - cf.Delete([]byte("hello")) - - count = cf.Count() - fmt.Println(count) // count == 1 - - // Delete a string (a hit) - cf.Delete([]byte("geeky ogre")) - - count = cf.Count() - fmt.Println(count) // count == 0 - - cf.Reset() // reset -} -``` - -## Documentation: -["Cuckoo Filter on GoDoc"](http://godoc.org/github.com/seiflotfy/cuckoofilter) diff --git a/vendor/github.com/seiflotfy/cuckoofilter/bucket.go b/vendor/github.com/seiflotfy/cuckoofilter/bucket.go deleted file mode 100644 index 4a83fc5030..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/bucket.go +++ /dev/null @@ -1,45 +0,0 @@ -package cuckoo - -type fingerprint byte - -type bucket [bucketSize]fingerprint - -const ( - nullFp = 0 - bucketSize = 4 -) - -func (b *bucket) insert(fp fingerprint) bool { - for i, tfp := range b { - if tfp == nullFp { - b[i] = fp - return true - } - } - return false -} - -func (b *bucket) delete(fp fingerprint) bool { - for i, tfp := range b { - if tfp == fp { - b[i] = nullFp - return true - } - } - return false -} - -func (b *bucket) getFingerprintIndex(fp fingerprint) int { - for i, tfp := range b { - if tfp == fp { - return i - } - } - return -1 -} - -func (b *bucket) reset() { - for i := range b { - b[i] = nullFp - } -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go b/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go deleted file mode 100644 index ec0d246de2..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go +++ /dev/null @@ -1,165 +0,0 @@ -package cuckoo - -import ( - "fmt" - "math/bits" - "math/rand" -) - -const maxCuckooCount = 500 - -// Filter is a probabilistic counter -type Filter struct { - buckets []bucket - count uint - bucketPow uint -} - -// NewFilter returns a new cuckoofilter with a given capacity. -// A capacity of 1000000 is a normal default, which allocates -// about ~1MB on 64-bit machines. -func NewFilter(capacity uint) *Filter { - capacity = getNextPow2(uint64(capacity)) / bucketSize - if capacity == 0 { - capacity = 1 - } - buckets := make([]bucket, capacity) - return &Filter{ - buckets: buckets, - count: 0, - bucketPow: uint(bits.TrailingZeros(capacity)), - } -} - -// Lookup returns true if data is in the counter -func (cf *Filter) Lookup(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.buckets[i1].getFingerprintIndex(fp) > -1 { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - return cf.buckets[i2].getFingerprintIndex(fp) > -1 -} - -// Reset ... 
-func (cf *Filter) Reset() { - for i := range cf.buckets { - cf.buckets[i].reset() - } - cf.count = 0 -} - -func randi(i1, i2 uint) uint { - if rand.Intn(2) == 0 { - return i1 - } - return i2 -} - -// Insert inserts data into the counter and returns true upon success -func (cf *Filter) Insert(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.insert(fp, i1) { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - if cf.insert(fp, i2) { - return true - } - return cf.reinsert(fp, randi(i1, i2)) -} - -// InsertUnique inserts data into the counter if not exists and returns true upon success -func (cf *Filter) InsertUnique(data []byte) bool { - if cf.Lookup(data) { - return false - } - return cf.Insert(data) -} - -func (cf *Filter) insert(fp fingerprint, i uint) bool { - if cf.buckets[i].insert(fp) { - cf.count++ - return true - } - return false -} - -func (cf *Filter) reinsert(fp fingerprint, i uint) bool { - for k := 0; k < maxCuckooCount; k++ { - j := rand.Intn(bucketSize) - oldfp := fp - fp = cf.buckets[i][j] - cf.buckets[i][j] = oldfp - - // look in the alternate location for that random element - i = getAltIndex(fp, i, cf.bucketPow) - if cf.insert(fp, i) { - return true - } - } - return false -} - -// Delete data from counter if exists and return if deleted or not -func (cf *Filter) Delete(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.delete(fp, i1) { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - return cf.delete(fp, i2) -} - -func (cf *Filter) delete(fp fingerprint, i uint) bool { - if cf.buckets[i].delete(fp) { - if cf.count > 0 { - cf.count-- - } - return true - } - return false -} - -// Count returns the number of items in the counter -func (cf *Filter) Count() uint { - return cf.count -} - -// Encode returns a byte slice representing a Cuckoofilter -func (cf *Filter) Encode() []byte { - bytes := make([]byte, len(cf.buckets)*bucketSize) - for i, b := range cf.buckets { - for j, f := range b { - index := (i * len(b)) + j - bytes[index] = byte(f) - } - } - return bytes -} - -// Decode returns a Cuckoofilter from a byte slice -func Decode(bytes []byte) (*Filter, error) { - var count uint - if len(bytes)%bucketSize != 0 { - return nil, fmt.Errorf("expected bytes to be multiple of %d, got %d", bucketSize, len(bytes)) - } - if len(bytes) == 0 { - return nil, fmt.Errorf("bytes can not be empty") - } - buckets := make([]bucket, len(bytes)/4) - for i, b := range buckets { - for j := range b { - index := (i * len(b)) + j - if bytes[index] != 0 { - buckets[i][j] = fingerprint(bytes[index]) - count++ - } - } - } - return &Filter{ - buckets: buckets, - count: count, - bucketPow: uint(bits.TrailingZeros(uint(len(buckets)))), - }, nil -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/doc.go b/vendor/github.com/seiflotfy/cuckoofilter/doc.go deleted file mode 100644 index 6f6cbf8281..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or 
substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -/* -Package cuckoo provides a Cuckoo Filter, a Bloom filter replacement for approximated set-membership queries. - -While Bloom filters are well-known space-efficient data structures to serve queries like "if item x is in a set?", they do not support deletion. Their variances to enable deletion (like counting Bloom filters) usually require much more space. - -Cuckoo filters provide the flexibility to add and remove items dynamically. A cuckoo filter is based on cuckoo hashing (and therefore named as cuckoo filter). It is essentially a cuckoo hash table storing each key's fingerprint. Cuckoo hash tables can be highly compact, thus a cuckoo filter could use less space than conventional Bloom filters, for applications that require low false positive rates (< 3%). - -For details about the algorithm and citations please use this article: - -"Cuckoo Filter: Better Than Bloom" by Bin Fan, Dave Andersen and Michael Kaminsky -(https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf) - -Note: -This implementation uses a a static bucket size of 4 fingerprints and a fingerprint size of 1 byte based on my understanding of an optimal bucket/fingerprint/size ratio from the aforementioned paper.*/ -package cuckoo diff --git a/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go b/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go deleted file mode 100644 index 693184c9d4..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go +++ /dev/null @@ -1,170 +0,0 @@ -package cuckoo - -import ( - "bytes" - "encoding/gob" -) - -const ( - DefaultLoadFactor = 0.9 - DefaultCapacity = 10000 -) - -type ScalableCuckooFilter struct { - filters []*Filter - loadFactor float32 - //when scale(last filter size * loadFactor >= capacity) get new filter capacity - scaleFactor func(capacity uint) uint -} - -type option func(*ScalableCuckooFilter) - -type Store struct { - Bytes [][]byte - LoadFactor float32 -} - -/* - by default option the grow capacity is: - capacity , total - 4096 4096 - 8192 12288 -16384 28672 -32768 61440 -65536 126,976 -*/ -func NewScalableCuckooFilter(opts ...option) *ScalableCuckooFilter { - sfilter := new(ScalableCuckooFilter) - for _, opt := range opts { - opt(sfilter) - } - configure(sfilter) - return sfilter -} - -func (sf *ScalableCuckooFilter) Lookup(data []byte) bool { - for _, filter := range sf.filters { - if filter.Lookup(data) { - return true - } - } - return false -} - -func (sf *ScalableCuckooFilter) Reset() { - for _, filter := range sf.filters { - filter.Reset() - } -} - -func (sf *ScalableCuckooFilter) Insert(data []byte) bool { - needScale := false - lastFilter := sf.filters[len(sf.filters)-1] - if (float32(lastFilter.count) / float32(len(lastFilter.buckets))) > sf.loadFactor { - needScale = true - } else { - b := lastFilter.Insert(data) - needScale = !b - } - if !needScale { - return true - } - newFilter := NewFilter(sf.scaleFactor(uint(len(lastFilter.buckets)))) - sf.filters = append(sf.filters, newFilter) - return 
newFilter.Insert(data) -} - -func (sf *ScalableCuckooFilter) InsertUnique(data []byte) bool { - if sf.Lookup(data) { - return false - } - return sf.Insert(data) -} - -func (sf *ScalableCuckooFilter) Delete(data []byte) bool { - for _, filter := range sf.filters { - if filter.Delete(data) { - return true - } - } - return false -} - -func (sf *ScalableCuckooFilter) Count() uint { - var sum uint - for _, filter := range sf.filters { - sum += filter.count - } - return sum - -} - -func (sf *ScalableCuckooFilter) Encode() []byte { - slice := make([][]byte, len(sf.filters)) - for i, filter := range sf.filters { - encode := filter.Encode() - slice[i] = encode - } - store := &Store{ - Bytes: slice, - LoadFactor: sf.loadFactor, - } - buf := bytes.NewBuffer(nil) - enc := gob.NewEncoder(buf) - err := enc.Encode(store) - if err != nil { - return nil - } - return buf.Bytes() -} - -func (sf *ScalableCuckooFilter) DecodeWithParam(fBytes []byte, opts ...option) (*ScalableCuckooFilter, error) { - instance, err := DecodeScalableFilter(fBytes) - if err != nil { - return nil, err - } - for _, opt := range opts { - opt(instance) - } - return instance, nil -} - -func DecodeScalableFilter(fBytes []byte) (*ScalableCuckooFilter, error) { - buf := bytes.NewBuffer(fBytes) - dec := gob.NewDecoder(buf) - store := &Store{} - err := dec.Decode(store) - if err != nil { - return nil, err - } - filterSize := len(store.Bytes) - instance := NewScalableCuckooFilter(func(filter *ScalableCuckooFilter) { - filter.filters = make([]*Filter, filterSize) - }, func(filter *ScalableCuckooFilter) { - filter.loadFactor = store.LoadFactor - }) - for i, oneBytes := range store.Bytes { - filter, err := Decode(oneBytes) - if err != nil { - return nil, err - } - instance.filters[i] = filter - } - return instance, nil - -} - -func configure(sfilter *ScalableCuckooFilter) { - if sfilter.loadFactor == 0 { - sfilter.loadFactor = DefaultLoadFactor - } - if sfilter.scaleFactor == nil { - sfilter.scaleFactor = func(currentSize uint) uint { - return currentSize * bucketSize * 2 - } - } - if sfilter.filters == nil { - initFilter := NewFilter(DefaultCapacity) - sfilter.filters = []*Filter{initFilter} - } -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/util.go b/vendor/github.com/seiflotfy/cuckoofilter/util.go deleted file mode 100644 index 840932e288..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/util.go +++ /dev/null @@ -1,71 +0,0 @@ -package cuckoo - -import ( - metro "github.com/dgryski/go-metro" -) - -var ( - altHash = [256]uint{} - masks = [65]uint{} -) - -func init() { - for i := 0; i < 256; i++ { - altHash[i] = (uint(metro.Hash64([]byte{byte(i)}, 1337))) - } - for i := uint(0); i <= 64; i++ { - masks[i] = (1 << i) - 1 - } -} - -func getAltIndex(fp fingerprint, i uint, bucketPow uint) uint { - mask := masks[bucketPow] - hash := altHash[fp] & mask - return (i & mask) ^ hash -} - -func getFingerprint(hash uint64) byte { - // Use least significant bits for fingerprint. - fp := byte(hash%255 + 1) - return fp -} - -// getIndicesAndFingerprint returns the 2 bucket indices and fingerprint to be used -func getIndexAndFingerprint(data []byte, bucketPow uint) (uint, fingerprint) { - hash := defaultHasher.Hash64(data) - fp := getFingerprint(hash) - // Use most significant bits for deriving index. 
- i1 := uint(hash>>32) & masks[bucketPow] - return i1, fingerprint(fp) -} - -func getNextPow2(n uint64) uint { - n-- - n |= n >> 1 - n |= n >> 2 - n |= n >> 4 - n |= n >> 8 - n |= n >> 16 - n |= n >> 32 - n++ - return uint(n) -} - -var defaultHasher Hasher = new(metrotHasher) - -func SetDefaultHasher(hasher Hasher) { - defaultHasher = hasher -} - -type Hasher interface { - Hash64([]byte) uint64 -} - -var _ Hasher = new(metrotHasher) - -type metrotHasher struct{} - -func (h *metrotHasher) Hash64(data []byte) uint64 { - hash := metro.Hash64(data, 1337) - return hash -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 00fda38831..64add7fb51 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -130,7 +130,7 @@ func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st } if checkExternalLabels { - if len(meta.Thanos.Labels) == 0 { + if meta.Thanos.Labels == nil || len(meta.Thanos.Labels) == 0 { return errors.New("empty external labels are not allowed for Thanos block.") } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 772d37a48b..883b8e0608 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -257,11 +257,7 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch c mu.Unlock() continue } - select { - case <-ctx.Done(): - return ctx.Err() - case ch <- uid: - } + ch <- uid } return nil }) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go index ff3975663c..e9fe5eb7dc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -32,7 +32,7 @@ func NewReaderPoolMetrics(reg prometheus.Registerer) *ReaderPoolMetrics { } } -// ReaderPool is used to instantiate new index-header readers and keep track of them. +// ReaderPool is used to istantiate new index-header readers and keep track of them. // When the lazy reader is enabled, the pool keeps track of all instantiated readers // and automatically close them once the idle timeout is reached. A closed lazy reader // will be automatically re-opened upon next usage. @@ -73,7 +73,7 @@ func (s IndexHeaderLazyDownloadStrategy) StrategyToDownloadFunc() LazyDownloadIn } } -// LazyDownloadIndexHeaderFunc is used to determine whether to download the index header lazily +// LazyDownloadIndexHeaderFunc is used to determinte whether to download the index header lazily // or not by checking its block metadata. Usecase can be by time or by index file size. type LazyDownloadIndexHeaderFunc func(meta *metadata.Meta) bool diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go index a5b0c5b2a4..8c10a9a874 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go @@ -134,7 +134,7 @@ type MemcachedClientConfig struct { MaxItemSize model.Bytes `yaml:"max_item_size"` // MaxGetMultiBatchSize specifies the maximum number of keys a single underlying - // GetMulti() should run. 
If more keys are specified, internally keys are split + // GetMulti() should run. If more keys are specified, internally keys are splitted // into multiple batches and fetched concurrently, honoring MaxGetMultiConcurrency parallelism. // If set to 0, the max batch size is unlimited. MaxGetMultiBatchSize int `yaml:"max_get_multi_batch_size"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 7f08297671..522e4c9d4c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -894,7 +894,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp _, _ = sb.WriteString(",") } } - rerr = fmt.Errorf("panicked while compacting %s: %v", sb.String(), p) + rerr = fmt.Errorf("paniced while compacting %s: %v", sb.String(), p) } }() diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 46d590186e..6aa2b23dfe 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -149,7 +149,7 @@ func Downsample( // Raw and already downsampled data need different processing. if origMeta.Thanos.Downsample.Resolution == 0 { for _, c := range chks { - // TODO(bwplotka): We can optimize this further by using in WriteSeries iterators of each chunk instead of + // TODO(bwplotka): We can optimze this further by using in WriteSeries iterators of each chunk instead of // samples. Also ensure 120 sample limit, otherwise we have gigantic chunks. // https://github.com/thanos-io/thanos/issues/2542. if err := expandChunkIterator(c.Chunk.Iterator(reuseIt), &all); err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go index 394e33185b..6d7d03eea2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -68,7 +68,7 @@ func (p *tsdbBasedPlanner) plan(noCompactMarked map[ulid.ULID]*metadata.NoCompac } // No overlapping blocks, do compaction the usual way. - // We do not include a recently produced block with max(minTime), so the block which was just uploaded to bucket. + // We do not include a recently producted block with max(minTime), so the block which was just uploaded to bucket. // This gives users a window of a full block size maintenance if needed. if _, excluded := noCompactMarked[metasByMinTime[len(metasByMinTime)-1].ULID]; !excluded { notExcludedMetasByMinTime = notExcludedMetasByMinTime[:len(notExcludedMetasByMinTime)-1] @@ -200,7 +200,7 @@ func splitByRange(metasByMinTime []*metadata.Meta, tr int64) [][]*metadata.Meta t0 = tr * ((m.MinTime - tr + 1) / tr) } - // Skip blocks that don't fall into the range. This can happen via misalignment or + // Skip blocks that don't fall into the range. This can happen via mis-alignment or // by being the multiple of the intended range. 
if m.MaxTime > t0+tr { i++ diff --git a/vendor/github.com/thanos-io/thanos/pkg/component/component.go b/vendor/github.com/thanos-io/thanos/pkg/component/component.go index 6c52aef138..dfbae08289 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/component/component.go +++ b/vendor/github.com/thanos-io/thanos/pkg/component/component.go @@ -3,6 +3,12 @@ package component +import ( + "strings" + + "github.com/thanos-io/thanos/pkg/store/storepb" +) + // Component is a generic component interface. type Component interface { String() string @@ -12,6 +18,7 @@ type Component interface { type StoreAPI interface { implementsStoreAPI() String() string + ToProto() storepb.StoreType } // Source is a Thanos component that produce blocks of metrics. @@ -26,6 +33,7 @@ type SourceStoreAPI interface { implementsStoreAPI() producesBlocks() String() string + ToProto() storepb.StoreType } type component struct { @@ -40,6 +48,14 @@ type storeAPI struct { func (storeAPI) implementsStoreAPI() {} +func (s sourceStoreAPI) ToProto() storepb.StoreType { + return storepb.StoreType(storepb.StoreType_value[strings.ToUpper(s.String())]) +} + +func (s storeAPI) ToProto() storepb.StoreType { + return storepb.StoreType(storepb.StoreType_value[strings.ToUpper(s.String())]) +} + type source struct { component } @@ -52,6 +68,26 @@ type sourceStoreAPI struct { storeAPI } +// FromProto converts from a gRPC StoreType to StoreAPI. +func FromProto(storeType storepb.StoreType) StoreAPI { + switch storeType { + case storepb.StoreType_QUERY: + return Query + case storepb.StoreType_RULE: + return Rule + case storepb.StoreType_SIDECAR: + return Sidecar + case storepb.StoreType_STORE: + return Store + case storepb.StoreType_RECEIVE: + return Receive + case storepb.StoreType_DEBUG: + return Debug + default: + return UnknownStoreAPI + } +} + func FromString(storeType string) StoreAPI { switch storeType { case "query": @@ -89,24 +125,4 @@ var ( Store = storeAPI{component: component{name: "store"}} UnknownStoreAPI = storeAPI{component: component{name: "unknown-store-api"}} Query = storeAPI{component: component{name: "query"}} - - All = []Component{ - Bucket, - Cleanup, - Mark, - Upload, - Rewrite, - Retention, - Compact, - Downsample, - Replicate, - QueryFrontend, - Debug, - Receive, - Rule, - Sidecar, - Store, - UnknownStoreAPI, - Query, - } ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go deleted file mode 100644 index 4e315596df..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package dns - -import ( - "context" - "sync" - "time" - - grpcresolver "google.golang.org/grpc/resolver" -) - -var ( - _ grpcresolver.Builder = &builder{} - _ grpcresolver.Resolver = &resolver{} -) - -type builder struct { - resolveInterval time.Duration - provider *Provider -} - -func RegisterGRPCResolver(provider *Provider, interval time.Duration) { - grpcresolver.Register(&builder{ - resolveInterval: interval, - provider: provider, - }) -} - -func (b *builder) Scheme() string { return "thanos" } - -func (b *builder) Build(t grpcresolver.Target, cc grpcresolver.ClientConn, _ grpcresolver.BuildOptions) (grpcresolver.Resolver, error) { - ctx, cancel := context.WithCancel(context.Background()) - r := &resolver{ - provider: b.provider, - target: t.Endpoint(), - ctx: ctx, - cancel: cancel, - cc: cc, - interval: b.resolveInterval, - } - r.wg.Add(1) - go r.run() - - return r, nil -} - -type resolver struct { - provider *Provider - - target string - ctx context.Context - cancel context.CancelFunc - cc grpcresolver.ClientConn - interval time.Duration - - wg sync.WaitGroup -} - -func (r *resolver) Close() { - r.cancel() - r.wg.Wait() -} - -func (r *resolver) ResolveNow(_ grpcresolver.ResolveNowOptions) {} - -func (r *resolver) resolve() error { - ctx, cancel := context.WithTimeout(r.ctx, r.interval) - defer cancel() - return r.provider.Resolve(ctx, []string{r.target}) -} - -func (r *resolver) addresses() []string { - return r.provider.AddressesForHost(r.target) -} - -func (r *resolver) run() { - defer r.wg.Done() - for { - if err := r.resolve(); err != nil { - r.cc.ReportError(err) - } else { - state := grpcresolver.State{} - for _, addr := range r.addresses() { - raddr := grpcresolver.Address{Addr: addr} - state.Addresses = append(state.Addresses, raddr) - } - _ = r.cc.UpdateState(state) - } - select { - case <-r.ctx.Done(): - return - case <-time.After(r.interval): - } - } -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index 8f42bf4d26..3ec032a654 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -164,16 +164,3 @@ func (p *Provider) Addresses() []string { } return result } - -// AddressesForHost returns the latest addresses present for the host in the Provider. -func (p *Provider) AddressesForHost(host string) []string { - p.RLock() - defer p.RUnlock() - - addrs := p.resolved[host] - - res := make([]string, len(addrs)) - copy(res, addrs) - - return res -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go index 0025178607..d0078aea57 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go @@ -108,9 +108,7 @@ func (s *dnsSD) Resolve(ctx context.Context, name string, qtype QType) ([]string } if qtype == SRVNoA { - // Remove the final dot from rooted DNS names (this is for compatibility with Prometheus) - target := strings.TrimRight(rec.Target, ".") - res = append(res, appendScheme(scheme, net.JoinHostPort(target, resPort))) + res = append(res, appendScheme(scheme, net.JoinHostPort(rec.Target, resPort))) continue } // Do A lookup for the domain in SRV answer. 
diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go index ebdda5fbe1..75d33243c7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go @@ -27,8 +27,6 @@ type ClientMetrics struct { // e.g. 1 ClientMetrics should be used for all the clients that talk to Alertmanager. func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { var m ClientMetrics - const maxBucketNumber = 256 - const bucketFactor = 1.1 m.inFlightGauge = promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Subsystem: "http_client", @@ -48,9 +46,6 @@ func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { Name: "dns_duration_seconds", Help: "Trace dns latency histogram.", Buckets: []float64{0.025, .05, .1, .5, 1, 5, 10}, - - NativeHistogramBucketFactor: bucketFactor, - NativeHistogramMaxBucketNumber: maxBucketNumber, }, []string{"event"}, ) @@ -61,9 +56,6 @@ func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { Name: "tls_duration_seconds", Help: "Trace tls latency histogram.", Buckets: []float64{0.025, .05, .1, .5, 1, 5, 10}, - - NativeHistogramBucketFactor: bucketFactor, - NativeHistogramMaxBucketNumber: maxBucketNumber, }, []string{"event"}, ) @@ -74,9 +66,6 @@ func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { Name: "request_duration_seconds", Help: "A histogram of request latencies.", Buckets: []float64{0.025, .05, .1, .5, 1, 5, 10}, - - NativeHistogramBucketFactor: bucketFactor, - NativeHistogramMaxBucketNumber: maxBucketNumber, }, []string{"code", "method"}, ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/filter/cuckoo.go b/vendor/github.com/thanos-io/thanos/pkg/filter/cuckoo.go deleted file mode 100644 index 0cdce6dc94..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/filter/cuckoo.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filter - -import ( - "sync" - "unsafe" - - "github.com/prometheus/prometheus/model/labels" - cuckoo "github.com/seiflotfy/cuckoofilter" -) - -type CuckooMetricNameStoreFilter struct { - filter *cuckoo.Filter - mtx sync.RWMutex -} - -func NewCuckooMetricNameStoreFilter(capacity uint) *CuckooMetricNameStoreFilter { - return &CuckooMetricNameStoreFilter{ - filter: cuckoo.NewFilter(capacity), - } -} - -func (f *CuckooMetricNameStoreFilter) Matches(matchers []*labels.Matcher) bool { - f.mtx.RLock() - defer f.mtx.RUnlock() - - for _, m := range matchers { - if m.Type == labels.MatchEqual && m.Name == labels.MetricName { - return f.filter.Lookup([]byte(m.Value)) - } - } - - return true -} - -func (f *CuckooMetricNameStoreFilter) ResetAndSet(values ...string) { - f.mtx.Lock() - defer f.mtx.Unlock() - f.filter.Reset() - for _, value := range values { - f.filter.Insert(unsafe.Slice(unsafe.StringData(value), len(value))) - } -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/filter/filter.go b/vendor/github.com/thanos-io/thanos/pkg/filter/filter.go deleted file mode 100644 index f5cc068cf5..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/filter/filter.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filter - -import "github.com/prometheus/prometheus/model/labels" - -type StoreFilter interface { - // Matches returns true if the filter matches the given matchers. 
- Matches(matchers []*labels.Matcher) bool - - // ResetAndSet resets the filter and sets it to the given values. - ResetAndSet(values ...string) -} - -type AllowAllStoreFilter struct{} - -func (f AllowAllStoreFilter) Matches(matchers []*labels.Matcher) bool { - return true -} - -func (f AllowAllStoreFilter) ResetAndSet(values ...string) {} diff --git a/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go index f3481257f2..159966d0cb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go @@ -9,52 +9,51 @@ import ( "github.com/pkg/errors" ) -// Pool is a pool for slices of type T that can be reused. -type Pool[T any] interface { - // Get returns a new T slice that fits the given size. - Get(sz int) (*[]T, error) - // Put returns a T slice to the right bucket in the pool. - Put(b *[]T) +// Bytes is a pool of bytes that can be reused. +type Bytes interface { + // Get returns a new byte slices that fits the given size. + Get(sz int) (*[]byte, error) + // Put returns a byte slice to the right bucket in the pool. + Put(b *[]byte) } -// NoopPool is pool that always allocated required slice on heap and ignore puts. -type NoopPool[T any] struct{} +// NoopBytes is pool that always allocated required slice on heap and ignore puts. +type NoopBytes struct{} -func (p NoopPool[T]) Get(sz int) (*[]T, error) { - b := make([]T, 0, sz) +func (p NoopBytes) Get(sz int) (*[]byte, error) { + b := make([]byte, 0, sz) return &b, nil } -func (p NoopPool[T]) Put(*[]T) {} +func (p NoopBytes) Put(*[]byte) {} -// BucketedPool is a bucketed pool for variably sized T slices. It can be -// configured to not allow more than a maximum number of T items being used at a -// given time. Every slice obtained from the pool must be returned. -type BucketedPool[T any] struct { +// BucketedBytes is a bucketed pool for variably sized byte slices. It can be configured to not allow +// more than a maximum number of bytes being used at a given time. +// Every byte slice obtained from the pool must be returned. +type BucketedBytes struct { buckets []sync.Pool sizes []int maxTotal uint64 usedTotal uint64 mtx sync.RWMutex - new func(s int) *[]T + new func(s int) *[]byte } -// MustNewBucketedPool is like NewBucketedPool but panics if construction fails. +// MustNewBucketedBytes is like NewBucketedBytes but panics if construction fails. // Useful for package internal pools. -func MustNewBucketedPool[T any](minSize, maxSize int, factor float64, maxTotal uint64) *BucketedPool[T] { - pool, err := NewBucketedPool[T](minSize, maxSize, factor, maxTotal) +func MustNewBucketedBytes(minSize, maxSize int, factor float64, maxTotal uint64) *BucketedBytes { + pool, err := NewBucketedBytes(minSize, maxSize, factor, maxTotal) if err != nil { panic(err) } return pool } -// NewBucketedPool returns a new BucketedPool with size buckets for minSize to -// maxSize increasing by the given factor and maximum number of used items. No -// more than maxTotal items can be used at any given time unless maxTotal is set -// to 0. -func NewBucketedPool[T any](minSize, maxSize int, factor float64, maxTotal uint64) (*BucketedPool[T], error) { +// NewBucketedBytes returns a new Bytes with size buckets for minSize to maxSize +// increasing by the given factor and maximum number of used bytes. +// No more than maxTotal bytes can be used at any given time unless maxTotal is set to 0. 
+func NewBucketedBytes(minSize, maxSize int, factor float64, maxTotal uint64) (*BucketedBytes, error) { if minSize < 1 { return nil, errors.New("invalid minimum pool size") } @@ -70,23 +69,23 @@ func NewBucketedPool[T any](minSize, maxSize int, factor float64, maxTotal uint6 for s := minSize; s <= maxSize; s = int(float64(s) * factor) { sizes = append(sizes, s) } - p := &BucketedPool[T]{ + p := &BucketedBytes{ buckets: make([]sync.Pool, len(sizes)), sizes: sizes, maxTotal: maxTotal, - new: func(sz int) *[]T { - s := make([]T, 0, sz) + new: func(sz int) *[]byte { + s := make([]byte, 0, sz) return &s }, } return p, nil } -// ErrPoolExhausted is returned if a pool cannot provide the requested slice. +// ErrPoolExhausted is returned if a pool cannot provide the request bytes. var ErrPoolExhausted = errors.New("pool exhausted") -// Get returns a slice into from the bucket that fits the given size. -func (p *BucketedPool[T]) Get(sz int) (*[]T, error) { +// Get returns a new byte slice that fits the given size. +func (p *BucketedBytes) Get(sz int) (*[]byte, error) { p.mtx.Lock() defer p.mtx.Unlock() @@ -98,7 +97,7 @@ func (p *BucketedPool[T]) Get(sz int) (*[]T, error) { if sz > bktSize { continue } - b, ok := p.buckets[i].Get().(*[]T) + b, ok := p.buckets[i].Get().(*[]byte) if !ok { b = p.new(bktSize) } @@ -112,8 +111,8 @@ func (p *BucketedPool[T]) Get(sz int) (*[]T, error) { return p.new(sz), nil } -// Put returns a slice to the right bucket in the pool. -func (p *BucketedPool[T]) Put(b *[]T) { +// Put returns a byte slice to the right bucket in the pool. +func (p *BucketedBytes) Put(b *[]byte) { if b == nil { return } @@ -139,7 +138,7 @@ func (p *BucketedPool[T]) Put(b *[]T) { } } -func (p *BucketedPool[T]) UsedBytes() uint64 { +func (p *BucketedBytes) UsedBytes() uint64 { p.mtx.RLock() defer p.mtx.RUnlock() diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index 1f96f4c666..5dde62c5ee 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -734,7 +734,7 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string // SeriesInGRPC returns the labels from Prometheus series API. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]map[string]string, error) { +func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]map[string]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/series") q := u.Query() @@ -742,7 +742,6 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*la q.Add("match[]", storepb.PromMatchersToString(matchers...)) q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) - q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -754,7 +753,7 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*la // LabelNamesInGRPC returns all known label names constrained by the given matchers. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. 
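The restored BucketedBytes API above is the byte-slice pool Thanos components reuse buffers from. A usage sketch with illustrative bucket sizes; the sizes and the appended payload are assumptions, only the function names and signatures come from this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/thanos-io/thanos/pkg/pool"
)

func main() {
	// Buckets from 1 KiB to 1 MiB, doubling each step; maxTotal of 0 means no cap on bytes in use.
	p := pool.MustNewBucketedBytes(1024, 1024*1024, 2, 0)

	buf, err := p.Get(4096) // hands back a slice with capacity for at least 4096 bytes
	if err != nil {
		log.Fatal(err)
	}
	*buf = append(*buf, []byte("reusable")...)
	fmt.Println(len(*buf), cap(*buf))

	p.Put(buf) // return the slice to its bucket so it can be reused
}
```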
-func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { +func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/labels") q := u.Query() @@ -764,7 +763,6 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) - q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -775,7 +773,7 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ // LabelValuesInGRPC returns all known label values for a given label name. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { +func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/label/", label, "/values") q := u.Query() @@ -785,7 +783,6 @@ func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label str } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) - q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go index 4c519bf925..b1faff425b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go @@ -83,9 +83,91 @@ func (es *endpointRef) Metadata(ctx context.Context, infoClient infopb.InfoClien return &endpointMetadata{resp}, nil } } + + // Call Info method of StoreAPI, this way querier will be able to discovery old components not exposing InfoAPI. 
+ if storeClient != nil { + metadata, err := es.getMetadataUsingStoreAPI(ctx, storeClient) + if err != nil { + return nil, errors.Wrapf(err, "fallback fetching info from %s", es.addr) + } + return metadata, nil + } + return nil, errors.New(noMetadataEndpointMessage) } +func (es *endpointRef) getMetadataUsingStoreAPI(ctx context.Context, client storepb.StoreClient) (*endpointMetadata, error) { + resp, err := client.Info(ctx, &storepb.InfoRequest{}) + if err != nil { + return nil, err + } + + infoResp := fillExpectedAPIs(component.FromProto(resp.StoreType), resp.MinTime, resp.MaxTime) + infoResp.LabelSets = resp.LabelSets + infoResp.ComponentType = component.FromProto(resp.StoreType).String() + + return &endpointMetadata{ + &infoResp, + }, nil +} + +func fillExpectedAPIs(componentType component.Component, mintime, maxTime int64) infopb.InfoResponse { + switch componentType { + case component.Sidecar: + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Rules: &infopb.RulesInfo{}, + Targets: &infopb.TargetsInfo{}, + MetricMetadata: &infopb.MetricMetadataInfo{}, + Exemplars: &infopb.ExemplarsInfo{}, + } + case component.Query: + { + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Rules: &infopb.RulesInfo{}, + Targets: &infopb.TargetsInfo{}, + MetricMetadata: &infopb.MetricMetadataInfo{}, + Exemplars: &infopb.ExemplarsInfo{}, + Query: &infopb.QueryAPIInfo{}, + } + } + case component.Receive: + { + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Exemplars: &infopb.ExemplarsInfo{}, + } + } + case component.Store: + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + } + case component.Rule: + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Rules: &infopb.RulesInfo{}, + } + default: + return infopb.InfoResponse{} + } +} + // stringError forces the error to be a string // when marshaled into a JSON. type stringError struct { @@ -117,7 +199,7 @@ type EndpointStatus struct { // TODO(hitanshu-mehta) Currently,only collecting metrics of storeEndpoints. Make this struct generic. type endpointSetNodeCollector struct { mtx sync.Mutex - storeNodes map[string]map[string]int + storeNodes map[component.Component]map[string]int storePerExtLset map[string]int logger log.Logger @@ -131,7 +213,7 @@ func newEndpointSetNodeCollector(logger log.Logger, labels ...string) *endpointS } return &endpointSetNodeCollector{ logger: logger, - storeNodes: map[string]map[string]int{}, + storeNodes: map[component.Component]map[string]int{}, connectionsDesc: prometheus.NewDesc( "thanos_store_nodes_grpc_connections", "Number of gRPC connection to Store APIs. 
Opened connection means healthy store APIs available for Querier.", @@ -154,8 +236,8 @@ func truncateExtLabels(s string, threshold int) string { } return s } -func (c *endpointSetNodeCollector) Update(nodes map[string]map[string]int) { - storeNodes := make(map[string]map[string]int, len(nodes)) +func (c *endpointSetNodeCollector) Update(nodes map[component.Component]map[string]int) { + storeNodes := make(map[component.Component]map[string]int, len(nodes)) storePerExtLset := map[string]int{} for storeType, occurrencesPerExtLset := range nodes { @@ -181,8 +263,12 @@ func (c *endpointSetNodeCollector) Collect(ch chan<- prometheus.Metric) { c.mtx.Lock() defer c.mtx.Unlock() - for k, occurrencesPerExtLset := range c.storeNodes { + for storeType, occurrencesPerExtLset := range c.storeNodes { for externalLabels, occurrences := range occurrencesPerExtLset { + var storeTypeStr string + if storeType != nil { + storeTypeStr = storeType.String() + } // Select only required labels. lbls := []string{} for _, lbl := range c.labels { @@ -190,7 +276,7 @@ func (c *endpointSetNodeCollector) Collect(ch chan<- prometheus.Metric) { case string(ExternalLabels): lbls = append(lbls, externalLabels) case string(StoreType): - lbls = append(lbls, k) + lbls = append(lbls, storeTypeStr) } } select { @@ -368,12 +454,12 @@ func (e *EndpointSet) Update(ctx context.Context) { // All producers that expose StoreAPI should have unique external labels. Check all which connect to our Querier. if er.HasStoreAPI() && (er.ComponentType() == component.Sidecar || er.ComponentType() == component.Rule) && - stats[component.Sidecar.String()][extLset]+stats[component.Rule.String()][extLset] > 0 { + stats[component.Sidecar][extLset]+stats[component.Rule][extLset] > 0 { level.Warn(e.logger).Log("msg", "found duplicate storeEndpoints producer (sidecar or ruler). This is not advices as it will malform data in in the same bucket", - "address", addr, "extLset", extLset, "duplicates", fmt.Sprintf("%v", stats[component.Sidecar.String()][extLset]+stats[component.Rule.String()][extLset]+1)) + "address", addr, "extLset", extLset, "duplicates", fmt.Sprintf("%v", stats[component.Sidecar][extLset]+stats[component.Rule][extLset]+1)) } - stats[er.ComponentType().String()][extLset]++ + stats[er.ComponentType()][extLset]++ } e.endpointsMetric.Update(stats) @@ -631,7 +717,7 @@ func (er *endpointRef) updateMetadata(metadata *endpointMetadata, err error) { } // isQueryable returns true if an endpointRef should be used for querying. -// A strict endpointRef is always queryable. A non-strict endpointRef +// A strict endpointRef is always queriable. A non-strict endpointRef // is queryable if the last health check (info call) succeeded. func (er *endpointRef) isQueryable() bool { er.mtx.RLock() @@ -711,7 +797,11 @@ func (er *endpointRef) labelSets() []labels.Labels { labelSet := make([]labels.Labels, 0, len(er.metadata.LabelSets)) for _, ls := range labelpb.ZLabelSetsToPromLabelSets(er.metadata.LabelSets...) { - if ls.Len() == 0 { + if len(ls) == 0 { + continue + } + // Compatibility label for Queriers pre 0.8.1. Filter it out now. 
+ if ls[0].Name == store.CompatibilityTypeLabelName { continue } labelSet = append(labelSet, ls.Copy()) @@ -782,7 +872,7 @@ func (er *endpointRef) Addr() (string, bool) { } func (er *endpointRef) Close() { - runutil.CloseWithLogOnErr(er.logger, er.cc, "endpoint %v connection closed", er.addr) + runutil.CloseWithLogOnErr(er.logger, er.cc, fmt.Sprintf("endpoint %v connection closed", er.addr)) } func (er *endpointRef) apisPresent() []string { @@ -815,18 +905,14 @@ func (er *endpointRef) apisPresent() []string { return apisPresent } -func (er *endpointRef) Matches(matchers []*labels.Matcher) bool { - return true -} - type endpointMetadata struct { *infopb.InfoResponse } -func newEndpointAPIStats() map[string]map[string]int { - nodes := make(map[string]map[string]int, len(component.All)) - for _, comp := range component.All { - nodes[comp.String()] = map[string]int{} +func newEndpointAPIStats() map[component.Component]map[string]int { + nodes := make(map[component.Component]map[string]int, len(storepb.StoreType_name)) + for i := range storepb.StoreType_name { + nodes[component.FromProto(storepb.StoreType(i))] = map[string]int{} } return nodes } diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go index e084344ed9..9a1a311097 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go @@ -331,7 +331,6 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . req := storepb.SeriesRequest{ MinTime: hints.Start, MaxTime: hints.End, - Limit: int64(hints.Limit), Matchers: sms, MaxResolutionWindow: q.maxResolutionMillis, Aggregates: aggrs, @@ -374,7 +373,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . } // LabelValues returns all potential values for a label name. -func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_values") defer span.Finish() @@ -385,18 +384,12 @@ func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.L if err != nil { return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } - - if hints == nil { - hints = &storage.LabelHints{} - } - req := &storepb.LabelValuesRequest{ Label: name, PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, - Limit: int64(hints.Limit), } if q.isDedupEnabled() { @@ -418,7 +411,7 @@ func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.L // LabelNames returns all the unique label names present in the block in sorted order constrained // by the given matchers. 
-func (q *querier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_names") defer span.Finish() @@ -430,16 +423,11 @@ func (q *querier) LabelNames(ctx context.Context, hints *storage.LabelHints, mat return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } - if hints == nil { - hints = &storage.LabelHints{} - } - req := &storepb.LabelNamesRequest{ PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, - Limit: int64(hints.Limit), } if q.isDedupEnabled() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go index c625cab5ba..f379d38017 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go @@ -35,7 +35,6 @@ import ( type Opts struct { AutoDownsample bool ReplicaLabels []string - PartitionLabels []string Timeout time.Duration EnablePartialResponse bool } @@ -119,7 +118,7 @@ func (r *remoteEngine) MinT() int64 { hashBuf = make([]byte, 0, 128) highestMintByLabelSet = make(map[uint64]int64) ) - for _, lset := range r.adjustedInfos() { + for _, lset := range r.infosWithoutReplicaLabels() { key, _ := labelpb.ZLabelsToPromLabels(lset.Labels.Labels).HashWithoutLabels(hashBuf) lsetMinT, ok := highestMintByLabelSet[key] if !ok { @@ -153,22 +152,16 @@ func (r *remoteEngine) MaxT() int64 { func (r *remoteEngine) LabelSets() []labels.Labels { r.labelSetsOnce.Do(func() { - r.labelSets = r.adjustedInfos().LabelSets() + r.labelSets = r.infosWithoutReplicaLabels().LabelSets() }) return r.labelSets } -// adjustedInfos strips out replica labels and scopes the remaining labels -// onto the partition labels if they are set. -func (r *remoteEngine) adjustedInfos() infopb.TSDBInfos { +func (r *remoteEngine) infosWithoutReplicaLabels() infopb.TSDBInfos { replicaLabelSet := make(map[string]struct{}) for _, lbl := range r.opts.ReplicaLabels { replicaLabelSet[lbl] = struct{}{} } - partitionLabelsSet := make(map[string]struct{}) - for _, lbl := range r.opts.PartitionLabels { - partitionLabelsSet[lbl] = struct{}{} - } // Strip replica labels from the result. 
infos := make(infopb.TSDBInfos, 0, len(r.client.tsdbInfos)) @@ -179,9 +172,6 @@ func (r *remoteEngine) adjustedInfos() infopb.TSDBInfos { if _, ok := replicaLabelSet[lbl.Name]; ok { continue } - if _, ok := partitionLabelsSet[lbl.Name]; !ok && len(partitionLabelsSet) > 0 { - continue - } builder.Add(lbl.Name, lbl.Value) } infos = append(infos, infopb.NewTSDBInfo( diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go index 298ce03063..941014a06a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go @@ -109,9 +109,6 @@ type RulesRequest struct { Type RulesRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.RulesRequest_Type" json:"type,omitempty"` PartialResponseStrategy storepb.PartialResponseStrategy `protobuf:"varint,2,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` MatcherString []string `protobuf:"bytes,3,rep,name=matcher_string,json=matcherString,proto3" json:"matcher_string,omitempty"` - RuleName []string `protobuf:"bytes,4,rep,name=rule_name,json=ruleName,proto3" json:"rule_name,omitempty"` - RuleGroup []string `protobuf:"bytes,5,rep,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"` - File []string `protobuf:"bytes,6,rep,name=file,proto3" json:"file,omitempty"` } func (m *RulesRequest) Reset() { *m = RulesRequest{} } @@ -557,76 +554,74 @@ func init() { func init() { proto.RegisterFile("rules/rulespb/rpc.proto", fileDescriptor_91b1d28f30eb5efb) } var fileDescriptor_91b1d28f30eb5efb = []byte{ - // 1096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0x47, - 0x10, 0xf6, 0xd8, 0x9e, 0xb1, 0xa7, 0x8c, 0x59, 0xb6, 0x17, 0xc4, 0x00, 0x89, 0x07, 0x59, 0x22, - 0x22, 0x51, 0xd6, 0x8e, 0x40, 0xbb, 0xd1, 0x9e, 0x22, 0xcc, 0xcf, 0x82, 0x84, 0xc8, 0xaa, 0x8d, - 0x72, 0xd8, 0x1c, 0x9c, 0xc6, 0x34, 0x66, 0x94, 0xf1, 0xcc, 0x6c, 0x4f, 0x9b, 0x88, 0xb7, 0xd8, - 0x73, 0x5e, 0x24, 0xca, 0x1b, 0x70, 0xcb, 0x1e, 0x73, 0x72, 0x12, 0xb8, 0xf9, 0x90, 0x67, 0x88, - 0xba, 0x7a, 0xc6, 0x63, 0x08, 0x84, 0xdd, 0x84, 0x5c, 0xdc, 0xdd, 0x5f, 0x7d, 0xd5, 0x3f, 0x55, - 0x5f, 0x95, 0x07, 0xe6, 0xc5, 0xc0, 0xe7, 0x71, 0x13, 0x7f, 0xa3, 0xa3, 0xa6, 0x88, 0xba, 0x8d, - 0x48, 0x84, 0x32, 0x24, 0x96, 0x3c, 0x65, 0x41, 0x18, 0x2f, 0x2e, 0xc4, 0x32, 0x14, 0xbc, 0x89, - 0xbf, 0xd1, 0x51, 0x53, 0x9e, 0x47, 0x3c, 0xd6, 0x94, 0xd4, 0xe4, 0xb3, 0x23, 0xee, 0xdf, 0x30, - 0xcd, 0xf6, 0xc2, 0x5e, 0x88, 0xd3, 0xa6, 0x9a, 0x25, 0xa8, 0xdb, 0x0b, 0xc3, 0x9e, 0xcf, 0x9b, - 0xb8, 0x3a, 0x1a, 0x9c, 0x34, 0xa5, 0xd7, 0xe7, 0xb1, 0x64, 0xfd, 0x48, 0x13, 0xea, 0x3f, 0xe7, - 0x61, 0x8a, 0xaa, 0xab, 0x50, 0xfe, 0x66, 0xc0, 0x63, 0x49, 0x9e, 0x42, 0x51, 0x6d, 0xeb, 0x18, - 0xcb, 0xc6, 0xea, 0xf4, 0xda, 0x42, 0x43, 0x5f, 0xaa, 0x31, 0xc9, 0x69, 0x1c, 0x9e, 0x47, 0x9c, - 0x22, 0x8d, 0x7c, 0x0b, 0x0b, 0x11, 0x13, 0xd2, 0x63, 0x7e, 0x47, 0xf0, 0x38, 0x0a, 0x83, 0x98, - 0x77, 0x62, 0x29, 0x98, 0xe4, 0xbd, 0x73, 0x27, 0x8f, 0x7b, 0xb8, 0xe9, 0x1e, 0xaf, 0x34, 0x91, - 0x26, 0xbc, 0x76, 0x42, 0xa3, 0xf3, 0xd1, 0xed, 0x06, 0xb2, 0x02, 0xd3, 0x7d, 0x26, 0xbb, 0xa7, - 0x5c, 0xa8, 0x3d, 0xbd, 0xa0, 0xe7, 0x14, 0x96, 0x0b, 0xab, 0x36, 0xad, 0x26, 0x68, 0x1b, 0x41, - 0xb2, 0x04, 0xb6, 0x8a, 0x66, 0x27, 0x60, 0x7d, 0xee, 0x14, 0x91, 0x51, 0x56, 0xc0, 0x01, 0xeb, - 0x73, 
0xf2, 0x31, 0x00, 0x1a, 0x7b, 0x22, 0x1c, 0x44, 0x8e, 0x89, 0x56, 0xa4, 0xbf, 0x54, 0x00, - 0x21, 0x50, 0x3c, 0xf1, 0x7c, 0xee, 0x58, 0x68, 0xc0, 0x79, 0xfd, 0x13, 0x28, 0xaa, 0x17, 0x92, - 0x12, 0x14, 0x36, 0xf6, 0xf7, 0x67, 0x72, 0xc4, 0x06, 0x73, 0x63, 0x7f, 0x9b, 0x1e, 0xce, 0x18, - 0x04, 0xc0, 0xa2, 0xdb, 0x9b, 0x5f, 0xd3, 0xad, 0x99, 0x7c, 0xfd, 0x3b, 0xa8, 0x26, 0x61, 0xd1, - 0xf7, 0x26, 0x9f, 0x82, 0xa9, 0x8f, 0x51, 0xc1, 0xab, 0xac, 0x3d, 0x9e, 0x0c, 0x1e, 0x1e, 0xb7, - 0x9b, 0xa3, 0x9a, 0x41, 0x16, 0xa1, 0xf4, 0x03, 0x13, 0x81, 0x7a, 0x93, 0x8a, 0x92, 0xbd, 0x9b, - 0xa3, 0x29, 0xd0, 0x2a, 0x83, 0x25, 0x78, 0x3c, 0xf0, 0x65, 0x7d, 0x13, 0x60, 0xec, 0x1b, 0x93, - 0x67, 0x60, 0xa1, 0x73, 0xec, 0x18, 0xcb, 0x85, 0x5b, 0xf7, 0x6f, 0xc1, 0x68, 0xe8, 0x26, 0x24, - 0x9a, 0x8c, 0xf5, 0x3f, 0x0b, 0x60, 0x8f, 0x19, 0xe4, 0x23, 0x28, 0x62, 0x9c, 0xd4, 0x15, 0xed, - 0x56, 0x79, 0x34, 0x74, 0x71, 0x4d, 0xf1, 0x57, 0x59, 0x31, 0x1c, 0xf9, 0xcc, 0xaa, 0xd6, 0x3a, - 0x30, 0xe4, 0x29, 0x98, 0x28, 0x5b, 0x4c, 0x43, 0x65, 0x6d, 0x6a, 0xf2, 0xfc, 0x96, 0x3d, 0x1a, - 0xba, 0xda, 0x4c, 0xf5, 0x40, 0x56, 0xa1, 0xec, 0x05, 0x92, 0x8b, 0x33, 0xe6, 0x3b, 0xc5, 0x65, - 0x63, 0xd5, 0x68, 0x4d, 0x8d, 0x86, 0xee, 0x18, 0xa3, 0xe3, 0x19, 0xa1, 0xb0, 0xc4, 0xcf, 0x98, - 0x3f, 0x60, 0xd2, 0x0b, 0x83, 0xce, 0xf1, 0x40, 0xe8, 0x49, 0xcc, 0xbb, 0x61, 0x70, 0x1c, 0x3b, - 0x26, 0x3a, 0x93, 0xd1, 0xd0, 0x9d, 0xce, 0x68, 0x87, 0x5e, 0x9f, 0xd3, 0x85, 0x6c, 0xbd, 0x95, - 0x78, 0xb5, 0xb5, 0x13, 0xe9, 0xc0, 0x23, 0x9f, 0xc5, 0xb2, 0x93, 0x31, 0x1c, 0x0b, 0xd3, 0xb2, - 0xd8, 0xd0, 0x45, 0xd1, 0x48, 0x8b, 0xa2, 0x71, 0x98, 0x16, 0x45, 0x6b, 0xf1, 0x62, 0xe8, 0xe6, - 0xd4, 0x39, 0xca, 0x75, 0x7b, 0xec, 0xf9, 0xf6, 0x37, 0xd7, 0xa0, 0x37, 0x30, 0xe2, 0x82, 0xe9, - 0x7b, 0x7d, 0x4f, 0x3a, 0xf6, 0xb2, 0xb1, 0x5a, 0xd0, 0xef, 0x47, 0x80, 0xea, 0x81, 0x9c, 0xc1, - 0xfc, 0x1d, 0x92, 0x77, 0xca, 0xef, 0x55, 0x19, 0xad, 0xa5, 0xd1, 0xd0, 0xbd, 0xab, 0x3a, 0xe8, - 0x5d, 0x9b, 0xd7, 0x03, 0x28, 0xaa, 0x8c, 0x90, 0x67, 0x60, 0x0b, 0xde, 0x0d, 0xc5, 0xb1, 0x52, - 0x99, 0x96, 0xe4, 0xdc, 0x38, 0x65, 0xa9, 0x41, 0x31, 0x77, 0x73, 0x34, 0x63, 0x92, 0x15, 0x30, - 0x99, 0xcf, 0x85, 0x44, 0x11, 0x54, 0xd6, 0xaa, 0xa9, 0xcb, 0x86, 0x02, 0x95, 0x82, 0xd1, 0x3a, - 0xa1, 0xd2, 0x9f, 0x0a, 0x50, 0x45, 0xe3, 0x5e, 0x10, 0x4b, 0x16, 0x74, 0x39, 0x79, 0x01, 0x16, - 0xf6, 0xa8, 0xf8, 0x66, 0x25, 0xbc, 0xde, 0x57, 0x70, 0x9b, 0xcb, 0xd6, 0x74, 0x12, 0xe9, 0x84, - 0x48, 0x93, 0x91, 0xec, 0x42, 0x85, 0x05, 0x41, 0x28, 0x31, 0xc6, 0x71, 0x72, 0x87, 0x5b, 0xfc, - 0x9f, 0x24, 0xfe, 0x93, 0x6c, 0x3a, 0xb9, 0x20, 0xeb, 0x60, 0xc6, 0x92, 0x49, 0xee, 0x14, 0x30, - 0xd8, 0xe4, 0xda, 0x3b, 0xda, 0xca, 0xa2, 0x73, 0x86, 0x24, 0xaa, 0x07, 0xd2, 0x06, 0x9b, 0x75, - 0xa5, 0x77, 0xc6, 0x3b, 0x4c, 0xa2, 0x68, 0xef, 0xd1, 0xcb, 0x68, 0xe8, 0x12, 0xed, 0xb0, 0x21, - 0x3f, 0x0f, 0xfb, 0x9e, 0xe4, 0xfd, 0x48, 0x9e, 0xa3, 0x5e, 0xca, 0x29, 0xae, 0x94, 0xa2, 0x64, - 0xc3, 0x51, 0xc8, 0xb6, 0x3e, 0x15, 0x01, 0xaa, 0x87, 0x7f, 0x52, 0x8a, 0xf5, 0x7f, 0x2a, 0xe5, - 0x17, 0x13, 0x4c, 0x0c, 0x47, 0x16, 0x2c, 0xe3, 0x03, 0x82, 0x95, 0xf6, 0x92, 0xfc, 0xad, 0xbd, - 0xc4, 0x05, 0xf3, 0xcd, 0x80, 0x8b, 0x73, 0x8c, 0x7f, 0xf2, 0x6a, 0x04, 0xa8, 0x1e, 0xc8, 0x97, - 0x30, 0xf3, 0xb7, 0x52, 0x9f, 0xe8, 0x13, 0xa9, 0x8d, 0x3e, 0x3a, 0xbe, 0x51, 0xda, 0x99, 0xbc, - 0xcc, 0xff, 0x28, 0x2f, 0xeb, 0xdf, 0xcb, 0xeb, 0x05, 0x58, 0x58, 0x08, 0xb1, 0x53, 0xc2, 0x6e, - 0x38, 0x77, 0x2d, 0x64, 0x69, 0x29, 0xe8, 0x8e, 0xac, 0x89, 0x34, 0x19, 0x49, 0x1d, 0xac, 0x53, - 0xce, 0x7c, 0x79, 0x8a, 0x7d, 
0xc0, 0xd6, 0x1c, 0x8d, 0xd0, 0x64, 0x24, 0xcf, 0x01, 0x74, 0xfb, - 0x12, 0x22, 0x14, 0xd8, 0x62, 0xec, 0xd6, 0xfc, 0x68, 0xe8, 0x3e, 0xc1, 0x2e, 0xa4, 0xc0, 0x4c, - 0x6e, 0xd4, 0x1e, 0x83, 0xf7, 0xb5, 0x52, 0x78, 0xa0, 0x56, 0x5a, 0x79, 0xd0, 0x56, 0xba, 0x0b, - 0xf3, 0xdf, 0x73, 0x1e, 0x75, 0x4e, 0x3c, 0xf5, 0x87, 0xde, 0x39, 0x09, 0xc5, 0xf8, 0xc2, 0x53, - 0x78, 0xe1, 0xc7, 0xa3, 0xa1, 0x5b, 0x55, 0x94, 0x1d, 0x64, 0xec, 0x84, 0x82, 0xce, 0x5e, 0x5b, - 0x26, 0x57, 0xad, 0xff, 0x58, 0x80, 0xea, 0xb5, 0xde, 0x76, 0xcf, 0x1f, 0xde, 0x58, 0xa4, 0xf9, - 0x3b, 0x44, 0x9a, 0x69, 0xad, 0xf0, 0xa1, 0x5a, 0xcb, 0xd2, 0x5c, 0x7c, 0xcf, 0x34, 0x9b, 0x0f, - 0x95, 0x66, 0xeb, 0x81, 0xd2, 0x5c, 0x7a, 0xc8, 0x34, 0x7f, 0xb6, 0x0e, 0x90, 0xf5, 0x13, 0x32, - 0x05, 0xe5, 0xbd, 0x83, 0x8d, 0xcd, 0xc3, 0xbd, 0x6f, 0xb6, 0x67, 0x72, 0xa4, 0x02, 0xa5, 0x57, - 0xdb, 0x07, 0x5b, 0x7b, 0x07, 0x2f, 0xf5, 0x57, 0xd6, 0xce, 0x1e, 0x55, 0xf3, 0xfc, 0xda, 0x57, - 0x60, 0xe2, 0x57, 0x16, 0x79, 0x9e, 0x4e, 0x66, 0x6f, 0xfb, 0x28, 0x5d, 0x9c, 0xbb, 0x81, 0xea, - 0x56, 0xf7, 0x85, 0xd1, 0x5a, 0xb9, 0xf8, 0xa3, 0x96, 0xbb, 0xb8, 0xac, 0x19, 0xef, 0x2e, 0x6b, - 0xc6, 0xef, 0x97, 0x35, 0xe3, 0xed, 0x55, 0x2d, 0xf7, 0xee, 0xaa, 0x96, 0xfb, 0xf5, 0xaa, 0x96, - 0x7b, 0x5d, 0x4a, 0x3e, 0xc4, 0x8f, 0x2c, 0x7c, 0xdc, 0xfa, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0xa0, 0xc1, 0xf7, 0x10, 0xa0, 0x0b, 0x00, 0x00, + // 1058 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0xc7, + 0x13, 0xf7, 0x60, 0xcf, 0xd8, 0x53, 0xc6, 0x2c, 0xdb, 0x0b, 0x62, 0x60, 0xff, 0xf2, 0x20, 0x4b, + 0xfc, 0x45, 0xa2, 0xac, 0x1d, 0x81, 0x76, 0xa3, 0x3d, 0x45, 0x98, 0x8f, 0x05, 0x09, 0x91, 0x55, + 0x1b, 0xe5, 0xb0, 0x39, 0x38, 0x8d, 0x69, 0xcc, 0x28, 0xe3, 0x99, 0xd9, 0xee, 0x36, 0x11, 0x6f, + 0xb1, 0xe7, 0xbc, 0x48, 0x5e, 0x81, 0x5b, 0xf6, 0x98, 0x93, 0x93, 0xc0, 0x29, 0x3e, 0xe4, 0x19, + 0xa2, 0xae, 0x9e, 0xb1, 0x0d, 0x81, 0xb0, 0x9b, 0x90, 0xcb, 0x54, 0x77, 0xd5, 0xaf, 0x7a, 0xea, + 0xe3, 0xd7, 0x35, 0x03, 0x0b, 0xa2, 0x1f, 0x72, 0xd9, 0xc0, 0x67, 0x72, 0xd4, 0x10, 0x49, 0xa7, + 0x9e, 0x88, 0x58, 0xc5, 0xc4, 0x51, 0xa7, 0x2c, 0x8a, 0xe5, 0xd2, 0xa2, 0x54, 0xb1, 0xe0, 0x0d, + 0x7c, 0x26, 0x47, 0x0d, 0x75, 0x9e, 0x70, 0x69, 0x20, 0x99, 0x29, 0x64, 0x47, 0x3c, 0xbc, 0x61, + 0x9a, 0xeb, 0xc6, 0xdd, 0x18, 0x97, 0x0d, 0xbd, 0x4a, 0xb5, 0x7e, 0x37, 0x8e, 0xbb, 0x21, 0x6f, + 0xe0, 0xee, 0xa8, 0x7f, 0xd2, 0x50, 0x41, 0x8f, 0x4b, 0xc5, 0x7a, 0x89, 0x01, 0xd4, 0x7e, 0xb7, + 0x60, 0x9a, 0xea, 0x50, 0x28, 0x7f, 0xdb, 0xe7, 0x52, 0x91, 0x67, 0x50, 0xd0, 0xc7, 0x7a, 0xd6, + 0xb2, 0xb5, 0x3a, 0xb3, 0xb6, 0x58, 0x37, 0x41, 0xd5, 0x27, 0x31, 0xf5, 0xc3, 0xf3, 0x84, 0x53, + 0x84, 0x91, 0x6f, 0x60, 0x31, 0x61, 0x42, 0x05, 0x2c, 0x6c, 0x0b, 0x2e, 0x93, 0x38, 0x92, 0xbc, + 0x2d, 0x95, 0x60, 0x8a, 0x77, 0xcf, 0xbd, 0x29, 0x3c, 0xc3, 0xcf, 0xce, 0x78, 0x6d, 0x80, 0x34, + 0xc5, 0xb5, 0x52, 0x18, 0x5d, 0x48, 0x6e, 0x37, 0x90, 0x15, 0x98, 0xe9, 0x31, 0xd5, 0x39, 0xe5, + 0x42, 0x9f, 0x19, 0x44, 0x5d, 0x2f, 0xbf, 0x9c, 0x5f, 0x75, 0x69, 0x25, 0xd5, 0xb6, 0x50, 0x59, + 0xfb, 0x3f, 0x14, 0x74, 0x44, 0xa4, 0x08, 0xf9, 0x8d, 0xfd, 0xfd, 0xd9, 0x1c, 0x71, 0xc1, 0xde, + 0xd8, 0xdf, 0xa6, 0x87, 0xb3, 0x16, 0x01, 0x70, 0xe8, 0xf6, 0xe6, 0x57, 0x74, 0x6b, 0x76, 0xaa, + 0xf6, 0x2d, 0x54, 0xd2, 0x34, 0xcc, 0x7b, 0xc8, 0x27, 0x60, 0x77, 0x45, 0xdc, 0x4f, 0x30, 0xd9, + 0xf2, 0xda, 0xe3, 0xc9, 0x64, 0x5f, 0x69, 0xc3, 0x6e, 0x8e, 0x1a, 0x04, 0x59, 0x82, 0xe2, 0xf7, + 0x4c, 0x44, 0x3a, 0x06, 0x9d, 0x95, 0xbb, 0x9b, 0xa3, 
0x99, 0xa2, 0x59, 0x02, 0x47, 0x70, 0xd9, + 0x0f, 0x55, 0x6d, 0x13, 0x60, 0xe4, 0x2b, 0xc9, 0x73, 0x70, 0xd0, 0x59, 0x7a, 0xd6, 0x72, 0xfe, + 0xd6, 0xf3, 0x9b, 0x30, 0x1c, 0xf8, 0x29, 0x88, 0xa6, 0xb2, 0xf6, 0x47, 0x1e, 0xdc, 0x11, 0x82, + 0xfc, 0x0f, 0x0a, 0x11, 0xeb, 0x99, 0x7e, 0xb8, 0xcd, 0xd2, 0x70, 0xe0, 0xe3, 0x9e, 0xe2, 0x53, + 0x5b, 0x4f, 0x82, 0x90, 0x9b, 0x98, 0x8c, 0x55, 0xef, 0x29, 0x3e, 0xc9, 0x33, 0xb0, 0x91, 0x66, + 0x58, 0xb6, 0xf2, 0xda, 0xf4, 0xe4, 0xfb, 0x9b, 0xee, 0x70, 0xe0, 0x1b, 0x33, 0x35, 0x82, 0xac, + 0x42, 0x29, 0x88, 0x14, 0x17, 0x67, 0x2c, 0xf4, 0x0a, 0xcb, 0xd6, 0xaa, 0xd5, 0x9c, 0x1e, 0x0e, + 0xfc, 0x91, 0x8e, 0x8e, 0x56, 0x84, 0xc2, 0x53, 0x7e, 0xc6, 0xc2, 0x3e, 0x53, 0x41, 0x1c, 0xb5, + 0x8f, 0xfb, 0xc2, 0x2c, 0x24, 0xef, 0xc4, 0xd1, 0xb1, 0xf4, 0x6c, 0x74, 0x26, 0xc3, 0x81, 0x3f, + 0x33, 0x86, 0x1d, 0x06, 0x3d, 0x4e, 0x17, 0xc7, 0xfb, 0xad, 0xd4, 0xab, 0x65, 0x9c, 0x48, 0x1b, + 0x1e, 0x85, 0x4c, 0xaa, 0xf6, 0x18, 0xe1, 0x39, 0xd8, 0x96, 0xa5, 0xba, 0x21, 0x71, 0x3d, 0x23, + 0x71, 0xfd, 0x30, 0x23, 0x71, 0x73, 0xe9, 0x62, 0xe0, 0xe7, 0xf4, 0x7b, 0xb4, 0xeb, 0xf6, 0xc8, + 0xf3, 0xdd, 0x2f, 0xbe, 0x45, 0x6f, 0xe8, 0x88, 0x0f, 0x76, 0x18, 0xf4, 0x02, 0xe5, 0xb9, 0xcb, + 0xd6, 0x6a, 0xde, 0xe4, 0x8f, 0x0a, 0x6a, 0x04, 0x39, 0x83, 0x85, 0x3b, 0x28, 0xea, 0x95, 0x3e, + 0x88, 0xc9, 0xcd, 0xa7, 0xc3, 0x81, 0x7f, 0x17, 0x9b, 0xe9, 0x5d, 0x87, 0xd7, 0x22, 0x28, 0xe8, + 0x8e, 0x90, 0xe7, 0xe0, 0x0a, 0xde, 0x89, 0xc5, 0xb1, 0x66, 0x99, 0xa1, 0xe4, 0xfc, 0xa8, 0x65, + 0x99, 0x41, 0x23, 0x77, 0x73, 0x74, 0x8c, 0x24, 0x2b, 0x60, 0xb3, 0x90, 0x0b, 0x85, 0x24, 0x28, + 0xaf, 0x55, 0x32, 0x97, 0x0d, 0xad, 0xd4, 0x0c, 0x46, 0xeb, 0x04, 0x4b, 0x7f, 0xcc, 0x43, 0x05, + 0x8d, 0x7b, 0x91, 0x54, 0x2c, 0xea, 0x70, 0xf2, 0x12, 0x1c, 0x9c, 0x29, 0xf2, 0xe6, 0x4d, 0x78, + 0xb3, 0xaf, 0xd5, 0x2d, 0xae, 0x9a, 0x33, 0x69, 0xa5, 0x53, 0x20, 0x4d, 0x25, 0xd9, 0x85, 0x32, + 0x8b, 0xa2, 0x58, 0x61, 0x8d, 0x65, 0x1a, 0xc3, 0x2d, 0xfe, 0x4f, 0x52, 0xff, 0x49, 0x34, 0x9d, + 0xdc, 0x90, 0x75, 0xb0, 0xa5, 0x62, 0x8a, 0x7b, 0x79, 0x2c, 0x36, 0xb9, 0x96, 0x47, 0x4b, 0x5b, + 0x4c, 0xcf, 0x10, 0x44, 0x8d, 0x20, 0x2d, 0x70, 0x59, 0x47, 0x05, 0x67, 0xbc, 0xcd, 0x14, 0x92, + 0xf6, 0x1e, 0xbe, 0x0c, 0x07, 0x3e, 0x31, 0x0e, 0x1b, 0xea, 0xb3, 0xb8, 0x17, 0x28, 0xde, 0x4b, + 0xd4, 0x39, 0xf2, 0xa5, 0x94, 0xe9, 0x35, 0x53, 0x34, 0x6d, 0x38, 0x12, 0xd9, 0x35, 0x6f, 0x45, + 0x05, 0x35, 0xe2, 0xef, 0x98, 0xe2, 0xfc, 0x97, 0x4c, 0xf9, 0xc9, 0x06, 0x1b, 0xcb, 0x31, 0x2e, + 0x96, 0xf5, 0x11, 0xc5, 0xca, 0x66, 0xc9, 0xd4, 0xad, 0xb3, 0xc4, 0x07, 0xfb, 0x6d, 0x9f, 0x8b, + 0x73, 0xac, 0x7f, 0x9a, 0x35, 0x2a, 0xa8, 0x11, 0xe4, 0x0b, 0x98, 0xfd, 0xcb, 0x55, 0x9f, 0x98, + 0x13, 0x99, 0x8d, 0x3e, 0x3a, 0xbe, 0x71, 0xb5, 0xc7, 0xf4, 0xb2, 0xff, 0x25, 0xbd, 0x9c, 0x7f, + 0x4e, 0xaf, 0x97, 0xe0, 0xe0, 0x45, 0x90, 0x5e, 0x11, 0xa7, 0xe1, 0xfc, 0xb5, 0x92, 0x65, 0x57, + 0xc1, 0x4c, 0x64, 0x03, 0xa4, 0xa9, 0x24, 0x35, 0x70, 0x4e, 0x39, 0x0b, 0xd5, 0x29, 0xce, 0x01, + 0xd7, 0x60, 0x8c, 0x86, 0xa6, 0x92, 0xbc, 0x00, 0x30, 0xe3, 0x4b, 0x88, 0x58, 0xe0, 0x88, 0x71, + 0x9b, 0x0b, 0xc3, 0x81, 0xff, 0x04, 0xa7, 0x90, 0x56, 0x8e, 0xe9, 0x46, 0xdd, 0x91, 0xf2, 0xbe, + 0x51, 0x0a, 0x0f, 0x34, 0x4a, 0xcb, 0x0f, 0x3a, 0x4a, 0x77, 0x61, 0xe1, 0x3b, 0xce, 0x93, 0xf6, + 0x49, 0xa0, 0x3f, 0xc0, 0xed, 0x93, 0x58, 0x8c, 0x02, 0x9e, 0xc6, 0x80, 0x1f, 0x0f, 0x07, 0x7e, + 0x45, 0x43, 0x76, 0x10, 0xb1, 0x13, 0x0b, 0x3a, 0x77, 0x6d, 0x9b, 0x86, 0x5a, 0xfb, 0x21, 0x0f, + 0x95, 0x6b, 0xb3, 0xed, 0x9e, 0x0f, 0xde, 0x88, 0xa4, 0x53, 0x77, 0x90, 0x74, 
0xcc, 0xb5, 0xfc, + 0xc7, 0x72, 0x6d, 0xdc, 0xe6, 0xc2, 0x07, 0xb6, 0xd9, 0x7e, 0xa8, 0x36, 0x3b, 0x0f, 0xd4, 0xe6, + 0xe2, 0x43, 0xb6, 0xf9, 0xd3, 0x75, 0x80, 0xf1, 0x3c, 0x21, 0xd3, 0x50, 0xda, 0x3b, 0xd8, 0xd8, + 0x3c, 0xdc, 0xfb, 0x7a, 0x7b, 0x36, 0x47, 0xca, 0x50, 0x7c, 0xbd, 0x7d, 0xb0, 0xb5, 0x77, 0xf0, + 0xca, 0xfc, 0x65, 0xed, 0xec, 0x51, 0xbd, 0x9e, 0x5a, 0xfb, 0x12, 0x6c, 0xfc, 0xcb, 0x22, 0x2f, + 0xb2, 0xc5, 0xdc, 0x6d, 0x3f, 0x91, 0x4b, 0xf3, 0x37, 0xb4, 0x66, 0xd4, 0x7d, 0x6e, 0x35, 0x57, + 0x2e, 0x7e, 0xab, 0xe6, 0x2e, 0x2e, 0xab, 0xd6, 0xfb, 0xcb, 0xaa, 0xf5, 0xeb, 0x65, 0xd5, 0x7a, + 0x77, 0x55, 0xcd, 0xbd, 0xbf, 0xaa, 0xe6, 0x7e, 0xbe, 0xaa, 0xe6, 0xde, 0x14, 0xd3, 0x1f, 0xe7, + 0x23, 0x07, 0x93, 0x5b, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xed, 0x6d, 0x27, 0x50, 0x0b, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -760,33 +755,6 @@ func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.File) > 0 { - for iNdEx := len(m.File) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.File[iNdEx]) - copy(dAtA[i:], m.File[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.File[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.RuleGroup) > 0 { - for iNdEx := len(m.RuleGroup) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RuleGroup[iNdEx]) - copy(dAtA[i:], m.RuleGroup[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleGroup[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.RuleName) > 0 { - for iNdEx := len(m.RuleName) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RuleName[iNdEx]) - copy(dAtA[i:], m.RuleName[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleName[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } if len(m.MatcherString) > 0 { for iNdEx := len(m.MatcherString) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.MatcherString[iNdEx]) @@ -1358,24 +1326,6 @@ func (m *RulesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if len(m.RuleName) > 0 { - for _, s := range m.RuleName { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.RuleGroup) > 0 { - for _, s := range m.RuleGroup { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.File) > 0 { - for _, s := range m.File { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } return n } @@ -1714,102 +1664,6 @@ func (m *RulesRequest) Unmarshal(dAtA []byte) error { } m.MatcherString = append(m.MatcherString, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuleName = append(m.RuleName, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuleGroup = append(m.RuleGroup, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.File = append(m.File, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto index f5fc8a038b..25d809ede9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto @@ -41,9 +41,6 @@ message RulesRequest { Type type = 1; PartialResponseStrategy partial_response_strategy = 2; repeated string matcher_string = 3; - repeated string rule_name = 4; - repeated string rule_group = 5; - repeated string file = 6; } message RulesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index 9aaeeca615..809dfce36b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -3,7 +3,7 @@ // Package runutil provides helpers to advanced function scheduling control like repeat or retry. // -// It's very often the case when you need to executes some code every fixed intervals or have it retried automatically. +// It's very often the case when you need to excutes some code every fixed intervals or have it retried automatically. // To make it reliably with proper timeout, you need to carefully arrange some boilerplate for this. // Below function does it for you. // diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 865ea3878d..75a85dd9fb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -42,11 +42,11 @@ import ( "google.golang.org/grpc/status" "github.com/thanos-io/objstore" - "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/indexheader" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" + "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" "github.com/thanos-io/thanos/pkg/info/infopb" @@ -117,9 +117,6 @@ const ( // SeriesBatchSize is the default batch size when fetching series from object storage. SeriesBatchSize = 10000 - - // checkContextEveryNIterations is used in some tight loops to check if the context is done. 
- checkContextEveryNIterations = 128 ) var ( @@ -381,7 +378,7 @@ type BucketStore struct { indexCache storecache.IndexCache indexReaderPool *indexheader.ReaderPool buffers sync.Pool - chunkPool pool.Pool[byte] + chunkPool pool.Bytes seriesBatchSize int // Sets of blocks that have the same labels. They are indexed by a hash over their label set. @@ -501,7 +498,7 @@ func WithQueryGate(queryGate gate.Gate) BucketStoreOption { } // WithChunkPool sets a pool.Bytes to use for chunks. -func WithChunkPool(chunkPool pool.Pool[byte]) BucketStoreOption { +func WithChunkPool(chunkPool pool.Bytes) BucketStoreOption { return func(s *BucketStore) { s.chunkPool = chunkPool } @@ -597,7 +594,7 @@ func NewBucketStore( b := make([]byte, 0, initialBufSize) return &b }}, - chunkPool: pool.NoopPool[byte]{}, + chunkPool: pool.NoopBytes{}, blocks: map[ulid.ULID]*bucketBlock{}, blockSets: map[uint64]*bucketBlockSet{}, blockSyncConcurrency: blockSyncConcurrency, @@ -950,6 +947,19 @@ func (s *BucketStore) LabelSet() []labelpb.ZLabelSet { return labelSets } +// Info implements the storepb.StoreServer interface. +func (s *BucketStore) Info(context.Context, *storepb.InfoRequest) (*storepb.InfoResponse, error) { + mint, maxt := s.TimeRange() + res := &storepb.InfoResponse{ + StoreType: component.Store.ToProto(), + MinTime: mint, + MaxTime: maxt, + LabelSets: s.LabelSet(), + } + + return res, nil +} + func (s *BucketStore) limitMinTime(mint int64) int64 { if s.filterConfig == nil { return mint @@ -995,7 +1005,6 @@ type blockSeriesClient struct { mint int64 maxt int64 - seriesLimit int indexr *bucketIndexReader chunkr *bucketChunkReader loadAggregates []storepb.Aggr @@ -1071,7 +1080,6 @@ func newBlockSeriesClient( mint: req.MinTime, maxt: req.MaxTime, - seriesLimit: int(req.Limit), indexr: b.indexReader(logger), chunkr: chunkr, seriesLimiter: seriesLimiter, @@ -1151,20 +1159,14 @@ func (b *blockSeriesClient) ExpandPostings( b.expandedPostings = make([]storage.SeriesRef, 0, len(b.lazyPostings.postings)/2) b.lazyExpandedPostingsCount.Inc() } else { - // If seriesLimit is set, it can be applied here to limit the amount of series. - // Note: This can only be done when postings are not expanded lazily. - if b.seriesLimit > 0 && len(b.lazyPostings.postings) > b.seriesLimit { - b.lazyPostings.postings = b.lazyPostings.postings[:b.seriesLimit] - } - // Apply series limiter eargerly if lazy postings not enabled. - if err := seriesLimiter.Reserve(uint64(len(b.lazyPostings.postings))); err != nil { + if err := seriesLimiter.Reserve(uint64(len(ps.postings))); err != nil { return httpgrpc.Errorf(int(codes.ResourceExhausted), "exceeded series limit: %s", err) } } - if b.batchSize > len(b.lazyPostings.postings) { - b.batchSize = len(b.lazyPostings.postings) + if b.batchSize > len(ps.postings) { + b.batchSize = len(ps.postings) } b.entries = make([]seriesEntry, 0, b.batchSize) @@ -1286,11 +1288,6 @@ OUTER: } seriesMatched++ - if b.seriesLimit > 0 && seriesMatched > b.seriesLimit { - // Exit early if seriesLimit is set. 
- b.hasMorePostings = false - break - } s := seriesEntry{lset: completeLabelset} if b.skipChunks { b.entries = append(b.entries, s) @@ -1694,12 +1691,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store tracing.DoInSpan(ctx, "bucket_store_merge_all", func(ctx context.Context) { begin := time.Now() set := NewResponseDeduplicator(NewProxyResponseLoserTree(respSets...)) - i := 0 for set.Next() { - i++ - if req.Limit > 0 && i > int(req.Limit) { - break - } at := set.At() warn := at.GetWarning() if warn != "" { @@ -1855,7 +1847,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq } }) - result = strutil.MergeSlices(int(req.Limit), res, extRes) + result = strutil.MergeSlices(res, extRes) } else { seriesReq := &storepb.SeriesRequest{ MinTime: req.Start, @@ -1950,10 +1942,8 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label names response hints").Error()) } - names := strutil.MergeSlices(int(req.Limit), sets...) - return &storepb.LabelNamesResponse{ - Names: names, + Names: strutil.MergeSlices(sets...), Hints: anyHints, }, nil } @@ -1967,7 +1957,7 @@ func (b *bucketBlock) FilterExtLabelsMatchers(matchers []*labels.Matcher) ([]*la // If value is empty string the matcher is a valid one since it's not part of external labels. if v == "" { result = append(result, m) - } else if v != "" && !m.Matches(v) { + } else if v != "" && v != m.Value { // If matcher is external label but value is different we don't want to look in block anyway. return []*labels.Matcher{}, false } @@ -2069,7 +2059,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR // Add the external label value as well. if extLabelValue := b.extLset.Get(req.Label); extLabelValue != "" { - res = strutil.MergeSlices(int(req.Limit), res, []string{extLabelValue}) + res = strutil.MergeSlices(res, []string{extLabelValue}) } result = res } else { @@ -2167,10 +2157,8 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label values response hints").Error()) } - vals := strutil.MergeSlices(int(req.Limit), sets...) - return &storepb.LabelValuesResponse{ - Values: vals, + Values: strutil.MergeSlices(sets...), Hints: anyHints, }, nil } @@ -2318,7 +2306,7 @@ type bucketBlock struct { meta *metadata.Meta dir string indexCache storecache.IndexCache - chunkPool pool.Pool[byte] + chunkPool pool.Bytes extLset labels.Labels indexHeaderReader indexheader.Reader @@ -2344,7 +2332,7 @@ func newBucketBlock( bkt objstore.BucketReader, dir string, indexCache storecache.IndexCache, - chunkPool pool.Pool[byte], + chunkPool pool.Bytes, indexHeadReader indexheader.Reader, p Partitioner, maxSeriesSizeFunc BlockEstimator, @@ -2617,15 +2605,10 @@ func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatch } // ExpandPostingsWithContext returns the postings expanded as a slice and considers context. 
-func ExpandPostingsWithContext(ctx context.Context, p index.Postings) ([]storage.SeriesRef, error) { - res := make([]storage.SeriesRef, 0, 1024) // Pre-allocate slice with initial capacity - i := 0 +func ExpandPostingsWithContext(ctx context.Context, p index.Postings) (res []storage.SeriesRef, err error) { for p.Next() { - i++ - if i%checkContextEveryNIterations == 0 { - if err := ctx.Err(); err != nil { - return nil, err - } + if ctx.Err() != nil { + return nil, ctx.Err() } res = append(res, p.At()) } @@ -2848,8 +2831,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er return nil, nil, err } - for i, val := range vals { - if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { + for _, val := range vals { + if ctx.Err() != nil { return nil, nil, ctx.Err() } if !m.Matches(val) { @@ -2877,8 +2860,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er } var toAdd []string - for i, val := range vals { - if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { + for _, val := range vals { + if ctx.Err() != nil { return nil, nil, ctx.Err() } if m.Matches(val) { @@ -2981,10 +2964,8 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab // If we have a miss, mark key to be fetched in `ptrs` slice. // Overlaps are well handled by partitioner, so we don't need to deduplicate keys. for ix, key := range keys { - if (ix+1)%checkContextEveryNIterations == 0 { - if err := ctx.Err(); err != nil { - return nil, closeFns, err - } + if err := ctx.Err(); err != nil { + return nil, closeFns, err } // Get postings for the given key from cache first. if b, ok := fromCache[key]; ok { @@ -3586,10 +3567,10 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a bufPooled, err := r.block.chunkPool.Get(r.block.estimatedMaxChunkSize) if err == nil { buf = *bufPooled - defer r.block.chunkPool.Put(&buf) } else { buf = make([]byte, r.block.estimatedMaxChunkSize) } + defer r.block.chunkPool.Put(&buf) for i, pIdx := range pIdxs { // Fast forward range reader to the next chunk start in case of sparse (for our purposes) byte range. @@ -3865,6 +3846,6 @@ func (s *queryStats) toHints() *hintspb.QueryStats { } // NewDefaultChunkBytesPool returns a chunk bytes pool with default settings. -func NewDefaultChunkBytesPool(maxChunkPoolBytes uint64) (pool.Pool[byte], error) { - return pool.NewBucketedPool[byte](chunkBytesPoolMinSize, chunkBytesPoolMaxSize, 2, maxChunkPoolBytes) +func NewDefaultChunkBytesPool(maxChunkPoolBytes uint64) (pool.Bytes, error) { + return pool.NewBucketedBytes(chunkBytesPoolMinSize, chunkBytesPoolMaxSize, 2, maxChunkPoolBytes) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go index 3a8ddbb86d..42e6de55a7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go @@ -30,12 +30,7 @@ var ( } ) -const ( - maxInt = int(^uint(0) >> 1) - - // checkContextEveryNIterations is used in some tight loops to check if the context is done. 
- checkContextEveryNIterations = 128 -) +const maxInt = int(^uint(0) >> 1) type InMemoryIndexCache struct { mtx sync.Mutex @@ -307,13 +302,11 @@ func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID uli blockIDKey := blockID.String() requests := 0 hit := 0 - for i, key := range keys { - if (i+1)%checkContextEveryNIterations == 0 { - if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit)) - return hits, misses - } + for _, key := range keys { + if ctx.Err() != nil { + c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit)) + return hits, misses } requests++ if b, ok := c.get(CacheKey{blockIDKey, CacheKeyPostings(key), ""}); ok { @@ -370,13 +363,11 @@ func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid. blockIDKey := blockID.String() requests := 0 hit := 0 - for i, id := range ids { - if (i+1)%checkContextEveryNIterations == 0 { - if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit)) - return hits, misses - } + for _, id := range ids { + if ctx.Err() != nil { + c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit)) + return hits, misses } requests++ if b, ok := c.get(CacheKey{blockIDKey, CacheKeySeries(id), ""}); ok { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go index 38a0f61822..a72ce0d664 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go @@ -66,8 +66,8 @@ func (c *TracingIndexCache) FetchExpandedPostings(ctx context.Context, blockID u return data, exists } -// StoreSeries stores a single series. Skip instrumenting this method -// excessive spans as a single request can store millions of series. +// StoreSeries stores a single series. Skip intrumenting this method +// excessive spans as a single request can store millions of serieses. func (c *TracingIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { c.cache.StoreSeries(blockID, id, v, tenant) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/io.go b/vendor/github.com/thanos-io/thanos/pkg/store/io.go index f2356e6759..657f3134d2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/io.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/io.go @@ -109,7 +109,7 @@ func readByteRanges(src io.Reader, dst []byte, byteRanges byteRanges) ([]byte, e if err != nil { // We get an ErrUnexpectedEOF if EOF is reached before we fill the slice. // Due to how the reading logic works in the bucket store, we may try to overread - // the last byte range so, if the error occurs on the last one, we consider it legit. + // the last byte range so, if the error occurrs on the last one, we consider it legit. 
if err == io.ErrUnexpectedEOF && idx == len(byteRanges)-1 { return dst, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go index f8363ab477..1858b7dee4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go @@ -308,8 +308,8 @@ func fetchAndExpandPostingGroups(ctx context.Context, r *bucketIndexReader, post result := index.Without(index.Intersect(groupAdds...), index.Merge(ctx, groupRemovals...)) - if err := ctx.Err(); err != nil { - return nil, nil, err + if ctx.Err() != nil { + return nil, nil, ctx.Err() } ps, err := ExpandPostingsWithContext(ctx, result) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/local.go b/vendor/github.com/thanos-io/thanos/pkg/store/local.go index cb80f8f8cb..4e88c0a7e3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/local.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/local.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "io" + "math" "sort" "github.com/go-kit/log" @@ -32,7 +33,8 @@ type LocalStore struct { logger log.Logger extLabels labels.Labels - c io.Closer + info *storepb.InfoResponse + c io.Closer // TODO(bwplotka): This is very naive in-memory DB. We can support much larger files, by // indexing labels, symbolizing strings and get chunk refs only without storing protobufs in memory. @@ -65,6 +67,14 @@ func NewLocalStoreFromJSONMmappableFile( logger: logger, extLabels: extLabels, c: f, + info: &storepb.InfoResponse{ + LabelSets: []labelpb.ZLabelSet{ + {Labels: labelpb.ZLabelsFromPromLabels(extLabels)}, + }, + StoreType: component.ToProto(), + MinTime: math.MaxInt64, + MaxTime: math.MinInt64, + }, } // Do quick pass for in-mem index. @@ -91,7 +101,13 @@ func NewLocalStoreFromJSONMmappableFile( } chks := make([]int, 0, len(series.Chunks)) // Sort chunks in separate slice by MinTime for easier lookup. Find global max and min. - for ci := range series.Chunks { + for ci, c := range series.Chunks { + if s.info.MinTime > c.MinTime { + s.info.MinTime = c.MinTime + } + if s.info.MaxTime < c.MaxTime { + s.info.MaxTime = c.MaxTime + } chks = append(chks, ci) } @@ -105,7 +121,7 @@ func NewLocalStoreFromJSONMmappableFile( if err := skanner.Err(); err != nil { return nil, errors.Wrapf(err, "scanning file %s", path) } - level.Info(logger).Log("msg", "loading JSON file succeeded", "file", path, "series", len(s.series)) + level.Info(logger).Log("msg", "loading JSON file succeeded", "file", path, "info", s.info.String(), "series", len(s.series)) return s, nil } @@ -127,6 +143,11 @@ func ScanGRPCCurlProtoStreamMessages(data []byte, atEOF bool) (advance int, toke return len(delim), nil, nil } +// Info returns store information about the Prometheus instance. +func (s *LocalStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + return s.info, nil +} + // Series returns all series for a requested time range and label matcher. The returned data may // exceed the requested time bounds. 
func (s *LocalStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go index e5e472b675..f1f89fbd44 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/index" - extsnappy "github.com/thanos-io/thanos/pkg/extgrpc/snappy" "github.com/thanos-io/thanos/pkg/pool" ) @@ -193,7 +192,7 @@ func maximumDecodedLenSnappyStreamed(in []byte) (int, error) { return maxDecodedLen, nil } -var decodedBufPool = pool.MustNewBucketedPool[byte](1024, 65536, 2, 0) +var decodedBufPool = pool.MustNewBucketedBytes(1024, 65536, 2, 0) func newStreamedDiffVarintPostings(input []byte, disablePooling bool) (closeablePostings, error) { // We can't use the regular s2.Reader because it assumes a stream. @@ -450,7 +449,7 @@ func diffVarintEncodeNoHeader(p index.Postings, length int) ([]byte, error) { } // Creating 15 buckets from 1k to 32mb. -var snappyDecodePool = pool.MustNewBucketedPool[byte](1024, 32*1024*1024, 2, 0) +var snappyDecodePool = pool.MustNewBucketedBytes(1024, 32*1024*1024, 2, 0) type closeablePostings interface { index.Postings diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 11d7f1ff77..721e9ed51e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -24,7 +24,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -110,6 +109,31 @@ func (p *PrometheusStore) labelCallsSupportMatchers() bool { return parseErr == nil && version.GTE(baseVer) } +// Info returns store information about the Prometheus instance. +// NOTE(bwplotka): MaxTime & MinTime are not accurate nor adjusted dynamically. +// This is fine for now, but might be needed in future. +func (p *PrometheusStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + lset := p.externalLabelsFn() + mint, maxt := p.timestamps() + + res := &storepb.InfoResponse{ + Labels: labelpb.ZLabelsFromPromLabels(lset), + StoreType: p.component.ToProto(), + MinTime: mint, + MaxTime: maxt, + } + + // Until we deprecate the single labels in the reply, we just duplicate + // them here for migration/compatibility purposes. 
+ res.LabelSets = []labelpb.ZLabelSet{} + if len(res.Labels) > 0 { + res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{ + Labels: res.Labels, + }) + } + return res, nil +} + func (p *PrometheusStore) getBuffer() *[]byte { b := p.buffers.Get() return b.(*[]byte) @@ -149,7 +173,7 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Sto if r.SkipChunks { finalExtLset := rmLabels(extLset.Copy(), extLsetToRemove) - labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime, int(r.Limit)) + labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime) if err != nil { return err } @@ -296,7 +320,7 @@ func (p *PrometheusStore) handleStreamedPrometheusResponse( seriesStats := &storepb.SeriesStatsCounter{} // TODO(bwplotka): Put read limit as a flag. - stream := remote.NewChunkedReader(bodySizer, config.DefaultChunkedReadLimit, *data) + stream := remote.NewChunkedReader(bodySizer, remote.DefaultChunkedReadLimit, *data) hasher := hashPool.Get().(hash.Hash64) defer hashPool.Put(hasher) for { @@ -547,12 +571,12 @@ func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesR var lbls []string if len(matchers) == 0 || p.labelCallsSupportMatchers() { - lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } } else { - sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } @@ -618,7 +642,7 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue if len(matchers) == 0 { return &storepb.LabelValuesResponse{Values: []string{val}}, nil } - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } @@ -629,12 +653,12 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue } if len(matchers) == 0 || p.labelCallsSupportMatchers() { - vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End, int(r.Limit)) + vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End) if err != nil { return nil, err } } else { - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index 498c80e2e7..0ac1fc659c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -71,9 +71,6 @@ type Client interface { // Addr returns address of the store client. If second parameter is true, the client // represents a local client (server-as-client) and has no remote address. Addr() (addr string, isLocalClient bool) - - // Matches returns true if provided label matchers are allowed in the store. - Matches(matches []*labels.Matcher) bool } // ProxyStore implements the store API that proxies request to all given underlying stores. 
@@ -89,7 +86,6 @@ type ProxyStore struct { retrievalStrategy RetrievalStrategy debugLogging bool tsdbSelector *TSDBSelector - enableDedup bool } type proxyStoreMetrics struct { @@ -130,13 +126,6 @@ func WithTSDBSelector(selector *TSDBSelector) ProxyStoreOption { } } -// WithoutDedup disabled chunk deduplication when streaming series. -func WithoutDedup() ProxyStoreOption { - return func(s *ProxyStore) { - s.enableDedup = false - } -} - // NewProxyStore returns a new ProxyStore that uses the given clients that implements storeAPI to fan-in all series to the client. // Note that there is no deduplication support. Deduplication should be done on the highest level (just before PromQL). func NewProxyStore( @@ -167,7 +156,6 @@ func NewProxyStore( metrics: metrics, retrievalStrategy: retrievalStrategy, tsdbSelector: DefaultSelector, - enableDedup: true, } for _, option := range options { @@ -177,6 +165,62 @@ func NewProxyStore( return s } +// Info returns store information about the external labels this store have. +func (s *ProxyStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + res := &storepb.InfoResponse{ + StoreType: s.component.ToProto(), + Labels: labelpb.ZLabelsFromPromLabels(s.selectorLabels), + } + + minTime := int64(math.MaxInt64) + maxTime := int64(0) + stores := s.stores() + + // Edge case: we have no data if there are no stores. + if len(stores) == 0 { + res.MaxTime = 0 + res.MinTime = 0 + + return res, nil + } + + for _, s := range stores { + mint, maxt := s.TimeRange() + if mint < minTime { + minTime = mint + } + if maxt > maxTime { + maxTime = maxt + } + } + + res.MaxTime = maxTime + res.MinTime = minTime + + labelSets := make(map[uint64]labelpb.ZLabelSet, len(stores)) + for _, st := range stores { + for _, lset := range st.LabelSets() { + mergedLabelSet := labelpb.ExtendSortedLabels(lset, s.selectorLabels) + labelSets[mergedLabelSet.Hash()] = labelpb.ZLabelSet{Labels: labelpb.ZLabelsFromPromLabels(mergedLabelSet)} + } + } + + res.LabelSets = make([]labelpb.ZLabelSet, 0, len(labelSets)) + for _, v := range labelSets { + res.LabelSets = append(res.LabelSets, v) + } + + // We always want to enforce announcing the subset of data that + // selector-labels represents. If no label-sets are announced by the + // store-proxy's discovered stores, then we still want to enforce + // announcing this subset by announcing the selector as the label-set. + if len(res.LabelSets) == 0 && len(res.Labels) > 0 { + res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{Labels: res.Labels}) + } + + return res, nil +} + func (s *ProxyStore) LabelSet() []labelpb.ZLabelSet { stores := s.stores() if len(stores) == 0 { @@ -242,7 +286,7 @@ func (s *ProxyStore) TSDBInfos() []infopb.TSDBInfo { func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be - // triggered by tracing span to reduce cognitive load. + // tiggered by tracing span to reduce cognitive load. reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -283,7 +327,6 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. 
r := &storepb.SeriesRequest{ MinTime: originalRequest.MinTime, MaxTime: originalRequest.MaxTime, - Limit: originalRequest.Limit, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), Aggregates: originalRequest.Aggregates, MaxResolutionWindow: originalRequest.MaxResolutionWindow, @@ -319,17 +362,8 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. level.Debug(reqLogger).Log("msg", "Series: started fanout streams", "status", strings.Join(storeDebugMsgs, ";")) - var respHeap seriesStream = NewProxyResponseLoserTree(storeResponses...) - if s.enableDedup { - respHeap = NewResponseDeduplicator(respHeap) - } - - i := 0 + respHeap := NewResponseDeduplicator(NewProxyResponseLoserTree(storeResponses...)) for respHeap.Next() { - i++ - if r.Limit > 0 && i > int(r.Limit) { - break - } resp := respHeap.At() if resp.GetWarning() != "" && (r.PartialResponseDisabled || r.PartialResponseStrategy == storepb.PartialResponseStrategy_ABORT) { @@ -337,7 +371,6 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. } if err := srv.Send(resp); err != nil { - level.Error(reqLogger).Log("msg", "failed to stream response", "error", err) return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) } } @@ -348,7 +381,7 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. // LabelNames returns all known label names. func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be - // triggered by tracing span to reduce cognitive load. + // tiggered by tracing span to reduce cognitive load. reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -386,7 +419,6 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La End: originalRequest.End, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), WithoutReplicaLabels: originalRequest.WithoutReplicaLabels, - Hints: originalRequest.Hints, } var ( @@ -433,10 +465,8 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La return nil, err } - result := strutil.MergeUnsortedSlices(int(originalRequest.Limit), names...) - return &storepb.LabelNamesResponse{ - Names: result, + Names: strutil.MergeUnsortedSlices(names...), Warnings: warnings, }, nil } @@ -446,7 +476,7 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L *storepb.LabelValuesResponse, error, ) { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be - // triggered by tracing span to reduce cognitive load. + // tiggered by tracing span to reduce cognitive load. 
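The LabelNames and LabelValues hunks above restore the unlimited strutil.MergeUnsortedSlices(names...) call. Purely as an illustration (not the actual strutil code), merging the per-store results amounts to de-duplicating and sorting the combined slices:

package main

import (
	"fmt"
	"sort"
)

// mergeUnsorted combines several unsorted string slices into one sorted,
// de-duplicated slice, with no limit applied.
func mergeUnsorted(slices ...[]string) []string {
	seen := map[string]struct{}{}
	var out []string
	for _, s := range slices {
		for _, v := range s {
			if _, ok := seen[v]; ok {
				continue // already collected from another store
			}
			seen[v] = struct{}{}
			out = append(out, v)
		}
	}
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(mergeUnsorted(
		[]string{"job", "instance"},
		[]string{"instance", "cluster"},
	)) // [cluster instance job]
}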
reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -490,7 +520,6 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L End: originalRequest.End, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), WithoutReplicaLabels: originalRequest.WithoutReplicaLabels, - Limit: originalRequest.Limit, } var ( @@ -538,10 +567,8 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L return nil, err } - vals := strutil.MergeUnsortedSlices(int(originalRequest.Limit), all...) - return &storepb.LabelValuesResponse{ - Values: vals, + Values: strutil.MergeUnsortedSlices(all...), Warnings: warnings, }, nil } @@ -565,32 +592,26 @@ func (s *ProxyStore) matchingStores(ctx context.Context, minTime, maxTime int64, ) for _, st := range s.stores() { // We might be able to skip the store if its meta information indicates it cannot have series matching our query. - if ok, reason := storeMatches(ctx, s.debugLogging, st, minTime, maxTime, matchers...); !ok { - if s.debugLogging { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, reason)) - } + if ok, reason := storeMatches(ctx, st, minTime, maxTime, matchers...); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, reason)) continue } matches, extraMatchers := s.tsdbSelector.MatchLabelSets(st.LabelSets()...) if !matches { - if s.debugLogging { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, "tsdb selector")) - } + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, "tsdb selector")) continue } storeLabelSets = append(storeLabelSets, extraMatchers...) stores = append(stores, st) - if s.debugLogging { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) - } + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) } return stores, storeLabelSets, storeDebugMsgs } // storeMatches returns boolean if the given store may hold data for the given label matchers, time ranges and debug store matches gathered from context. -func storeMatches(ctx context.Context, debugLogging bool, s Client, mint, maxt int64, matchers ...*labels.Matcher) (ok bool, reason string) { +func storeMatches(ctx context.Context, s Client, mint, maxt int64, matchers ...*labels.Matcher) (ok bool, reason string) { var storeDebugMatcher [][]*labels.Matcher if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { if value, ok := ctxVal.([][]*labels.Matcher); ok { @@ -600,35 +621,22 @@ func storeMatches(ctx context.Context, debugLogging bool, s Client, mint, maxt i storeMinTime, storeMaxTime := s.TimeRange() if mint > storeMaxTime || maxt < storeMinTime { - const s string = "does not have data within this time period" - if debugLogging { - return false, fmt.Sprintf("%s: [%v,%v]. Store time ranges: [%v,%v]", s, mint, maxt, storeMinTime, storeMaxTime) - } - return false, s + return false, fmt.Sprintf("does not have data within this time period: [%v,%v]. Store time ranges: [%v,%v]", mint, maxt, storeMinTime, storeMaxTime) } - if ok, reason := storeMatchDebugMetadata(s, debugLogging, storeDebugMatcher); !ok { + if ok, reason := storeMatchDebugMetadata(s, storeDebugMatcher); !ok { return false, reason } extLset := s.LabelSets() if !LabelSetsMatch(matchers, extLset...) 
{ - const s string = "external labels does not match request label matchers" - if debugLogging { - return false, fmt.Sprintf("external labels %v does not match request label matchers: %v", extLset, matchers) - } - return false, s - } - - if !s.Matches(matchers) { - return false, fmt.Sprintf("store does not match filter for matchers: %v", matchers) + return false, fmt.Sprintf("external labels %v does not match request label matchers: %v", extLset, matchers) } - return true, "" } // storeMatchDebugMetadata return true if the store's address match the storeDebugMatchers. -func storeMatchDebugMetadata(s Client, debugLogging bool, storeDebugMatchers [][]*labels.Matcher) (ok bool, reason string) { +func storeMatchDebugMetadata(s Client, storeDebugMatchers [][]*labels.Matcher) (ok bool, reason string) { if len(storeDebugMatchers) == 0 { return true, "" } @@ -643,11 +651,7 @@ func storeMatchDebugMetadata(s Client, debugLogging bool, storeDebugMatchers [][ match = match || LabelSetsMatch(sm, labels.FromStrings("__address__", addr)) } if !match { - const s string = "__address__ does not match debug store metadata matchers" - if debugLogging { - return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", addr, storeDebugMatchers) - } - return false, s + return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", addr, storeDebugMatchers) } return true, "" } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go index 29d1e6560a..e2764d574a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go @@ -26,13 +26,8 @@ import ( "github.com/thanos-io/thanos/pkg/tracing" ) -type seriesStream interface { - Next() bool - At() *storepb.SeriesResponse -} - type responseDeduplicator struct { - h seriesStream + h *losertree.Tree[*storepb.SeriesResponse, respSet] bufferedSameSeries []*storepb.SeriesResponse @@ -41,23 +36,20 @@ type responseDeduplicator struct { prev *storepb.SeriesResponse ok bool - - chunkDedupMap map[uint64]storepb.AggrChunk } // NewResponseDeduplicator returns a wrapper around a loser tree that merges duplicated series messages into one. // It also deduplicates identical chunks identified by the same checksum from each series message. 
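Before the hunk continues, a standalone sketch of the checksum-based chunk de-duplication the comment above describes. Hashing the raw chunk bytes with xxhash.Sum64 mirrors the call used in the vendored code; everything else here is a simplification, not the actual responseDeduplicator.

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// dedupChunks keeps only the first occurrence of each identical raw chunk,
// identified by its xxhash checksum.
func dedupChunks(raw [][]byte) [][]byte {
	seen := map[uint64]struct{}{}
	var out [][]byte
	for _, c := range raw {
		h := xxhash.Sum64(c)
		if _, ok := seen[h]; ok {
			continue // identical chunk already emitted by another store
		}
		seen[h] = struct{}{}
		out = append(out, c)
	}
	return out
}

func main() {
	chunks := [][]byte{[]byte("chunk-a"), []byte("chunk-a"), []byte("chunk-b")}
	fmt.Println(len(dedupChunks(chunks))) // 2
}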
-func NewResponseDeduplicator(h seriesStream) *responseDeduplicator { +func NewResponseDeduplicator(h *losertree.Tree[*storepb.SeriesResponse, respSet]) *responseDeduplicator { ok := h.Next() var prev *storepb.SeriesResponse if ok { prev = h.At() } return &responseDeduplicator{ - h: h, - ok: ok, - prev: prev, - chunkDedupMap: make(map[uint64]storepb.AggrChunk), + h: h, + ok: ok, + prev: prev, } } @@ -81,7 +73,7 @@ func (d *responseDeduplicator) Next() bool { d.ok = d.h.Next() if !d.ok { if len(d.bufferedSameSeries) > 0 { - d.bufferedResp = append(d.bufferedResp, d.chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) + d.bufferedResp = append(d.bufferedResp, chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) } return len(d.bufferedResp) > 0 } @@ -109,15 +101,15 @@ func (d *responseDeduplicator) Next() bool { continue } - d.bufferedResp = append(d.bufferedResp, d.chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) + d.bufferedResp = append(d.bufferedResp, chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) d.prev = s return true } } -func (d *responseDeduplicator) chainSeriesAndRemIdenticalChunks(series []*storepb.SeriesResponse) *storepb.SeriesResponse { - clear(d.chunkDedupMap) +func chainSeriesAndRemIdenticalChunks(series []*storepb.SeriesResponse) *storepb.SeriesResponse { + chunkDedupMap := map[uint64]*storepb.AggrChunk{} for _, s := range series { for _, chk := range s.GetSeries().Chunks { @@ -132,9 +124,9 @@ func (d *responseDeduplicator) chainSeriesAndRemIdenticalChunks(series []*storep hash = xxhash.Sum64(field.Data) } - if _, ok := d.chunkDedupMap[hash]; !ok { + if _, ok := chunkDedupMap[hash]; !ok { chk := chk - d.chunkDedupMap[hash] = chk + chunkDedupMap[hash] = &chk break } } @@ -142,13 +134,13 @@ func (d *responseDeduplicator) chainSeriesAndRemIdenticalChunks(series []*storep } // If no chunks were requested. - if len(d.chunkDedupMap) == 0 { + if len(chunkDedupMap) == 0 { return series[0] } - finalChunks := make([]storepb.AggrChunk, 0, len(d.chunkDedupMap)) - for _, chk := range d.chunkDedupMap { - finalChunks = append(finalChunks, chk) + finalChunks := make([]storepb.AggrChunk, 0, len(chunkDedupMap)) + for _, chk := range chunkDedupMap { + finalChunks = append(finalChunks, *chk) } sort.Slice(finalChunks, func(i, j int) bool { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go index d5461a5947..faed79bc7b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go @@ -329,10 +329,7 @@ func (m *Chunk) Compare(b *Chunk) int { func (x *PartialResponseStrategy) UnmarshalJSON(entry []byte) error { fieldStr, err := strconv.Unquote(string(entry)) if err != nil { - return errors.Wrapf( - err, - "failed to unqote %v, in order to unmarshal as 'partial_response_strategy'. Possible values are %s", string(entry), strings.Join(PartialResponseStrategyValues, ","), - ) + return errors.Wrapf(err, fmt.Sprintf("failed to unqote %v, in order to unmarshal as 'partial_response_strategy'. Possible values are %s", string(entry), strings.Join(PartialResponseStrategyValues, ","))) } if fieldStr == "" { @@ -343,11 +340,7 @@ func (x *PartialResponseStrategy) UnmarshalJSON(entry []byte) error { strategy, ok := PartialResponseStrategy_value[strings.ToUpper(fieldStr)] if !ok { - return errors.Errorf( - "failed to unmarshal %v as 'partial_response_strategy'. 
Possible values are %s", - string(entry), - strings.Join(PartialResponseStrategyValues, ","), - ) + return errors.Errorf(fmt.Sprintf("failed to unmarshal %v as 'partial_response_strategy'. Possible values are %s", string(entry), strings.Join(PartialResponseStrategyValues, ","))) } *x = PartialResponseStrategy(strategy) return nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go index a5b792bca1..e09210d442 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go @@ -6,97 +6,99 @@ package storepb import ( "context" "io" - "iter" "google.golang.org/grpc" ) -type inProcessServer struct { - Store_SeriesServer - ctx context.Context - yield func(response *SeriesResponse, err error) bool +func ServerAsClient(srv StoreServer) StoreClient { + return &serverAsClient{srv: srv} } -func newInProcessServer(ctx context.Context, yield func(*SeriesResponse, error) bool) *inProcessServer { - return &inProcessServer{ - ctx: ctx, - yield: yield, - } +// serverAsClient allows to use servers as clients. +// NOTE: Passing CallOptions does not work - it would be needed to be implemented in grpc itself (before, after are private). +type serverAsClient struct { + srv StoreServer } -func (s *inProcessServer) Send(resp *SeriesResponse) error { - s.yield(resp, nil) - return nil +func (s serverAsClient) Info(ctx context.Context, in *InfoRequest, _ ...grpc.CallOption) (*InfoResponse, error) { + return s.srv.Info(ctx, in) } -func (s *inProcessServer) Context() context.Context { - return s.ctx +func (s serverAsClient) LabelNames(ctx context.Context, in *LabelNamesRequest, _ ...grpc.CallOption) (*LabelNamesResponse, error) { + return s.srv.LabelNames(ctx, in) } -type inProcessClient struct { - Store_SeriesClient - ctx context.Context - next func() (*SeriesResponse, error, bool) - stop func() +func (s serverAsClient) LabelValues(ctx context.Context, in *LabelValuesRequest, _ ...grpc.CallOption) (*LabelValuesResponse, error) { + return s.srv.LabelValues(ctx, in) } -func newInProcessClient(ctx context.Context, next func() (*SeriesResponse, error, bool), stop func()) *inProcessClient { - return &inProcessClient{ - ctx: ctx, - next: next, - stop: stop, - } +func (s serverAsClient) Series(ctx context.Context, in *SeriesRequest, _ ...grpc.CallOption) (Store_SeriesClient, error) { + inSrv := &inProcessStream{recv: make(chan *SeriesResponse), err: make(chan error)} + inSrv.ctx, inSrv.cancel = context.WithCancel(ctx) + go func() { + if err := s.srv.Series(in, inSrv); err != nil { + inSrv.err <- err + } + close(inSrv.err) + close(inSrv.recv) + }() + return &inProcessClientStream{srv: inSrv}, nil } -func (c *inProcessClient) Recv() (*SeriesResponse, error) { - resp, err, ok := c.next() - if err != nil { - c.stop() - return nil, err - } - if !ok { - return nil, io.EOF - } - return resp, err -} +// TODO(bwplotka): Add streaming attributes, metadata etc. Currently those are disconnected. Follow up on https://github.com/grpc/grpc-go/issues/906. +// TODO(bwplotka): Use this in proxy.go and receiver multi tenant proxy. 
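A hedged usage sketch for the channel-backed ServerAsClient shown in this hunk: wrap an in-process StoreServer as a StoreClient and drain the Series stream until io.EOF. The srv argument and the request bounds are placeholders, and error handling is kept minimal.

package storeexample

import (
	"context"
	"io"

	"github.com/thanos-io/thanos/pkg/store/storepb"
)

// drainSeries streams every SeriesResponse from an in-process StoreServer
// through the server-as-client adapter.
func drainSeries(ctx context.Context, srv storepb.StoreServer) ([]*storepb.SeriesResponse, error) {
	client := storepb.ServerAsClient(srv)
	stream, err := client.Series(ctx, &storepb.SeriesRequest{MinTime: 0, MaxTime: 1000})
	if err != nil {
		return nil, err
	}
	var out []*storepb.SeriesResponse
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return out, nil // server closed its side; all frames received
		}
		if err != nil {
			return nil, err
		}
		out = append(out, resp)
	}
}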
+type inProcessStream struct { + grpc.ServerStream -func (c *inProcessClient) Context() context.Context { - return c.ctx + ctx context.Context + cancel context.CancelFunc + recv chan *SeriesResponse + err chan error } -func (c *inProcessClient) CloseSend() error { - c.stop() - return nil +func NewInProcessStream(ctx context.Context, bufferSize int) *inProcessStream { + return &inProcessStream{ + ctx: ctx, + recv: make(chan *SeriesResponse, bufferSize), + err: make(chan error), + } } -func ServerAsClient(srv StoreServer) StoreClient { - return &serverAsClient{srv: srv} -} +func (s *inProcessStream) Context() context.Context { return s.ctx } -// serverAsClient allows to use servers as clients. -// NOTE: Passing CallOptions does not work - it would be needed to be implemented in grpc itself (before, after are private). -type serverAsClient struct { - srv StoreServer +func (s *inProcessStream) Send(r *SeriesResponse) error { + select { + case <-s.ctx.Done(): + return s.ctx.Err() + case s.recv <- r: + return nil + } } -func (s serverAsClient) LabelNames(ctx context.Context, in *LabelNamesRequest, _ ...grpc.CallOption) (*LabelNamesResponse, error) { - return s.srv.LabelNames(ctx, in) +type inProcessClientStream struct { + grpc.ClientStream + + srv *inProcessStream } -func (s serverAsClient) LabelValues(ctx context.Context, in *LabelValuesRequest, _ ...grpc.CallOption) (*LabelValuesResponse, error) { - return s.srv.LabelValues(ctx, in) +func (s *inProcessClientStream) Context() context.Context { return s.srv.ctx } + +func (s *inProcessClientStream) CloseSend() error { + s.srv.cancel() + return nil } -func (s serverAsClient) Series(ctx context.Context, in *SeriesRequest, _ ...grpc.CallOption) (Store_SeriesClient, error) { - var srvIter iter.Seq2[*SeriesResponse, error] = func(yield func(*SeriesResponse, error) bool) { - srv := newInProcessServer(ctx, yield) - err := s.srv.Series(in, srv) - if err != nil { - yield(nil, err) - return +func (s *inProcessClientStream) Recv() (*SeriesResponse, error) { + select { + case r, ok := <-s.srv.recv: + if !ok { + return nil, io.EOF + } + return r, nil + case err, ok := <-s.srv.err: + if !ok { + return nil, io.EOF } + return nil, err } - - clientIter, stop := iter.Pull2(srvIter) - return newInProcessClient(ctx, clientIter, stop), nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go index 0da00daf4d..050b8e912f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go @@ -61,7 +61,7 @@ func SamplesFromPromqlSeries(series promql.Series) ([]Sample, []Histogram) { // HistogramProtoToHistogram extracts a (normal integer) Histogram from the // provided proto message. The caller has to make sure that the proto message -// represents an integer histogram and not a float histogram. +// represents an interger histogram and not a float histogram. 
// Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L626-L645 func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram { if hp.IsFloatHistogram() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go index 0c2d67dfb1..b5e85d69d8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go @@ -13,6 +13,8 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" types "github.com/gogo/protobuf/types" + github_com_thanos_io_thanos_pkg_store_labelpb "github.com/thanos-io/thanos/pkg/store/labelpb" + labelpb "github.com/thanos-io/thanos/pkg/store/labelpb" prompb "github.com/thanos-io/thanos/pkg/store/storepb/prompb" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -30,6 +32,48 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Deprecated. Use `thanos.info` instead. +type StoreType int32 + +const ( + StoreType_UNKNOWN StoreType = 0 + StoreType_QUERY StoreType = 1 + StoreType_RULE StoreType = 2 + StoreType_SIDECAR StoreType = 3 + StoreType_STORE StoreType = 4 + StoreType_RECEIVE StoreType = 5 + // DEBUG represents some debug StoreAPI components e.g. thanos tools store-api-serve. + StoreType_DEBUG StoreType = 6 +) + +var StoreType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "QUERY", + 2: "RULE", + 3: "SIDECAR", + 4: "STORE", + 5: "RECEIVE", + 6: "DEBUG", +} + +var StoreType_value = map[string]int32{ + "UNKNOWN": 0, + "QUERY": 1, + "RULE": 2, + "SIDECAR": 3, + "STORE": 4, + "RECEIVE": 5, + "DEBUG": 6, +} + +func (x StoreType) String() string { + return proto.EnumName(StoreType_name, int32(x)) +} + +func (StoreType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a938d55a388af629, []int{0} +} + type Aggr int32 const ( @@ -64,7 +108,7 @@ func (x Aggr) String() string { } func (Aggr) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{0} + return fileDescriptor_a938d55a388af629, []int{1} } type WriteResponse struct { @@ -142,6 +186,87 @@ func (m *WriteRequest) XXX_DiscardUnknown() { var xxx_messageInfo_WriteRequest proto.InternalMessageInfo +// Deprecated. Use `thanos.info` instead. +type InfoRequest struct { +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (m *InfoRequest) String() string { return proto.CompactTextString(m) } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a938d55a388af629, []int{2} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo + +// Deprecated. Use `thanos.info` instead. +type InfoResponse struct { + // Deprecated. 
Use label_sets instead. + Labels []github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel" json:"labels"` + MinTime int64 `protobuf:"varint,2,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,3,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + StoreType StoreType `protobuf:"varint,4,opt,name=storeType,proto3,enum=thanos.StoreType" json:"storeType,omitempty"` + // label_sets is an unsorted list of `ZLabelSet`s. + LabelSets []labelpb.ZLabelSet `protobuf:"bytes,5,rep,name=label_sets,json=labelSets,proto3" json:"label_sets"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a938d55a388af629, []int{3} +} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) +} +func (m *InfoResponse) XXX_Size() int { + return m.Size() +} +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo + type SeriesRequest struct { MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` @@ -180,15 +305,13 @@ type SeriesRequest struct { // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let client knows // server supports it. 
WithoutReplicaLabels []string `protobuf:"bytes,14,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` - // limit is used to limit the number of results returned - Limit int64 `protobuf:"varint,15,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } func (m *SeriesRequest) String() string { return proto.CompactTextString(m) } func (*SeriesRequest) ProtoMessage() {} func (*SeriesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{2} + return fileDescriptor_a938d55a388af629, []int{4} } func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -236,7 +359,7 @@ func (m *QueryHints) Reset() { *m = QueryHints{} } func (m *QueryHints) String() string { return proto.CompactTextString(m) } func (*QueryHints) ProtoMessage() {} func (*QueryHints) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{3} + return fileDescriptor_a938d55a388af629, []int{5} } func (m *QueryHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -281,7 +404,7 @@ func (m *ShardInfo) Reset() { *m = ShardInfo{} } func (m *ShardInfo) String() string { return proto.CompactTextString(m) } func (*ShardInfo) ProtoMessage() {} func (*ShardInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{4} + return fileDescriptor_a938d55a388af629, []int{6} } func (m *ShardInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -319,7 +442,7 @@ func (m *Func) Reset() { *m = Func{} } func (m *Func) String() string { return proto.CompactTextString(m) } func (*Func) ProtoMessage() {} func (*Func) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{5} + return fileDescriptor_a938d55a388af629, []int{7} } func (m *Func) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +482,7 @@ func (m *Grouping) Reset() { *m = Grouping{} } func (m *Grouping) String() string { return proto.CompactTextString(m) } func (*Grouping) ProtoMessage() {} func (*Grouping) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{6} + return fileDescriptor_a938d55a388af629, []int{8} } func (m *Grouping) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,7 +519,7 @@ func (m *Range) Reset() { *m = Range{} } func (m *Range) String() string { return proto.CompactTextString(m) } func (*Range) ProtoMessage() {} func (*Range) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{7} + return fileDescriptor_a938d55a388af629, []int{9} } func (m *Range) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -437,7 +560,7 @@ func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } func (m *SeriesResponse) String() string { return proto.CompactTextString(m) } func (*SeriesResponse) ProtoMessage() {} func (*SeriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{8} + return fileDescriptor_a938d55a388af629, []int{10} } func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -536,15 +659,13 @@ type LabelNamesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,6,rep,name=matchers,proto3" json:"matchers"` // same as in series request. 
WithoutReplicaLabels []string `protobuf:"bytes,7,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` - // limit is used to limit the number of results returned - Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } func (m *LabelNamesRequest) String() string { return proto.CompactTextString(m) } func (*LabelNamesRequest) ProtoMessage() {} func (*LabelNamesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{9} + return fileDescriptor_a938d55a388af629, []int{11} } func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -586,7 +707,7 @@ func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } func (m *LabelNamesResponse) String() string { return proto.CompactTextString(m) } func (*LabelNamesResponse) ProtoMessage() {} func (*LabelNamesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{10} + return fileDescriptor_a938d55a388af629, []int{12} } func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -629,15 +750,13 @@ type LabelValuesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,7,rep,name=matchers,proto3" json:"matchers"` // same as in series request. WithoutReplicaLabels []string `protobuf:"bytes,8,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` - // limit is used to limit the number of results returned - Limit int64 `protobuf:"varint,9,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) } func (*LabelValuesRequest) ProtoMessage() {} func (*LabelValuesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{11} + return fileDescriptor_a938d55a388af629, []int{13} } func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -679,7 +798,7 @@ func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) } func (*LabelValuesResponse) ProtoMessage() {} func (*LabelValuesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{12} + return fileDescriptor_a938d55a388af629, []int{14} } func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -709,9 +828,12 @@ func (m *LabelValuesResponse) XXX_DiscardUnknown() { var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo func init() { + proto.RegisterEnum("thanos.StoreType", StoreType_name, StoreType_value) proto.RegisterEnum("thanos.Aggr", Aggr_name, Aggr_value) proto.RegisterType((*WriteResponse)(nil), "thanos.WriteResponse") proto.RegisterType((*WriteRequest)(nil), "thanos.WriteRequest") + proto.RegisterType((*InfoRequest)(nil), "thanos.InfoRequest") + proto.RegisterType((*InfoResponse)(nil), "thanos.InfoResponse") proto.RegisterType((*SeriesRequest)(nil), "thanos.SeriesRequest") proto.RegisterType((*QueryHints)(nil), "thanos.QueryHints") proto.RegisterType((*ShardInfo)(nil), "thanos.ShardInfo") @@ -728,79 +850,91 @@ func init() { func init() { proto.RegisterFile("store/storepb/rpc.proto", fileDescriptor_a938d55a388af629) } var fileDescriptor_a938d55a388af629 = []byte{ - // 1149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4b, 0x6f, 0x23, 0x45, - 0x10, 0xf6, 0x78, 0x3c, 0x7e, 0x94, 0x13, 0xaf, 0xb7, 0xd7, 0xc9, 0x4e, 0xbc, 0x92, 0x63, 0x8c, - 0x90, 0xac, 0x55, 0xe4, 0xac, 0xbc, 0x08, 0x09, 0xc4, 0x25, 0x09, 0x2c, 0x59, 0x89, 0x04, 0xe8, - 0xec, 0x12, 0x04, 0x87, 0x51, 0xdb, 0xee, 0x8c, 0x47, 0x3b, 0xaf, 0x4c, 0xf7, 0x90, 0xf8, 0x0c, - 0x67, 0xc4, 0x9d, 0xdb, 0xfe, 0x9a, 0xdc, 0xd8, 0x23, 0x27, 0x04, 0xc9, 0x1f, 0x41, 0xfd, 0x18, - 0x3f, 0x82, 0xf7, 0xa5, 0xe4, 0x62, 0x75, 0x7d, 0x5f, 0x75, 0x4d, 0x75, 0xf5, 0x57, 0xe5, 0x86, - 0xfb, 0x8c, 0x47, 0x09, 0xdd, 0x96, 0xbf, 0xf1, 0x60, 0x3b, 0x89, 0x87, 0xbd, 0x38, 0x89, 0x78, - 0x84, 0x8a, 0x7c, 0x4c, 0xc2, 0x88, 0x35, 0x37, 0x16, 0x1d, 0xf8, 0x24, 0xa6, 0x4c, 0xb9, 0x34, - 0x1b, 0x6e, 0xe4, 0x46, 0x72, 0xb9, 0x2d, 0x56, 0x1a, 0x6d, 0x2f, 0x6e, 0x88, 0x93, 0x28, 0xb8, - 0xb6, 0x6f, 0xc3, 0x8d, 0x22, 0xd7, 0xa7, 0xdb, 0xd2, 0x1a, 0xa4, 0x27, 0xdb, 0x24, 0x9c, 0x28, - 0xaa, 0x73, 0x07, 0x56, 0x8f, 0x13, 0x8f, 0x53, 0x4c, 0x59, 0x1c, 0x85, 0x8c, 0x76, 0x7e, 0x31, - 0x60, 0x45, 0x23, 0xa7, 0x29, 0x65, 0x1c, 0xed, 0x00, 0x70, 0x2f, 0xa0, 0x8c, 0x26, 0x1e, 0x65, - 0xb6, 0xd1, 0x36, 0xbb, 0xd5, 0xfe, 0x03, 0xb1, 0x3b, 0xa0, 0x7c, 0x4c, 0x53, 0xe6, 0x0c, 0xa3, - 0x78, 0xd2, 0x7b, 0xe6, 0x05, 0xf4, 0x48, 0xba, 0xec, 0x16, 0x2e, 0xfe, 0xde, 0xcc, 0xe1, 0xb9, - 0x4d, 0x68, 0x1d, 0x8a, 0x9c, 0x86, 0x24, 0xe4, 0x76, 0xbe, 0x6d, 0x74, 0x2b, 0x58, 0x5b, 0xc8, - 0x86, 0x52, 0x42, 0x63, 0xdf, 0x1b, 0x12, 0xdb, 0x6c, 0x1b, 0x5d, 0x13, 0x67, 0x66, 0xe7, 0xa5, - 0x05, 0xab, 0x2a, 0x5c, 0x96, 0xc6, 0x06, 0x94, 0x03, 0x2f, 0x74, 0x44, 0x54, 0xdb, 0x50, 0xce, - 0x81, 0x17, 0x8a, 0xcf, 0x4a, 0x8a, 0x9c, 0x2b, 0x2a, 0xaf, 0x29, 0x72, 0x2e, 0xa9, 0x4f, 0x04, - 0xc5, 0x87, 0x63, 0x9a, 0x30, 0xdb, 0x94, 0xa9, 0x37, 0x7a, 0xaa, 0xce, 0xbd, 0xaf, 0xc9, 0x80, - 0xfa, 0x07, 0x8a, 0xd4, 0x39, 0x4f, 0x7d, 0x51, 0x1f, 0xd6, 0x44, 0xc8, 0x84, 0xb2, 0xc8, 0x4f, - 0xb9, 0x17, 0x85, 0xce, 0x99, 0x17, 0x8e, 0xa2, 0x33, 0xbb, 0x20, 0xe3, 0xdf, 0x0b, 0xc8, 0x39, - 0x9e, 0x72, 0xc7, 0x92, 0x42, 0x5b, 0x00, 0xc4, 0x75, 0x13, 0xea, 0x12, 0x4e, 0x99, 0x6d, 0xb5, - 0xcd, 0x6e, 0xad, 0xbf, 0x92, 0x7d, 0x6d, 0xc7, 0x75, 0x13, 0x3c, 0xc7, 0xa3, 0xcf, 0x60, 0x23, - 0x26, 0x09, 0xf7, 0x88, 0x2f, 0xbe, 0x22, 0x6b, 0xef, 0x8c, 0x3c, 0x46, 0x06, 0x3e, 0x1d, 0xd9, - 0xc5, 0xb6, 0xd1, 0x2d, 0xe3, 0xfb, 0xda, 0x21, 0xbb, 0x9b, 0x2f, 0x34, 0x8d, 0x7e, 0x5a, 0xb2, - 0x97, 0xf1, 0x84, 0x70, 0xea, 0x4e, 0xec, 0x52, 0xdb, 0xe8, 0xd6, 0xfa, 0x9b, 0xd9, 0x87, 0xbf, - 0x5d, 0x8c, 0x71, 0xa4, 0xdd, 0xfe, 0x17, 0x3c, 0x23, 0xd0, 0x26, 0x54, 0xd9, 0x0b, 0x2f, 0x76, - 0x86, 0xe3, 0x34, 0x7c, 0xc1, 0xec, 0xb2, 0x4c, 0x05, 0x04, 0xb4, 0x27, 0x11, 0xf4, 0x10, 0xac, - 0xb1, 0x17, 0x72, 0x66, 0x57, 0xda, 0x86, 0x2c, 0xa8, 0x52, 0x57, 0x2f, 0x53, 0x57, 0x6f, 0x27, - 0x9c, 0x60, 0xe5, 0x82, 0x10, 0x14, 0x18, 0xa7, 0xb1, 0x0d, 0xb2, 0x6c, 0x72, 0x8d, 0x1a, 0x60, - 0x25, 0x24, 0x74, 0xa9, 0x5d, 0x95, 0xa0, 0x32, 0xd0, 0x63, 0xa8, 0x9e, 0xa6, 0x34, 0x99, 0x38, - 0x2a, 0xf6, 0x8a, 0x8c, 0x8d, 0xb2, 0x53, 0x7c, 0x27, 0xa8, 0x7d, 0xc1, 0x60, 0x38, 0x9d, 0xae, - 0xd1, 0x23, 0x00, 0x36, 0x26, 0xc9, 0xc8, 0xf1, 0xc2, 0x93, 0xc8, 0x5e, 0x95, 0x7b, 0xee, 0x66, - 0x7b, 0x8e, 0x04, 0xf3, 0x34, 0x3c, 0x89, 0x70, 0x85, 0x65, 0x4b, 0xf4, 0x31, 0xac, 0x9f, 0x79, - 0x7c, 0x1c, 0xa5, 0xdc, 0xd1, 0x5a, 0x73, 0x7c, 0x21, 0x04, 0x66, 0xd7, 0xda, 0x66, 0xb7, 0x82, - 0x1b, 0x9a, 0xc5, 0x8a, 0x94, 0x22, 0x61, 0x22, 0x65, 0xdf, 0x0b, 0x3c, 0x6e, 0xdf, 0x51, 0x29, - 0x4b, 0xa3, 0xf3, 0xd2, 0x00, 0x98, 0x25, 0x26, 
0x0b, 0xc7, 0x69, 0xec, 0x04, 0x9e, 0xef, 0x7b, - 0x4c, 0x8b, 0x14, 0x04, 0x74, 0x20, 0x11, 0xd4, 0x86, 0xc2, 0x49, 0x1a, 0x0e, 0xa5, 0x46, 0xab, - 0x33, 0x69, 0x3c, 0x49, 0xc3, 0x21, 0x96, 0x0c, 0xda, 0x82, 0xb2, 0x9b, 0x44, 0x69, 0xec, 0x85, - 0xae, 0x54, 0x5a, 0xb5, 0x5f, 0xcf, 0xbc, 0xbe, 0xd2, 0x38, 0x9e, 0x7a, 0xa0, 0x0f, 0xb3, 0x42, - 0x5a, 0xd2, 0x75, 0x35, 0x73, 0xc5, 0x02, 0xd4, 0x75, 0xed, 0x9c, 0x41, 0x65, 0x5a, 0x08, 0x99, - 0xa2, 0xae, 0xd7, 0x88, 0x9e, 0x4f, 0x53, 0x54, 0xfc, 0x88, 0x9e, 0xa3, 0x0f, 0x60, 0x85, 0x47, - 0x9c, 0xf8, 0x8e, 0xc4, 0x98, 0x6e, 0xa7, 0xaa, 0xc4, 0x64, 0x18, 0x86, 0x6a, 0x90, 0x1f, 0x4c, - 0x64, 0xbf, 0x96, 0x71, 0x7e, 0x30, 0x11, 0xcd, 0xad, 0x2b, 0x58, 0x90, 0x15, 0xd4, 0x56, 0xa7, - 0x09, 0x05, 0x71, 0x32, 0x21, 0x81, 0x90, 0xe8, 0xa6, 0xad, 0x60, 0xb9, 0xee, 0xf4, 0xa1, 0x9c, - 0x9d, 0x47, 0xc7, 0x33, 0x96, 0xc4, 0x33, 0x17, 0xe2, 0x6d, 0x82, 0x25, 0x0f, 0x26, 0x1c, 0x16, - 0x4a, 0xac, 0xad, 0xce, 0x6f, 0x06, 0xd4, 0xb2, 0x99, 0xa1, 0x34, 0x8d, 0xba, 0x50, 0x9c, 0xce, - 0x2d, 0x51, 0xa2, 0xda, 0x54, 0x1b, 0x12, 0xdd, 0xcf, 0x61, 0xcd, 0xa3, 0x26, 0x94, 0xce, 0x48, - 0x12, 0x8a, 0xc2, 0xcb, 0x19, 0xb5, 0x9f, 0xc3, 0x19, 0x80, 0xb6, 0x32, 0xc1, 0x9b, 0xaf, 0x17, - 0xfc, 0x7e, 0x4e, 0x4b, 0x7e, 0xb7, 0x0c, 0xc5, 0x84, 0xb2, 0xd4, 0xe7, 0x9d, 0x5f, 0x4d, 0xb8, - 0x2b, 0x05, 0x74, 0x48, 0x82, 0xd9, 0x20, 0x7b, 0x63, 0xe3, 0x1b, 0x37, 0x68, 0xfc, 0xfc, 0x0d, - 0x1b, 0xbf, 0x01, 0x16, 0xe3, 0x24, 0xe1, 0x7a, 0x16, 0x2b, 0x03, 0xd5, 0xc1, 0xa4, 0xe1, 0x48, - 0xcf, 0x3d, 0xb1, 0x9c, 0xf5, 0xbf, 0xf5, 0xf6, 0xfe, 0x9f, 0x9f, 0xbf, 0xc5, 0xf7, 0x98, 0xbf, - 0xaf, 0x6f, 0xd3, 0xd2, 0xbb, 0xb4, 0x69, 0x79, 0xbe, 0x4d, 0x13, 0x40, 0xf3, 0xb7, 0xa0, 0xa5, - 0xd1, 0x00, 0x4b, 0x48, 0x51, 0xfd, 0xa3, 0x55, 0xb0, 0x32, 0x50, 0x13, 0xca, 0xfa, 0xd6, 0x85, - 0xf6, 0x05, 0x31, 0xb5, 0x67, 0xe7, 0x36, 0xdf, 0x7a, 0xee, 0xce, 0x1f, 0xa6, 0xfe, 0xe8, 0xf7, - 0xc4, 0x4f, 0x67, 0x77, 0x2f, 0x12, 0x14, 0xa8, 0x6e, 0x06, 0x65, 0xbc, 0x59, 0x11, 0xf9, 0x1b, - 0x28, 0xc2, 0xbc, 0x2d, 0x45, 0x14, 0x96, 0x28, 0xc2, 0x5a, 0xa2, 0x88, 0xe2, 0xfb, 0x29, 0xa2, - 0x74, 0x2b, 0x8a, 0x28, 0xbf, 0x8b, 0x22, 0x2a, 0xf3, 0x8a, 0x48, 0xe1, 0xde, 0xc2, 0xe5, 0x68, - 0x49, 0xac, 0x43, 0xf1, 0x67, 0x89, 0x68, 0x4d, 0x68, 0xeb, 0xb6, 0x44, 0xf1, 0x70, 0x17, 0x0a, - 0xe2, 0x19, 0x80, 0x4a, 0x60, 0xe2, 0x9d, 0xe3, 0x7a, 0x0e, 0x55, 0xc0, 0xda, 0xfb, 0xe6, 0xf9, - 0xe1, 0xb3, 0xba, 0x21, 0xb0, 0xa3, 0xe7, 0x07, 0xf5, 0xbc, 0x58, 0x1c, 0x3c, 0x3d, 0xac, 0x9b, - 0x72, 0xb1, 0xf3, 0x43, 0xbd, 0x80, 0xaa, 0x50, 0x92, 0x5e, 0x5f, 0xe2, 0xba, 0xd5, 0xff, 0xd3, - 0x00, 0xeb, 0x48, 0xbc, 0xf4, 0xd0, 0xa7, 0x50, 0x54, 0x53, 0x0c, 0xad, 0x2d, 0x4e, 0x35, 0x2d, - 0xb6, 0xe6, 0xfa, 0x75, 0x58, 0x1d, 0xf3, 0x91, 0x81, 0xf6, 0x00, 0x66, 0x1d, 0x81, 0x36, 0x16, - 0xea, 0x3f, 0x3f, 0xab, 0x9a, 0xcd, 0x65, 0x94, 0xae, 0xd6, 0x13, 0xa8, 0xce, 0x15, 0x11, 0x2d, - 0xba, 0x2e, 0xc8, 0xbe, 0xf9, 0x60, 0x29, 0xa7, 0xe2, 0xf4, 0x0f, 0xa1, 0x26, 0xdf, 0x9b, 0x42, - 0xcf, 0xea, 0x64, 0x9f, 0x43, 0x15, 0xd3, 0x20, 0xe2, 0x54, 0xe2, 0x68, 0xaa, 0x8f, 0xf9, 0x67, - 0x69, 0x73, 0xed, 0x1a, 0xaa, 0x9f, 0xaf, 0xb9, 0xdd, 0x8f, 0x2e, 0xfe, 0x6d, 0xe5, 0x2e, 0x2e, - 0x5b, 0xc6, 0xab, 0xcb, 0x96, 0xf1, 0xcf, 0x65, 0xcb, 0xf8, 0xfd, 0xaa, 0x95, 0x7b, 0x75, 0xd5, - 0xca, 0xfd, 0x75, 0xd5, 0xca, 0xfd, 0x58, 0xd2, 0xcf, 0xe4, 0x41, 0x51, 0xde, 0xd0, 0xe3, 0xff, - 0x02, 0x00, 0x00, 0xff, 0xff, 0x84, 0xe1, 0x09, 0x34, 0x90, 0x0b, 0x00, 0x00, + // 1331 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x6f, 0x13, 0x47, + 0x14, 0xf7, 0x7a, 0xbd, 0xfe, 0xf3, 0x9c, 0xb8, 0x66, 0x30, 0x61, 0x63, 0x24, 0xc7, 0x75, 0x55, + 0xc9, 0x42, 0xd4, 0xa6, 0x06, 0x21, 0xb5, 0xe2, 0x92, 0x04, 0x43, 0xa2, 0x12, 0x53, 0xc6, 0x09, + 0x69, 0xa9, 0x2a, 0x6b, 0x6d, 0x4f, 0xd6, 0x2b, 0xec, 0xdd, 0x65, 0x67, 0xb6, 0x89, 0xaf, 0xad, + 0x7a, 0xab, 0xaa, 0xaa, 0x1f, 0xa1, 0x9f, 0x86, 0x23, 0xc7, 0xaa, 0x07, 0xd4, 0xc2, 0xbd, 0x9f, + 0xa1, 0x9a, 0x3f, 0xbb, 0xf6, 0xa6, 0x21, 0x08, 0x91, 0x4b, 0x34, 0xef, 0xf7, 0x7b, 0xf3, 0xe6, + 0xfd, 0xcf, 0x1a, 0xae, 0x52, 0xe6, 0x05, 0xa4, 0x2d, 0xfe, 0xfa, 0xc3, 0x76, 0xe0, 0x8f, 0x5a, + 0x7e, 0xe0, 0x31, 0x0f, 0x65, 0xd9, 0xc4, 0x72, 0x3d, 0x5a, 0x5d, 0x4f, 0x2a, 0xb0, 0xb9, 0x4f, + 0xa8, 0x54, 0xa9, 0x56, 0x6c, 0xcf, 0xf6, 0xc4, 0xb1, 0xcd, 0x4f, 0x0a, 0xad, 0x27, 0x2f, 0xf8, + 0x81, 0x37, 0x3b, 0x75, 0x4f, 0x99, 0x9c, 0x5a, 0x43, 0x32, 0x3d, 0x4d, 0xd9, 0x9e, 0x67, 0x4f, + 0x49, 0x5b, 0x48, 0xc3, 0xf0, 0xa8, 0x6d, 0xb9, 0x73, 0x49, 0x35, 0x3e, 0x82, 0xd5, 0xc3, 0xc0, + 0x61, 0x04, 0x13, 0xea, 0x7b, 0x2e, 0x25, 0x8d, 0x9f, 0x34, 0x58, 0x51, 0xc8, 0xf3, 0x90, 0x50, + 0x86, 0x36, 0x01, 0x98, 0x33, 0x23, 0x94, 0x04, 0x0e, 0xa1, 0xa6, 0x56, 0xd7, 0x9b, 0xc5, 0xce, + 0x35, 0x7e, 0x7b, 0x46, 0xd8, 0x84, 0x84, 0x74, 0x30, 0xf2, 0xfc, 0x79, 0x6b, 0xdf, 0x99, 0x91, + 0xbe, 0x50, 0xd9, 0xca, 0xbc, 0x78, 0xb5, 0x91, 0xc2, 0x4b, 0x97, 0xd0, 0x1a, 0x64, 0x19, 0x71, + 0x2d, 0x97, 0x99, 0xe9, 0xba, 0xd6, 0x2c, 0x60, 0x25, 0x21, 0x13, 0x72, 0x01, 0xf1, 0xa7, 0xce, + 0xc8, 0x32, 0xf5, 0xba, 0xd6, 0xd4, 0x71, 0x24, 0x36, 0x56, 0xa1, 0xb8, 0xeb, 0x1e, 0x79, 0xca, + 0x87, 0xc6, 0xef, 0x69, 0x58, 0x91, 0xb2, 0xf4, 0x12, 0x8d, 0x20, 0x2b, 0x02, 0x8d, 0x1c, 0x5a, + 0x6d, 0xc9, 0xc4, 0xb6, 0x1e, 0x72, 0x74, 0xeb, 0x2e, 0x77, 0xe1, 0xaf, 0x57, 0x1b, 0xb7, 0x6d, + 0x87, 0x4d, 0xc2, 0x61, 0x6b, 0xe4, 0xcd, 0xda, 0x52, 0xe1, 0x33, 0xc7, 0x53, 0xa7, 0xb6, 0xff, + 0xcc, 0x6e, 0x27, 0x72, 0xd6, 0x7a, 0x2a, 0x6e, 0x63, 0x65, 0x1a, 0xad, 0x43, 0x7e, 0xe6, 0xb8, + 0x03, 0x1e, 0x88, 0x70, 0x5c, 0xc7, 0xb9, 0x99, 0xe3, 0xf2, 0x48, 0x05, 0x65, 0x9d, 0x48, 0x4a, + 0xb9, 0x3e, 0xb3, 0x4e, 0x04, 0xd5, 0x86, 0x82, 0xb0, 0xba, 0x3f, 0xf7, 0x89, 0x99, 0xa9, 0x6b, + 0xcd, 0x52, 0xe7, 0x52, 0xe4, 0x5d, 0x3f, 0x22, 0xf0, 0x42, 0x07, 0xdd, 0x01, 0x10, 0x0f, 0x0e, + 0x28, 0x61, 0xd4, 0x34, 0x44, 0x3c, 0xf1, 0x0d, 0xe9, 0x52, 0x9f, 0x30, 0x95, 0xd6, 0xc2, 0x54, + 0xc9, 0xb4, 0xf1, 0x8b, 0x01, 0xab, 0x32, 0xe5, 0x51, 0xa9, 0x96, 0x1d, 0xd6, 0xde, 0xee, 0x70, + 0x3a, 0xe9, 0xf0, 0x1d, 0x4e, 0xb1, 0xd1, 0x84, 0x04, 0xd4, 0xd4, 0xc5, 0xeb, 0x95, 0x44, 0x36, + 0xf7, 0x24, 0xa9, 0x1c, 0x88, 0x75, 0x51, 0x07, 0xae, 0x70, 0x93, 0x01, 0xa1, 0xde, 0x34, 0x64, + 0x8e, 0xe7, 0x0e, 0x8e, 0x1d, 0x77, 0xec, 0x1d, 0x8b, 0xa0, 0x75, 0x7c, 0x79, 0x66, 0x9d, 0xe0, + 0x98, 0x3b, 0x14, 0x14, 0xba, 0x01, 0x60, 0xd9, 0x76, 0x40, 0x6c, 0x8b, 0x11, 0x19, 0x6b, 0xa9, + 0xb3, 0x12, 0xbd, 0xb6, 0x69, 0xdb, 0x01, 0x5e, 0xe2, 0xd1, 0x97, 0xb0, 0xee, 0x5b, 0x01, 0x73, + 0xac, 0x29, 0x7f, 0x45, 0x54, 0x7e, 0x30, 0x76, 0xa8, 0x35, 0x9c, 0x92, 0xb1, 0x99, 0xad, 0x6b, + 0xcd, 0x3c, 0xbe, 0xaa, 0x14, 0xa2, 0xce, 0xb8, 0xa7, 0x68, 0xf4, 0xdd, 0x19, 0x77, 0x29, 0x0b, + 0x2c, 0x46, 0xec, 0xb9, 0x99, 0x13, 0x65, 0xd9, 0x88, 0x1e, 0xfe, 0x3a, 0x69, 0xa3, 0xaf, 0xd4, + 0xfe, 0x67, 0x3c, 0x22, 0xd0, 0x06, 0x14, 0xe9, 0x33, 0xc7, 0x1f, 0x8c, 0x26, 0xa1, 0xfb, 0x8c, + 0x9a, 0x79, 0xe1, 0x0a, 0x70, 0x68, 0x5b, 0x20, 0xe8, 0x3a, 0x18, 0x13, 0xc7, 0x65, 0xd4, 0x2c, + 0xd4, 0x35, 0x91, 0x50, 0x39, 0x81, 0xad, 0x68, 0x02, 0x5b, 0x9b, 
0xee, 0x1c, 0x4b, 0x15, 0x84, + 0x20, 0x43, 0x19, 0xf1, 0x4d, 0x10, 0x69, 0x13, 0x67, 0x54, 0x01, 0x23, 0xb0, 0x5c, 0x9b, 0x98, + 0x45, 0x01, 0x4a, 0x01, 0xdd, 0x82, 0xe2, 0xf3, 0x90, 0x04, 0xf3, 0x81, 0xb4, 0xbd, 0x22, 0x6c, + 0xa3, 0x28, 0x8a, 0xc7, 0x9c, 0xda, 0xe1, 0x0c, 0x86, 0xe7, 0xf1, 0x19, 0xdd, 0x04, 0xa0, 0x13, + 0x2b, 0x18, 0x0f, 0x1c, 0xf7, 0xc8, 0x33, 0x57, 0xc5, 0x9d, 0x45, 0x43, 0x72, 0x46, 0x4c, 0x56, + 0x81, 0x46, 0x47, 0x74, 0x1b, 0xd6, 0x8e, 0x1d, 0x36, 0xf1, 0x42, 0x36, 0x50, 0xf3, 0x38, 0x50, + 0xc3, 0x56, 0xaa, 0xeb, 0xcd, 0x02, 0xae, 0x28, 0x16, 0x4b, 0x52, 0x34, 0x09, 0x6d, 0xfc, 0xa1, + 0x01, 0x2c, 0x5c, 0x10, 0x29, 0x62, 0xc4, 0x1f, 0xcc, 0x9c, 0xe9, 0xd4, 0xa1, 0xaa, 0x1d, 0x81, + 0x43, 0x7b, 0x02, 0x41, 0x75, 0xc8, 0x1c, 0x85, 0xee, 0x48, 0x74, 0x63, 0x71, 0xd1, 0x04, 0xf7, + 0x43, 0x77, 0x84, 0x05, 0x83, 0x6e, 0x40, 0xde, 0x0e, 0xbc, 0xd0, 0x77, 0x5c, 0x5b, 0xf4, 0x54, + 0xb1, 0x53, 0x8e, 0xb4, 0x1e, 0x28, 0x1c, 0xc7, 0x1a, 0xe8, 0x93, 0x28, 0x65, 0x86, 0x50, 0x8d, + 0x37, 0x02, 0xe6, 0xa0, 0xca, 0x60, 0xe3, 0x18, 0x0a, 0x71, 0xc8, 0xc2, 0x45, 0x95, 0x99, 0x31, + 0x39, 0x89, 0x5d, 0x94, 0xfc, 0x98, 0x9c, 0xa0, 0x8f, 0x61, 0x85, 0x79, 0xcc, 0x9a, 0x0e, 0x04, + 0x46, 0xd5, 0xe0, 0x14, 0x05, 0x26, 0xcc, 0x50, 0x54, 0x82, 0xf4, 0x70, 0x2e, 0x56, 0x40, 0x1e, + 0xa7, 0x87, 0x73, 0xbe, 0xea, 0x54, 0xae, 0x32, 0x22, 0x57, 0x4a, 0x6a, 0x54, 0x21, 0xc3, 0x23, + 0xe3, 0xc5, 0x76, 0x2d, 0x35, 0x9e, 0x05, 0x2c, 0xce, 0x8d, 0x0e, 0xe4, 0xa3, 0x78, 0x94, 0x3d, + 0xed, 0x0c, 0x7b, 0x7a, 0xc2, 0xde, 0x06, 0x18, 0x22, 0x30, 0xae, 0x90, 0x48, 0xb1, 0x92, 0x1a, + 0xbf, 0x6a, 0x50, 0x8a, 0xb6, 0x83, 0x5a, 0x9a, 0x4d, 0xc8, 0xc6, 0x5b, 0x9c, 0xa7, 0xa8, 0x14, + 0x77, 0x81, 0x40, 0x77, 0x52, 0x58, 0xf1, 0xa8, 0x0a, 0xb9, 0x63, 0x2b, 0x70, 0x79, 0xe2, 0xc5, + 0xc6, 0xde, 0x49, 0xe1, 0x08, 0x40, 0x37, 0xa2, 0xd6, 0xd6, 0xdf, 0xde, 0xda, 0x3b, 0x29, 0xd5, + 0xdc, 0x5b, 0x79, 0xc8, 0x06, 0x84, 0x86, 0x53, 0xd6, 0xf8, 0x37, 0x0d, 0x97, 0x44, 0xab, 0xf4, + 0xac, 0xd9, 0x62, 0x65, 0x9d, 0x3b, 0xe2, 0xda, 0x07, 0x8c, 0x78, 0xfa, 0x03, 0x47, 0xbc, 0x02, + 0x06, 0x65, 0x56, 0xc0, 0xd4, 0x7a, 0x97, 0x02, 0x2a, 0x83, 0x4e, 0xdc, 0xb1, 0xda, 0x70, 0xfc, + 0xb8, 0x98, 0x74, 0xe3, 0xdd, 0x93, 0xbe, 0xbc, 0x69, 0xb3, 0xef, 0xb1, 0x69, 0xdf, 0x3e, 0x90, + 0xb9, 0x73, 0x06, 0x32, 0x00, 0xb4, 0x9c, 0x6f, 0xd5, 0x04, 0x15, 0x30, 0x78, 0xd3, 0xc9, 0x7f, + 0x9c, 0x05, 0x2c, 0x05, 0x54, 0x85, 0xbc, 0xaa, 0x2f, 0xef, 0x72, 0x4e, 0xc4, 0xf2, 0x22, 0x42, + 0xfd, 0x9d, 0x11, 0x36, 0x7e, 0xd6, 0xd5, 0xa3, 0x4f, 0xac, 0x69, 0xb8, 0xa8, 0x72, 0x05, 0x0c, + 0xe1, 0xb0, 0x6a, 0x7b, 0x29, 0x9c, 0x5f, 0xfb, 0xf4, 0x07, 0xd4, 0x5e, 0xbf, 0xa8, 0xda, 0x67, + 0xce, 0xa8, 0xbd, 0x71, 0x46, 0xed, 0xb3, 0xef, 0x57, 0xfb, 0xdc, 0x85, 0xd4, 0x3e, 0x7f, 0x4e, + 0xed, 0x43, 0xb8, 0x9c, 0x28, 0x83, 0x2a, 0xfe, 0x1a, 0x64, 0x7f, 0x10, 0x88, 0xaa, 0xbe, 0x92, + 0x2e, 0xaa, 0xfc, 0xd7, 0xbf, 0x87, 0x42, 0xfc, 0x89, 0x83, 0x8a, 0x90, 0x3b, 0xe8, 0x7d, 0xd5, + 0x7b, 0x74, 0xd8, 0x2b, 0xa7, 0x50, 0x01, 0x8c, 0xc7, 0x07, 0x5d, 0xfc, 0x6d, 0x59, 0x43, 0x79, + 0xc8, 0xe0, 0x83, 0x87, 0xdd, 0x72, 0x9a, 0x6b, 0xf4, 0x77, 0xef, 0x75, 0xb7, 0x37, 0x71, 0x59, + 0xe7, 0x1a, 0xfd, 0xfd, 0x47, 0xb8, 0x5b, 0xce, 0x70, 0x1c, 0x77, 0xb7, 0xbb, 0xbb, 0x4f, 0xba, + 0x65, 0x83, 0xe3, 0xf7, 0xba, 0x5b, 0x07, 0x0f, 0xca, 0xd9, 0xeb, 0x5b, 0x90, 0xe1, 0xdf, 0x08, + 0x28, 0x07, 0x3a, 0xde, 0x3c, 0x94, 0x56, 0xb7, 0x1f, 0x1d, 0xf4, 0xf6, 0xcb, 0x1a, 0xc7, 0xfa, + 0x07, 0x7b, 0xe5, 0x34, 0x3f, 0xec, 0xed, 0xf6, 0xca, 0xba, 0x38, 0x6c, 0x7e, 0x23, 0xcd, 
0x09, + 0xad, 0x2e, 0x2e, 0x1b, 0x9d, 0x1f, 0xd3, 0x60, 0x08, 0x1f, 0xd1, 0xe7, 0x90, 0x11, 0xff, 0x06, + 0x2e, 0x47, 0x75, 0x58, 0xfa, 0xe2, 0xac, 0x56, 0x92, 0xa0, 0xca, 0xdf, 0x17, 0x90, 0x95, 0xbb, + 0x12, 0x5d, 0x49, 0xee, 0xce, 0xe8, 0xda, 0xda, 0x69, 0x58, 0x5e, 0xbc, 0xa9, 0xa1, 0x6d, 0x80, + 0xc5, 0x34, 0xa2, 0xf5, 0x44, 0xed, 0x97, 0x37, 0x62, 0xb5, 0x7a, 0x16, 0xa5, 0xde, 0xbf, 0x0f, + 0xc5, 0xa5, 0xb2, 0xa2, 0xa4, 0x6a, 0x62, 0xe4, 0xaa, 0xd7, 0xce, 0xe4, 0xa4, 0x9d, 0x4e, 0x0f, + 0x4a, 0xe2, 0x1b, 0x9f, 0xcf, 0x92, 0x4c, 0xc6, 0x5d, 0x28, 0x62, 0x32, 0xf3, 0x18, 0x11, 0x38, + 0x8a, 0xc3, 0x5f, 0xfe, 0x29, 0x50, 0xbd, 0x72, 0x0a, 0x55, 0x3f, 0x19, 0x52, 0x5b, 0x9f, 0xbe, + 0xf8, 0xa7, 0x96, 0x7a, 0xf1, 0xba, 0xa6, 0xbd, 0x7c, 0x5d, 0xd3, 0xfe, 0x7e, 0x5d, 0xd3, 0x7e, + 0x7b, 0x53, 0x4b, 0xbd, 0x7c, 0x53, 0x4b, 0xfd, 0xf9, 0xa6, 0x96, 0x7a, 0x9a, 0x53, 0xbf, 0x5a, + 0x86, 0x59, 0xd1, 0x33, 0xb7, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x14, 0xa2, 0x0f, 0x1f, + 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -815,6 +949,10 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type StoreClient interface { + /// Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + /// available. + /// Deprecated. Use `thanos.info` instead. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. /// /// Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain @@ -839,6 +977,15 @@ func NewStoreClient(cc *grpc.ClientConn) StoreClient { return &storeClient{cc} } +func (c *storeClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/thanos.Store/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *storeClient) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Store_SeriesClient, error) { stream, err := c.cc.NewStream(ctx, &_Store_serviceDesc.Streams[0], "/thanos.Store/Series", opts...) if err != nil { @@ -891,6 +1038,10 @@ func (c *storeClient) LabelValues(ctx context.Context, in *LabelValuesRequest, o // StoreServer is the server API for Store service. type StoreServer interface { + /// Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + /// available. + /// Deprecated. Use `thanos.info` instead. + Info(context.Context, *InfoRequest) (*InfoResponse, error) /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. /// /// Series should strictly stream full series after series, optionally split by time. 
This means that a single frame can contain @@ -911,6 +1062,9 @@ type StoreServer interface { type UnimplementedStoreServer struct { } +func (*UnimplementedStoreServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} func (*UnimplementedStoreServer) Series(req *SeriesRequest, srv Store_SeriesServer) error { return status.Errorf(codes.Unimplemented, "method Series not implemented") } @@ -925,6 +1079,24 @@ func RegisterStoreServer(s *grpc.Server, srv StoreServer) { s.RegisterService(&_Store_serviceDesc, srv) } +func _Store_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/thanos.Store/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Store_Series_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SeriesRequest) if err := stream.RecvMsg(m); err != nil { @@ -986,6 +1158,10 @@ var _Store_serviceDesc = grpc.ServiceDesc{ ServiceName: "thanos.Store", HandlerType: (*StoreServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _Store_Info_Handler, + }, { MethodName: "LabelNames", Handler: _Store_LabelNames_Handler, @@ -1151,6 +1327,95 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelSets) > 0 { + for iNdEx := len(m.LabelSets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LabelSets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.StoreType != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.StoreType)) + i-- + dAtA[i] = 0x20 + } + if m.MaxTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x18 + } + if m.MinTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x10 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = 
encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *SeriesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1171,11 +1436,6 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x78 - } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1630,11 +1890,6 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x40 - } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1771,11 +2026,6 @@ func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x48 - } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1941,6 +2191,45 @@ func (m *WriteRequest) Size() (n int) { return n } +func (m *InfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *InfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.MinTime != 0 { + n += 1 + sovRpc(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovRpc(uint64(m.MaxTime)) + } + if m.StoreType != 0 { + n += 1 + sovRpc(uint64(m.StoreType)) + } + if len(m.LabelSets) > 0 { + for _, e := range m.LabelSets { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + func (m *SeriesRequest) Size() (n int) { if m == nil { return 0 @@ -2002,9 +2291,6 @@ func (m *SeriesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } return n } @@ -2179,9 +2465,6 @@ func (m *LabelNamesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } return n } @@ -2248,9 +2531,6 @@ func (m *LabelValuesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } return n } @@ -2470,6 +2750,231 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreType", wireType) + } + m.StoreType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreType |= StoreType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSets = append(m.LabelSets, labelpb.ZLabelSet{}) + if err := m.LabelSets[len(m.LabelSets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 
0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SeriesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2896,25 +3401,6 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -3866,25 +4352,6 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -4296,25 +4763,6 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto index 2a6313ec02..2a9e9e3eaf 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto @@ -7,6 +7,7 @@ package thanos; import "store/storepb/types.proto"; import "gogoproto/gogo.proto"; import "store/storepb/prompb/types.proto"; +import "store/labelpb/types.proto"; import "google/protobuf/any.proto"; option go_package = "storepb"; @@ -24,6 +25,11 @@ option (gogoproto.goproto_sizecache_all) = false; /// Store represents API against instance that stores XOR encoded values with label set metadata (e.g Prometheus metrics). service Store { + /// Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + /// available. + /// Deprecated. Use `thanos.info` instead. + rpc Info(InfoRequest) returns (InfoResponse); + /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. /// /// Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain @@ -57,6 +63,32 @@ message WriteRequest { int64 replica = 3; } +// Deprecated. Use `thanos.info` instead. +message InfoRequest {} + +// Deprecated. Use `thanos.info` instead. 
+enum StoreType { + UNKNOWN = 0; + QUERY = 1; + RULE = 2; + SIDECAR = 3; + STORE = 4; + RECEIVE = 5; + // DEBUG represents some debug StoreAPI components e.g. thanos tools store-api-serve. + DEBUG = 6; +} + +// Deprecated. Use `thanos.info` instead. +message InfoResponse { + // Deprecated. Use label_sets instead. + repeated Label labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel"]; + int64 min_time = 2; + int64 max_time = 3; + StoreType storeType = 4; + // label_sets is an unsorted list of `ZLabelSet`s. + repeated ZLabelSet label_sets = 5 [(gogoproto.nullable) = false]; +} + message SeriesRequest { int64 min_time = 1; int64 max_time = 2; @@ -105,9 +137,6 @@ message SeriesRequest { // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let client knows // server supports it. repeated string without_replica_labels = 14; - - // limit is used to limit the number of results returned - int64 limit = 15; } // QueryHints represents hints from PromQL that might help to @@ -206,9 +235,6 @@ message LabelNamesRequest { // same as in series request. repeated string without_replica_labels = 7; - - // limit is used to limit the number of results returned - int64 limit = 8; } message LabelNamesResponse { @@ -242,9 +268,6 @@ message LabelValuesRequest { // same as in series request. repeated string without_replica_labels = 8; - - // limit is used to limit the number of results returned - int64 limit = 9; } message LabelValuesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index 08c3122e32..7e2c472350 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -11,12 +11,9 @@ import ( "sort" "strings" "sync" - "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/pkg/errors" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "google.golang.org/grpc" @@ -24,35 +21,19 @@ import ( "google.golang.org/grpc/status" "github.com/thanos-io/thanos/pkg/component" - "github.com/thanos-io/thanos/pkg/filter" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" ) -const ( - RemoteReadFrameLimit = 1048576 - cuckooStoreFilterCapacity = 1000000 - storeFilterUpdateInterval = 15 * time.Second -) +const RemoteReadFrameLimit = 1048576 type TSDBReader interface { storage.ChunkQueryable StartTime() (int64, error) } -// TSDBStoreOption is a functional option for TSDBStore. -type TSDBStoreOption func(s *TSDBStore) - -// WithCuckooMetricNameStoreFilter returns a TSDBStoreOption that enables the Cuckoo filter for metric names. -func WithCuckooMetricNameStoreFilter() TSDBStoreOption { - return func(s *TSDBStore) { - s.storeFilter = filter.NewCuckooMetricNameStoreFilter(cuckooStoreFilterCapacity) - s.startStoreFilterUpdate = true - } -} - // TSDBStore implements the store API against a local TSDB instance. // It attaches the provided external labels to all results. It only responds with raw data // and does not support downsampling. 
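Note: pinning thanos at v0.36.1 rolls the vendored storepb definitions back to a revision that still exposes the deprecated `Store.Info` RPC and does not yet carry the per-request `limit` fields removed above. For reference, a minimal sketch of calling the restored RPC through the generated gogo client; the endpoint and credentials are placeholders, not values from this change.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/thanos-io/thanos/pkg/store/storepb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder address; in practice this is a StoreAPI endpoint.
	conn, err := grpc.NewClient("store.example:10901",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Info is deprecated upstream in favour of the thanos.info service,
	// but this vendored revision still serves it.
	resp, err := storepb.NewStoreClient(conn).Info(context.Background(), &storepb.InfoRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.StoreType, resp.MinTime, resp.MaxTime, len(resp.LabelSets))
}
```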
@@ -63,16 +44,8 @@ type TSDBStore struct { buffers sync.Pool maxBytesPerFrame int - extLset labels.Labels - startStoreFilterUpdate bool - storeFilter filter.StoreFilter - mtx sync.RWMutex - close func() - storepb.UnimplementedStoreServer -} - -func (s *TSDBStore) Close() { - s.close() + extLset labels.Labels + mtx sync.RWMutex } func RegisterWritableStoreServer(storeSrv storepb.WriteableStoreServer) func(*grpc.Server) { @@ -89,68 +62,21 @@ type ReadWriteTSDBStore struct { // NewTSDBStore creates a new TSDBStore. // NOTE: Given lset has to be sorted. -func NewTSDBStore( - logger log.Logger, - db TSDBReader, - component component.StoreAPI, - extLset labels.Labels, - options ...TSDBStoreOption, -) *TSDBStore { +func NewTSDBStore(logger log.Logger, db TSDBReader, component component.StoreAPI, extLset labels.Labels) *TSDBStore { if logger == nil { logger = log.NewNopLogger() } - - st := &TSDBStore{ + return &TSDBStore{ logger: logger, db: db, component: component, extLset: extLset, maxBytesPerFrame: RemoteReadFrameLimit, - storeFilter: filter.AllowAllStoreFilter{}, - close: func() {}, buffers: sync.Pool{New: func() interface{} { b := make([]byte, 0, initialBufSize) return &b }}, } - - for _, option := range options { - option(st) - } - - if st.startStoreFilterUpdate { - ctx, cancel := context.WithCancel(context.Background()) - - updateFilter := func(ctx context.Context) { - vals, err := st.LabelValues(ctx, &storepb.LabelValuesRequest{ - Label: model.MetricNameLabel, - End: math.MaxInt64, - }) - if err != nil { - level.Error(logger).Log("msg", "failed to update metric names", "err", err) - return - } - - st.storeFilter.ResetAndSet(vals.Values...) - } - st.close = cancel - updateFilter(ctx) - - t := time.NewTicker(storeFilterUpdateInterval) - - go func() { - for { - select { - case <-t.C: - updateFilter(ctx) - case <-ctx.Done(): - return - } - } - }() - } - - return st } func (s *TSDBStore) SetExtLset(extLset labels.Labels) { @@ -167,11 +93,38 @@ func (s *TSDBStore) getExtLset() labels.Labels { return s.extLset } +// Info returns store information about the Prometheus instance. +func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + minTime, err := s.db.StartTime() + if err != nil { + return nil, errors.Wrap(err, "TSDB min Time") + } + + res := &storepb.InfoResponse{ + Labels: labelpb.ZLabelsFromPromLabels(s.getExtLset()), + StoreType: s.component.ToProto(), + MinTime: minTime, + MaxTime: math.MaxInt64, + } + + // Until we deprecate the single labels in the reply, we just duplicate + // them here for migration/compatibility purposes. + res.LabelSets = []labelpb.ZLabelSet{} + if len(res.Labels) > 0 { + res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{ + Labels: res.Labels, + }) + } + return res, nil +} + func (s *TSDBStore) LabelSet() []labelpb.ZLabelSet { - labels := labelpb.ZLabelSetsFromPromLabels(s.getExtLset()) + labels := labelpb.ZLabelsFromPromLabels(s.getExtLset()) labelSets := []labelpb.ZLabelSet{} if len(labels) > 0 { - labelSets = append(labelSets, labels...) + labelSets = append(labelSets, labelpb.ZLabelSet{ + Labels: labels, + }) } return labelSets @@ -207,10 +160,6 @@ func (s *TSDBStore) TimeRange() (int64, int64) { return minTime, math.MaxInt64 } -func (s *TSDBStore) Matches(matchers []*labels.Matcher) bool { - return s.storeFilter.Matches(matchers) -} - // CloseDelegator allows to delegate close (releasing resources used by request to the server). 
// This is useful when we invoke StoreAPI within another StoreAPI and results are ephemeral until copied. type CloseDelegator interface { @@ -271,12 +220,7 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_Ser defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb chunk querier series") } - hints := &storage.SelectHints{ - Start: r.MinTime, - End: r.MaxTime, - Limit: int(r.Limit), - } - set := q.Select(srv.Context(), true, hints, matchers...) + set := q.Select(srv.Context(), true, nil, matchers...) shardMatcher := r.ShardInfo.Matcher(&s.buffers) defer shardMatcher.Close() @@ -384,10 +328,7 @@ func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest } defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label names") - hints := &storage.LabelHints{ - Limit: int(r.Limit), - } - res, _, err := q.LabelNames(ctx, hints, matchers...) + res, _, err := q.LabelNames(ctx, matchers...) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -455,7 +396,6 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque Start: r.Start, End: r.End, Func: "series", - Limit: int(r.Limit), } set := q.Select(ctx, false, hints, matchers...) @@ -465,10 +405,7 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque return &storepb.LabelValuesResponse{}, nil } - hints := &storage.LabelHints{ - Limit: int(r.Limit), - } - res, _, err := q.LabelValues(ctx, r.Label, hints, matchers...) + res, _, err := q.LabelValues(ctx, r.Label, matchers...) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go index a84f1ca673..d6108771f4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go @@ -10,39 +10,33 @@ import ( // MergeSlices merges a set of sorted string slices into a single ones // while removing all duplicates. -// If limit is set, only the first limit results will be returned. 0 to disable. -func MergeSlices(limit int, a ...[]string) []string { +func MergeSlices(a ...[]string) []string { if len(a) == 0 { return nil } if len(a) == 1 { - return truncateToLimit(limit, a[0]) + return a[0] } l := len(a) / 2 - return mergeTwoStringSlices(limit, MergeSlices(limit, a[:l]...), MergeSlices(limit, a[l:]...)) + return mergeTwoStringSlices(MergeSlices(a[:l]...), MergeSlices(a[l:]...)) } // MergeUnsortedSlices behaves like StringSlices but input slices are validated // for sortedness and are sorted if they are not ordered yet. -// If limit is set, only the first limit results will be returned. 0 to disable. -func MergeUnsortedSlices(limit int, a ...[]string) []string { +func MergeUnsortedSlices(a ...[]string) []string { for _, s := range a { if !sort.StringsAreSorted(s) { sort.Strings(s) } } - return MergeSlices(limit, a...) + return MergeSlices(a...) } -func mergeTwoStringSlices(limit int, a, b []string) []string { - a = truncateToLimit(limit, a) - b = truncateToLimit(limit, b) - +func mergeTwoStringSlices(a, b []string) []string { maxl := len(a) if len(b) > len(a) { maxl = len(b) } - res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { @@ -62,13 +56,5 @@ func mergeTwoStringSlices(limit int, a, b []string) []string { // Append all remaining elements. res = append(res, a...) res = append(res, b...) 
- res = truncateToLimit(limit, res) return res } - -func truncateToLimit(limit int, a []string) []string { - if limit > 0 && len(a) > limit { - return a[:limit] - } - return a -} diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664be..ae8577ef36 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d09555506f..dbfb2a165a 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -127,8 +127,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4b361d0269..8f68dbd04a 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,52 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. 
(#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + ## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 ### Added @@ -3110,7 +3156,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
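Among the v1.32.0 additions listed above, the exemplar filter option is the one most likely to surface in downstream configuration. A small illustrative sketch of disabling exemplar recording with it; the metrics SDK module itself is not vendored in this diff, so this is usage guidance only, not code from the change.

```go
package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// newMeterProvider turns exemplar recording off entirely using the filter and
// option introduced in this release.
func newMeterProvider() *sdkmetric.MeterProvider {
	return sdkmetric.NewMeterProvider(
		sdkmetric.WithExemplarFilter(exemplar.AlwaysOffFilter),
	)
}

func main() {
	mp := newMeterProvider()
	defer func() { _ = mp.Shutdown(context.Background()) }()
}
```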
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index bb33965574..22a2e9dbd4 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -629,6 +629,10 @@ should be canceled. ## Approvers and Maintainers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + ### Approvers ### Maintainers @@ -641,13 +645,13 @@ should be canceled. ### Emeritus -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Evan Torrie](https://github.com/evantorrie), Yahoo -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a1228a2124..b8292a4fb9 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -260,7 +260,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go index b069753f7e..3f093e75fc 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go @@ -6,6 +6,7 @@ package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "sync" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -38,6 +39,8 @@ func NewTracerProvider(bridge *BridgeTracer, provider trace.TracerProvider) *Tra type wrappedTracerKey struct { name string version string + schema string + attrs attribute.Set } // Tracer creates a WrappedTracer that wraps the OpenTelemetry tracer for each call to @@ -51,6 +54,8 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := wrappedTracerKey{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if t, ok := p.tracers[key]; ok { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc9..2e7690e43a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927d..2171bee3c8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. 
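The `exportContext` fix above joins the exporter's configured headers with any metadata already present on the caller's outgoing context instead of overwriting it. A standalone sketch of the `metadata.Join` behaviour the fix relies on; keys and values are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Metadata already set by the caller on the export context.
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("x-scope-orgid", "team-a"))

	// Headers configured on the exporter (e.g. via WithHeaders).
	md := metadata.Pairs("authorization", "Bearer placeholder")
	if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
		md = metadata.Join(md, outMD)
	}
	ctx = metadata.NewOutgoingContext(ctx, md)

	got, _ := metadata.FromOutgoingContext(ctx)
	// Both the caller's key and the exporter's header survive the join.
	fmt.Println(got.Get("x-scope-orgid"), got.Get("authorization"))
}
```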
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8d4b4bf089..3ee452ef72 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 709c8f70a6..c76bedfb16 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.31.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6a..ae92a42516 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git 
a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index e3db438a09..a6acd8dca6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,6 +5,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" "reflect" "sync" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -472,8 +474,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) defer m.mtx.Unlock() if m.delegate != nil { - insts = unwrapInstruments(insts) - return m.delegate.RegisterCallback(f, insts...) + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) } reg := ®istration{instruments: insts, function: f} @@ -487,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -515,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} + +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) 
+} +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -526,7 +575,7 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) return diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b48..ac65262c65 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -87,6 +87,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +103,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045b..34852a47b2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. + Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d4..c02aeefdde 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. 
- res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7b..cf3c88e15c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 4ce757dfd6..ccc97e1b66 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. + clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5bebd..185aa7c08f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. 
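Several hunks in this release thread instrumentation-scope attributes through the SDK and the global providers and treat them as part of a `Tracer`'s identity. A hedged sketch of how instrumentation code supplies those attributes; the scope name and attribute below are placeholders.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// The attributes become part of the scope identity, so requesting a tracer
// with a different attribute set yields a distinct tracer.
var tracer = otel.Tracer(
	"example.com/instrumentation/placeholder",
	trace.WithInstrumentationVersion("0.1.0"),
	trace.WithInstrumentationAttributes(attribute.String("tenant", "placeholder")),
)

func main() {
	_, span := tracer.Start(context.Background(), "demo")
	defer span.End()
}
```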
- global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 730fb85c3e..17f883c2c8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -639,10 +639,7 @@ func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index dc1eaa8e9d..0b214d3fe9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.31.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 6d3c7b1f40..59e2481613 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.31.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index cdebdb5eb7..c04b12f6b7 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.31.0 + version: v1.32.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,12 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.53.0 + version: v0.54.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.7.0 + version: v0.8.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.10 + version: v0.0.11 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index db42e6676a..c709b72847 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego +//go:build (!arm64 && !s390x && !ppc64 && !ppc64le) || !gc || purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.go similarity index 89% rename from vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go rename to vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.go index 3a4287f990..bd183d9ba1 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.s similarity index 76% rename from vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s rename to vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.s index c672ccf698..a660b4112f 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.s @@ -19,7 +19,7 @@ // The differences in this and the original implementation are // due to the calling conventions and initialization of constants. 
-//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) #include "textflag.h" @@ -36,32 +36,68 @@ // for VPERMXOR #define MASK R18 -DATA consts<>+0x00(SB)/8, $0x3320646e61707865 -DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 -DATA consts<>+0x10(SB)/8, $0x0000000000000001 -DATA consts<>+0x18(SB)/8, $0x0000000000000000 -DATA consts<>+0x20(SB)/8, $0x0000000000000004 -DATA consts<>+0x28(SB)/8, $0x0000000000000000 -DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d -DATA consts<>+0x38(SB)/8, $0x0203000106070405 -DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c -DATA consts<>+0x48(SB)/8, $0x0102030005060704 -DATA consts<>+0x50(SB)/8, $0x6170786561707865 -DATA consts<>+0x58(SB)/8, $0x6170786561707865 -DATA consts<>+0x60(SB)/8, $0x3320646e3320646e -DATA consts<>+0x68(SB)/8, $0x3320646e3320646e -DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x90(SB)/8, $0x0000000100000000 -DATA consts<>+0x98(SB)/8, $0x0000000300000002 -DATA consts<>+0xa0(SB)/8, $0x5566774411223300 -DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 -DATA consts<>+0xb0(SB)/8, $0x6677445522330011 -DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +DATA consts<>+0x00(SB)/4, $0x61707865 +DATA consts<>+0x04(SB)/4, $0x3320646e +DATA consts<>+0x08(SB)/4, $0x79622d32 +DATA consts<>+0x0c(SB)/4, $0x6b206574 +DATA consts<>+0x10(SB)/4, $0x00000001 +DATA consts<>+0x14(SB)/4, $0x00000000 +DATA consts<>+0x18(SB)/4, $0x00000000 +DATA consts<>+0x1c(SB)/4, $0x00000000 +DATA consts<>+0x20(SB)/4, $0x00000004 +DATA consts<>+0x24(SB)/4, $0x00000000 +DATA consts<>+0x28(SB)/4, $0x00000000 +DATA consts<>+0x2c(SB)/4, $0x00000000 +DATA consts<>+0x30(SB)/4, $0x0e0f0c0d +DATA consts<>+0x34(SB)/4, $0x0a0b0809 +DATA consts<>+0x38(SB)/4, $0x06070405 +DATA consts<>+0x3c(SB)/4, $0x02030001 +DATA consts<>+0x40(SB)/4, $0x0d0e0f0c +DATA consts<>+0x44(SB)/4, $0x090a0b08 +DATA consts<>+0x48(SB)/4, $0x05060704 +DATA consts<>+0x4c(SB)/4, $0x01020300 +DATA consts<>+0x50(SB)/4, $0x61707865 +DATA consts<>+0x54(SB)/4, $0x61707865 +DATA consts<>+0x58(SB)/4, $0x61707865 +DATA consts<>+0x5c(SB)/4, $0x61707865 +DATA consts<>+0x60(SB)/4, $0x3320646e +DATA consts<>+0x64(SB)/4, $0x3320646e +DATA consts<>+0x68(SB)/4, $0x3320646e +DATA consts<>+0x6c(SB)/4, $0x3320646e +DATA consts<>+0x70(SB)/4, $0x79622d32 +DATA consts<>+0x74(SB)/4, $0x79622d32 +DATA consts<>+0x78(SB)/4, $0x79622d32 +DATA consts<>+0x7c(SB)/4, $0x79622d32 +DATA consts<>+0x80(SB)/4, $0x6b206574 +DATA consts<>+0x84(SB)/4, $0x6b206574 +DATA consts<>+0x88(SB)/4, $0x6b206574 +DATA consts<>+0x8c(SB)/4, $0x6b206574 +DATA consts<>+0x90(SB)/4, $0x00000000 +DATA consts<>+0x94(SB)/4, $0x00000001 +DATA consts<>+0x98(SB)/4, $0x00000002 +DATA consts<>+0x9c(SB)/4, $0x00000003 +DATA consts<>+0xa0(SB)/4, $0x11223300 +DATA consts<>+0xa4(SB)/4, $0x55667744 +DATA consts<>+0xa8(SB)/4, $0x99aabb88 +DATA consts<>+0xac(SB)/4, $0xddeeffcc +DATA consts<>+0xb0(SB)/4, $0x22330011 +DATA consts<>+0xb4(SB)/4, $0x66774455 +DATA consts<>+0xb8(SB)/4, $0xaabb8899 +DATA consts<>+0xbc(SB)/4, $0xeeffccdd GLOBL consts<>(SB), RODATA, $0xc0 +#ifdef GOARCH_ppc64 +#define BE_XXBRW_INIT() \ + LVSL (R0)(R0), V24 \ + VSPLTISB $3, V25 \ + VXOR V24, V25, V24 \ + +#define BE_XXBRW(vr) VPERM vr, vr, V24, vr +#else +#define BE_XXBRW_INIT() +#define BE_XXBRW(vr) +#endif + //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD out+0(FP), 
OUT @@ -94,6 +130,8 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // Clear V27 VXOR V27, V27, V27 + BE_XXBRW_INIT() + // V28 LXVW4X (CONSTBASE)(R11), VS60 @@ -299,6 +337,11 @@ loop_vsx: VADDUWM V8, V18, V8 VADDUWM V12, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -327,6 +370,11 @@ loop_vsx: VADDUWM V9, V18, V8 VADDUWM V13, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -334,8 +382,8 @@ loop_vsx: LXVW4X (INP)(R8), VS60 LXVW4X (INP)(R9), VS61 LXVW4X (INP)(R10), VS62 - VXOR V27, V0, V27 + VXOR V27, V0, V27 VXOR V28, V4, V28 VXOR V29, V8, V29 VXOR V30, V12, V30 @@ -354,6 +402,11 @@ loop_vsx: VADDUWM V10, V18, V8 VADDUWM V14, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -381,6 +434,11 @@ loop_vsx: VADDUWM V11, V18, V8 VADDUWM V15, V19, V12 + BE_XXBRW(V0) + BE_XXBRW(V4) + BE_XXBRW(V8) + BE_XXBRW(V12) + CMPU LEN, $64 BLT tail_vsx @@ -408,9 +466,9 @@ loop_vsx: done_vsx: // Increment counter by number of 64 byte blocks - MOVD (CNT), R14 + MOVWZ (CNT), R14 ADD BLOCKS, R14 - MOVD R14, (CNT) + MOVWZ R14, (CNT) RET tail_vsx: diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index 333da285b3..bd896bdc76 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego +//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go similarity index 95% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go index 4aec4874b5..1a1679aaad 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s similarity index 89% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s index b3c1699bff..6899a1dabc 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s @@ -2,15 +2,25 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) #include "textflag.h" // This was ported from the amd64 implementation. 
+#ifdef GOARCH_ppc64le +#define LE_MOVD MOVD +#define LE_MOVWZ MOVWZ +#define LE_MOVHZ MOVHZ +#else +#define LE_MOVD MOVDBR +#define LE_MOVWZ MOVWBR +#define LE_MOVHZ MOVHBR +#endif + #define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ + LE_MOVD (msg)( R0), t0; \ + LE_MOVD (msg)(R24), t1; \ MOVD $1, t2; \ ADDC t0, h0, h0; \ ADDE t1, h1, h1; \ @@ -50,10 +60,6 @@ ADDE t3, h1, h1; \ ADDZE h2 -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - // func update(state *[7]uint64, msg []byte) TEXT ·update(SB), $0-32 MOVD state+0(FP), R3 @@ -66,6 +72,8 @@ TEXT ·update(SB), $0-32 MOVD 24(R3), R11 // r0 MOVD 32(R3), R12 // r1 + MOVD $8, R24 + CMP R5, $16 BLT bytes_between_0_and_15 @@ -94,7 +102,7 @@ flush_buffer: // Greater than 8 -- load the rightmost remaining bytes in msg // and put into R17 (h1) - MOVD (R4)(R21), R17 + LE_MOVD (R4)(R21), R17 MOVD $16, R22 // Find the offset to those bytes @@ -118,7 +126,7 @@ just1: BLT less8 // Exactly 8 - MOVD (R4), R16 + LE_MOVD (R4), R16 CMP R17, $0 @@ -133,7 +141,7 @@ less8: MOVD $0, R22 // shift count CMP R5, $4 BLT less4 - MOVWZ (R4), R16 + LE_MOVWZ (R4), R16 ADD $4, R4 ADD $-4, R5 MOVD $32, R22 @@ -141,7 +149,7 @@ less8: less4: CMP R5, $2 BLT less2 - MOVHZ (R4), R21 + LE_MOVHZ (R4), R21 SLD R22, R21, R21 OR R16, R21, R16 ADD $16, R22 diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index 780968d6c1..e81b73e6a7 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -8,8 +8,8 @@ package http2 import ( "context" - "crypto/tls" "errors" + "net" "net/http" "sync" ) @@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) { // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. 
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { @@ -194,8 +194,8 @@ type addConnCall struct { err error } -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) +func (c *addConnCall) run(t *Transport, key string, nc net.Conn) { + cc, err := t.NewClientConn(nc) p := c.p p.mu.Lock() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 617b4a4762..832414b450 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -306,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -323,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. + s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -2880,6 +2899,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 0c5f64aa8b..f5968f4407 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -202,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. 
func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -281,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -293,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. +type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -324,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -364,6 +397,14 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. 
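The new `nextProtoUnencryptedHTTP2` key and `unencryptedTransport` above are internal plumbing for handing off cleartext HTTP/2 connections. The public way for users of this package to speak h2c is unchanged and still goes through `AllowHTTP` plus a dialer that skips TLS; a sketch with a placeholder address follows.

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	client := &http.Client{
		Transport: &http2.Transport{
			AllowHTTP: true, // permit http:// URLs
			// Dial a plain TCP connection even though the field name says TLS.
			DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, network, addr)
			},
		},
	}

	resp, err := client.Get("http://127.0.0.1:8080/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto)
}
```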
@@ -420,12 +461,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -530,6 +571,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -562,7 +605,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -573,7 +623,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -598,6 +648,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -616,9 +682,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -757,6 +824,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), } var group synctestGroupInterface if t.transportTestHooks != nil { @@ -960,7 +1028,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -992,16 +1060,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1014,7 +1104,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. 
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1578,6 +1668,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1602,16 +1693,40 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1633,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -2180,10 +2300,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
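The hunks above make reset-but-unacknowledged streams count against the connection's concurrency limit: currentRequestCountLocked sums active streams, reserved slots, and pendingResets, and awaitOpenSlotForStreamLocked now blocks until that total drops below the peer's advertised maximum. The toy model below shows the accounting in isolation; connState and its fields are illustrative and not the real ClientConn.

package main

import "fmt"

type connState struct {
	activeStreams   int
	reservedStreams int
	pendingResets   int // RST_STREAMs sent with a PING, no PING ack seen yet
	maxConcurrent   int
}

// currentRequestCount mirrors currentRequestCountLocked above.
func (c *connState) currentRequestCount() int {
	return c.activeStreams + c.reservedStreams + c.pendingResets
}

func (c *connState) canTakeNewRequest() bool {
	return c.currentRequestCount() < c.maxConcurrent
}

// onPingAck mirrors the processPing change: any PING ack clears the counter.
func (c *connState) onPingAck() { c.pendingResets = 0 }

func main() {
	c := &connState{maxConcurrent: 2, activeStreams: 1}
	c.pendingResets = 1 // a request was cancelled; the bundled PING is unanswered
	fmt.Println(c.canTakeNewRequest()) // false: the reset still holds a slot
	c.onPingAck()
	fmt.Println(c.canTakeNewRequest()) // true: the peer proved it is still responsive
}

The effect is that an unresponsive server can absorb at most maxConcurrent in-flight attempts before the client stops sending it new requests.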
@@ -2243,7 +2363,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2267,6 +2386,24 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + const unusedWaitTime = 5 * time.Second + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2494,15 +2631,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. + limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -3046,6 +3202,11 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3068,13 +3229,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). 
cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3228,7 +3396,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 0000000000..b2de211613 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go index cebde7634f..3c9576e2d8 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go index cebde7634f..3c9576e2d8 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) 
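The new unencrypted.go file above defines the handoff convention for cleartext HTTP/2: the plaintext net.Conn is hidden behind a wrapper whose UnencryptedNetConn method is the only way to recover it, so a genuine *tls.Conn can never be mistaken for an unencrypted connection by accident. The sketch below reproduces that pattern with an illustrative unencryptedConn wrapper; it is not the wrapper x/net or the standard library actually uses.

package main

import (
	"errors"
	"fmt"
	"net"
)

// unencryptedConn marks a plaintext net.Conn that is deliberately being
// passed through a code path that normally expects TLS.
type unencryptedConn struct {
	net.Conn
}

func (c unencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }

// unwrap mirrors the type assertion in unencryptedNetConnFromTLSConn.
func unwrap(c net.Conn) (net.Conn, error) {
	u, ok := c.(interface{ UnencryptedNetConn() net.Conn })
	if !ok {
		return nil, errors.New("connection was not marked as unencrypted")
	}
	return u.UnencryptedNetConn(), nil
}

func main() {
	a, b := net.Pipe()
	defer a.Close()
	defer b.Close()

	wrapped := unencryptedConn{Conn: a}
	if nc, err := unwrap(wrapped); err == nil {
		fmt.Println(nc == a) // true: the original conn is recovered
	}
	_, err := unwrap(b) // a bare, unmarked conn is rejected
	fmt.Println(err != nil)
}

Requiring the explicit marker is what lets the "unencrypted_http2" TLSNextProto entries above reuse the *tls.Conn-shaped plumbing without ever silently dropping encryption.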
diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 0000000000..ec2acfe540 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 0000000000..b838cb9e95 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. 
+func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 910728fb16..32a44514e2 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -6,10 +6,10 @@ package cpu -// cpuid is implemented in cpu_x86.s for gc compiler +// cpuid is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s similarity index 94% rename from vendor/golang.org/x/sys/cpu/cpu_x86.s rename to vendor/golang.org/x/sys/cpu/cpu_gc_x86.s index 7d7ba33efb..ce208ce6d6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24 RET // func xgetbv() (eax, edx uint32) -TEXT ·xgetbv(SB),NOSPLIT,$0-8 +TEXT ·xgetbv(SB), NOSPLIT, $0-8 MOVL $0, CX XGETBV MOVL AX, eax+0(FP) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 99c60fe9f9..170d21ddfd 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) { gccgoXgetbv(&a, &d) return a, d } - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 08f35ea177..f1caf0f78e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -110,7 +110,6 @@ func doinit() { ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) ARM64.HasDIT = isSet(hwCap, hwcap_DIT) - // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 0000000000..a0fd7e2f75 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index c29f5e4c5a..600a680786 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -92,10 +92,8 @@ func archInit() { osSupportsAVX = isSet(1, eax) && isSet(2, eax) if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. - osSupportsAVX512 = false + // Darwin requires special AVX512 checks, see cpu_darwin_x86.go + osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 0000000000..4d0888b0c0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. + +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. 
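The syscall_darwin_x86_gc.go file being added here hand-rolls sysctl and sysctlbyname trampolines solely so the cpu package avoids a dependency on golang.org/x/sys/unix. Application code that already imports x/sys/unix can read the same keys directly; a darwin-only sketch using the existing unix.Sysctl and unix.SysctlUint32 helpers (the key names come from the vendored code above, everything else is illustrative):

//go:build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Darwin kernel release string, e.g. "21.3.0" (kernel 21.3.0 corresponds
	// to macOS 12.2, the first release with the AVX-512 mask-register fix).
	release, err := unix.Sysctl("kern.osrelease")
	if err != nil {
		panic(err)
	}

	// 1 if the kernel reports AVX-512 Foundation support; missing key or 0
	// means the feature is unavailable.
	avx512f, err := unix.SysctlUint32("hw.optional.avx512f")
	if err != nil {
		avx512f = 0 // older hardware or kernels simply lack the key
	}

	fmt.Printf("kernel %s, hw.optional.avx512f=%d\n", release, avx512f)
}

This is the same information darwinOSRelease and darwinSysctlEnabled obtain below via raw trampolines; the vendored copy exists only to keep the cpu package free of the extra import.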
+ +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab8..7ca4fa12aa 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. 
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index ac54ecaba0..6ab02b6c31 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -158,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . 
#if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index f08abd434f..230a94549a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1d..7bf5c04bb0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return 
FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. + if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index de3b462489..ccba391c9f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2625,6 +2625,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8aa6d77c01..0c00cb3f3a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -237,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index da428f4253..dfb364554d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -237,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index bf45bfec78..d46dcf78ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 71c67162b7..3af3248a7f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -240,6 +240,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9476628fa0..292bcf0283 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -238,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 
0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9e85f3cf0..782b7110fa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a48b68a764..84973fd927 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ea00e8522a..6d9cbc3b27 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 91c6468717..5f9fedbce0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + 
PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 8cbf38d639..bb0026ee0c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index a2df734191..46120db5c9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 2479137923..5c951634fb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d265f146ee..11a84d5af2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + 
PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3f2d644396..f78c4617ca 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 5d8b727a1c..aeb777c344 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -239,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index af30da5578..5cc1e8eb2f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 3a69e45496..8daaf3faf4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1752,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 
0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1796,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1825,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1857,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1925,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1977,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Rsv [3]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af468..2e5d5a4435 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 
+ Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a3143..4510bfc3f5 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154c9..51311e205f 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2203,6 +2203,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. 
+const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. 
See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc01..6f5252880c 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), 
uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) diff --git a/vendor/modules.txt b/vendor/modules.txt index 2eb463c967..c75722ee9c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -312,9 +312,6 @@ github.com/davecgh/go-spew/spew # github.com/dennwc/varint v1.0.0 ## explicit; go 1.12 github.com/dennwc/varint -# github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 -## explicit -github.com/dgryski/go-metro # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous @@ -545,7 +542,7 @@ github.com/grpc-ecosystem/go-grpc-middleware # github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 ## explicit; go 1.19 github.com/grpc-ecosystem/go-grpc-middleware/v2 -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 ## explicit; go 1.21 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -674,7 +671,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.79 +# github.com/minio/minio-go/v7 v7.0.80 ## explicit; go 1.22 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/cors @@ -721,8 +718,8 @@ github.com/oklog/run # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e -## explicit; go 1.11 +# github.com/opentracing-contrib/go-grpc v0.1.0 +## explicit; go 1.22.7 github.com/opentracing-contrib/go-grpc # github.com/opentracing-contrib/go-stdlib v1.0.0 ## explicit; go 1.14 @@ -837,7 +834,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.55.0 +# github.com/prometheus/prometheus v0.55.1 ## explicit; go 1.22.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -913,9 +910,6 @@ github.com/sean-/seed # github.com/segmentio/fasthash v1.0.3 ## explicit; go 1.11 github.com/segmentio/fasthash/fnv1a -# github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 -## explicit; go 1.15 -github.com/seiflotfy/cuckoofilter # github.com/sercand/kuberesolver/v4 v4.0.0 => github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v4 @@ -982,8 +976,8 @@ github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/ringbuffer github.com/thanos-io/promql-engine/storage github.com/thanos-io/promql-engine/storage/prometheus -# github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290 -## explicit; go 1.23.0 +# github.com/thanos-io/thanos v0.36.1 +## explicit; go 1.21 github.com/thanos-io/thanos/pkg/api/query/querypb github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -1007,7 +1001,6 @@ github.com/thanos-io/thanos/pkg/extkingpin github.com/thanos-io/thanos/pkg/extprom github.com/thanos-io/thanos/pkg/extprom/http github.com/thanos-io/thanos/pkg/extpromql -github.com/thanos-io/thanos/pkg/filter github.com/thanos-io/thanos/pkg/gate github.com/thanos-io/thanos/pkg/info/infopb github.com/thanos-io/thanos/pkg/losertree @@ -1144,7 +1137,7 @@ 
go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector/pdata v1.18.0 +# go.opentelemetry.io/collector/pdata v1.19.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data @@ -1179,7 +1172,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvut # go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/autoprop -# go.opentelemetry.io/contrib/propagators/aws v1.31.0 +# go.opentelemetry.io/contrib/propagators/aws v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/contrib/propagators/aws/xray # go.opentelemetry.io/contrib/propagators/b3 v1.29.0 @@ -1191,7 +1184,7 @@ go.opentelemetry.io/contrib/propagators/jaeger # go.opentelemetry.io/contrib/propagators/ot v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.31.0 +# go.opentelemetry.io/otel v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1208,27 +1201,27 @@ go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/bridge/opentracing v1.31.0 +# go.opentelemetry.io/otel/bridge/opentracing v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.31.0 +# go.opentelemetry.io/otel/metric v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.31.0 +# go.opentelemetry.io/otel/sdk v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -1237,7 +1230,7 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.31.0 +# go.opentelemetry.io/otel/trace v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1273,7 +1266,7 @@ go4.org/intern # go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 ## explicit; go 1.11 go4.org/unsafe/assume-no-moving-gc -# golang.org/x/crypto v0.28.0 +# golang.org/x/crypto v0.29.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1296,7 +1289,7 @@ golang.org/x/exp/slices # golang.org/x/mod v0.21.0 ## explicit; go 1.22.0 
golang.org/x/mod/semver -# golang.org/x/net v0.30.0 +# golang.org/x/net v0.31.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -1327,17 +1320,17 @@ golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.8.0 +# golang.org/x/sync v0.9.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.26.0 +# golang.org/x/sys v0.27.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.19.0 +# golang.org/x/text v0.20.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/internal @@ -1350,7 +1343,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.7.0 +# golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.24.0 @@ -1398,17 +1391,17 @@ google.golang.org/api/transport/http/internal/propagation ## explicit; go 1.21 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 +# google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.67.1 => google.golang.org/grpc v1.65.0 +# google.golang.org/grpc v1.68.0 => google.golang.org/grpc v1.65.0 ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes
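
The vendored golang.org/x/sys/unix bump above adds the Linux hardware-timestamping and PTP clock types (EthtoolTsInfo, HwTstampConfig, PtpClockCaps, PtpSysOffset*, ...) together with the HWTSTAMP_* and PTP_PF_* constants. Below is a minimal sketch of how the new HwTstampConfig type could be used to turn on NIC timestamping; the local ifreqHwtstamp layout, the raw SIOCSHWTSTAMP ioctl plumbing, and the "eth0" interface name are illustrative assumptions, not helpers exported by the package.

```go
package main

import (
	"fmt"
	"log"
	"unsafe"

	"golang.org/x/sys/unix"
)

// ifreqHwtstamp mirrors the kernel's struct ifreq with ifr_data pointing at a
// hwtstamp_config. The layout is a local 64-bit sketch, not a type exported by
// golang.org/x/sys/unix.
type ifreqHwtstamp struct {
	Name [unix.IFNAMSIZ]byte
	Data *unix.HwTstampConfig
	_    [16]byte // pad the ifr_ifru union out to the kernel's struct ifreq size
}

func enableHwTimestamps(ifname string) error {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	// Types and constants added by the vendored x/sys/unix update.
	cfg := unix.HwTstampConfig{
		Tx_type:   unix.HWTSTAMP_TX_ON,
		Rx_filter: unix.HWTSTAMP_FILTER_ALL,
	}
	var ifr ifreqHwtstamp
	copy(ifr.Name[:], ifname)
	ifr.Data = &cfg

	// SIOCSHWTSTAMP hands the config to the NIC driver (needs CAP_NET_ADMIN).
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.SIOCSHWTSTAMP), uintptr(unsafe.Pointer(&ifr))); errno != 0 {
		return fmt.Errorf("SIOCSHWTSTAMP on %s: %w", ifname, errno)
	}
	return nil
}

func main() {
	if err := enableHwTimestamps("eth0"); err != nil { // "eth0" is just an example name
		log.Fatal(err)
	}
}
```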
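
On the Windows side, the update replaces the old Seek + SetEndOfFile implementation of Ftruncate with a single SetFileInformationByHandle(FileEndOfFileInfo) call, so the handle's file position is no longer disturbed as a side effect. A hedged usage sketch (the path and target length are arbitrary):

```go
package main

import (
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Arbitrary example path and target length.
	path, err := windows.UTF16PtrFromString(`C:\temp\example.dat`)
	if err != nil {
		log.Fatal(err)
	}
	h, err := windows.CreateFile(path, windows.GENERIC_WRITE, windows.FILE_SHARE_READ, nil,
		windows.OPEN_ALWAYS, windows.FILE_ATTRIBUTE_NORMAL, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.CloseHandle(h)

	// With the vendored change this is a single SetFileInformationByHandle
	// call; the handle's current file position is left untouched.
	if err := windows.Ftruncate(h, 1<<20); err != nil {
		log.Fatal(err)
	}
}
```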
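
GetIfEntry2Ex is newly bound together with the MibIfRow2 (MIB_IF_ROW2) type; passing MibIfEntryNormalWithoutStatistics skips the per-interface counters when only status fields are needed. A sketch, assuming an interface named "Ethernet" exists on the host:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/sys/windows"
)

func main() {
	// "Ethernet" is just an example interface name.
	ifi, err := net.InterfaceByName("Ethernet")
	if err != nil {
		log.Fatal(err)
	}

	var row windows.MibIfRow2
	row.InterfaceIndex = uint32(ifi.Index)

	// MibIfEntryNormalWithoutStatistics avoids collecting the traffic counters.
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormalWithoutStatistics, &row); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("alias=%q oper=%d mtu=%d speed=%d bps\n",
		windows.UTF16ToString(row.Alias[:]), row.OperStatus, row.Mtu, row.TransmitLinkSpeed)
}
```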
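
NotifyIpInterfaceChange and NotifyUnicastIpAddressChange register a native callback that fires when an interface or unicast address changes, and CancelMibChangeNotify2 tears the registration down; the callback pointer is produced with windows.NewCallback and follows the PIPINTERFACE_CHANGE_CALLBACK prototype. A sketch with an illustrative callback body and wait period:

```go
package main

import (
	"log"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// Callback signature: (callerContext, row, notificationType) -> void.
	cb := windows.NewCallback(func(ctx unsafe.Pointer, row *windows.MibIpInterfaceRow, notificationType uint32) uintptr {
		// row is nil for the initial notification, so guard before dereferencing.
		if row != nil && notificationType == windows.MibParameterNotification {
			log.Printf("interface luid %#x (family %d) changed", row.InterfaceLuid, row.Family)
		}
		return 0
	})

	var handle windows.Handle
	// AF_UNSPEC watches both IPv4 and IPv6; initialNotification=true fires once immediately.
	if err := windows.NotifyIpInterfaceChange(windows.AF_UNSPEC, cb, nil, true, &handle); err != nil {
		log.Fatal(err)
	}
	defer windows.CancelMibChangeNotify2(handle)

	time.Sleep(30 * time.Second) // illustrative: listen for changes for a while
}
```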