diff --git a/go.mod b/go.mod index acdf36a5f8..780dcd1915 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/lib/pq v1.10.9 - github.com/minio/minio-go/v7 v7.0.79 + github.com/minio/minio-go/v7 v7.0.80 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e @@ -46,14 +46,14 @@ require ( github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.60.1 // Prometheus maps version 2.x.y to tags v0.x.y. - github.com/prometheus/prometheus v0.55.0 + github.com/prometheus/prometheus v0.55.1 github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.9.0 github.com/thanos-io/objstore v0.0.0-20240913074259-63feed0da069 github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31 - github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290 + github.com/thanos-io/thanos v0.36.1 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.16 @@ -68,8 +68,8 @@ require ( go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/atomic v1.11.0 golang.org/x/net v0.30.0 - golang.org/x/sync v0.8.0 - golang.org/x/time v0.7.0 + golang.org/x/sync v0.9.0 + golang.org/x/time v0.8.0 google.golang.org/grpc v1.67.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -81,7 +81,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/google/go-cmp v0.6.0 github.com/sercand/kuberesolver/v4 v4.0.0 - go.opentelemetry.io/collector/pdata v1.18.0 + go.opentelemetry.io/collector/pdata v1.19.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 google.golang.org/protobuf v1.35.1 ) @@ -121,7 +121,6 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.5.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect @@ -200,7 +199,6 @@ require ( github.com/rs/cors v1.11.0 // indirect github.com/rs/xid v1.6.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect - github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 // indirect github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index a1b95fa256..8cb8164eb2 100644 --- a/go.sum +++ b/go.sum @@ -953,8 +953,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 h1:BS21ZUJ/B5X2UVUbczfmdWH7GapPWAhxcMsDnjJTU1E= -github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f 
h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dhui/dktest v0.4.3 h1:wquqUxAFdcUgabAVLvSCOKOlag5cIZuaOjYIBOWdsR0= @@ -1459,8 +1457,8 @@ github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcs github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.79 h1:SvJZpj3hT0RN+4KiuX/FxLfPZdsuegy6d/2PiemM/bM= -github.com/minio/minio-go/v7 v7.0.79/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= +github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= +github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -1505,8 +1503,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -1600,8 +1598,8 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.55.0 h1:ITinOi1zr3HemoVWHf679PfRRmpxZOcR4nEvsze6eB0= -github.com/prometheus/prometheus v0.55.0/go.mod h1:GGS7QlWKCqCbcEzWsVahYIfQwiGhcExkarHyLJTsv6I= +github.com/prometheus/prometheus v0.55.1 h1:+NM9V/h4A+wRkOyQzGewzgPPgq/iX2LUQoISNvmjZmI= +github.com/prometheus/prometheus v0.55.1/go.mod h1:GGS7QlWKCqCbcEzWsVahYIfQwiGhcExkarHyLJTsv6I= github.com/redis/rueidis v1.0.45-alpha.1 h1:69Bu1l7gVC/qDYuGGwMwGg2rjOjSyxESol/Zila62gY= github.com/redis/rueidis v1.0.45-alpha.1/go.mod h1:q7BfhDaPt7xxwy2nv2RqQO12/mmHflDjebpcNwWFjms= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1626,8 +1624,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 
h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 h1:emzAzMZ1L9iaKCTxdy3Em8Wv4ChIAGnfiz18Cda70g4= -github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771/go.mod h1:bR6DqgcAl1zTcOX8/pE2Qkj9XO00eCNqmKb7lXP8EAg= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= @@ -1688,8 +1684,8 @@ github.com/thanos-io/objstore v0.0.0-20240913074259-63feed0da069 h1:TUPZ6euAh8I6 github.com/thanos-io/objstore v0.0.0-20240913074259-63feed0da069/go.mod h1:Cba80S8NbVBBdyZKzra7San/jXvpAxArbpFymWzIZhg= github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31 h1:xPaP58g+3EPohdw4cv+6jv5+LcX6LynhHvQcYwTAMxQ= github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31/go.mod h1:wx0JlRZtsB2S10JYUgeg5GqLfMxw31SzArP+28yyE00= -github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290 h1:d58OLbcIC6F3TviRFK85Ucdxbs/3cPI1fcLRBWiNThA= -github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290/go.mod h1:kqF1rQspIAL+rktXL3OSKfiDjJmsCRezQblsR56degY= +github.com/thanos-io/thanos v0.36.1 h1:NsUBsWkJcZ6Uo2VuEr06mZZ9YNMLGVA2sIGVu+LsrNU= +github.com/thanos-io/thanos v0.36.1/go.mod h1:f7LiW4+/xvV5+gkseMuVbQnrbFTFnCPv5+X1M6mXkn4= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= @@ -1740,8 +1736,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg= -go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= +go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE= +go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= @@ -2015,8 +2011,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2175,8 +2171,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2563,7 +2559,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/vendor/github.com/dgryski/go-metro/LICENSE b/vendor/github.com/dgryski/go-metro/LICENSE deleted file mode 100644 index 6243b617cf..0000000000 --- a/vendor/github.com/dgryski/go-metro/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This package is a mechanical translation of the reference C++ code for -MetroHash, available at https://github.com/jandrewrogers/MetroHash - -The MIT License (MIT) - -Copyright (c) 2016 Damian Gryski - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/dgryski/go-metro/README b/vendor/github.com/dgryski/go-metro/README deleted file mode 100644 index 5ecebb3853..0000000000 --- a/vendor/github.com/dgryski/go-metro/README +++ /dev/null @@ -1,6 +0,0 @@ -MetroHash - -This package is a mechanical translation of the reference C++ code for -MetroHash, available at https://github.com/jandrewrogers/MetroHash - -I claim no additional copyright over the original implementation. diff --git a/vendor/github.com/dgryski/go-metro/metro.py b/vendor/github.com/dgryski/go-metro/metro.py deleted file mode 100644 index 8dd4d26e6a..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro.py +++ /dev/null @@ -1,199 +0,0 @@ -import peachpy.x86_64 - -k0 = 0xD6D018F5 -k1 = 0xA2AA033B -k2 = 0x62992FC1 -k3 = 0x30BC5B29 - -def advance(p,l,c): - ADD(p,c) - SUB(l,c) - -def imul(r,k): - t = GeneralPurposeRegister64() - MOV(t, k) - IMUL(r, t) - -def update32(v, p,idx, k, vadd): - r = GeneralPurposeRegister64() - MOV(r, [p + idx]) - imul(r, k) - ADD(v, r) - ROR(v, 29) - ADD(v, vadd) - -def final32(v, regs, keys): - r = GeneralPurposeRegister64() - MOV(r, v[regs[1]]) - ADD(r, v[regs[2]]) - imul(r, keys[0]) - ADD(r, v[regs[3]]) - ROR(r, 37) - imul(r, keys[1]) - XOR(v[regs[0]], r) - -seed = Argument(uint64_t) -buffer_base = Argument(ptr()) -buffer_len = Argument(int64_t) -buffer_cap = Argument(int64_t) - -def makeHash(name, args): - with Function(name, args, uint64_t) as function: - - reg_ptr = GeneralPurposeRegister64() - reg_ptr_len = GeneralPurposeRegister64() - reg_hash = GeneralPurposeRegister64() - - LOAD.ARGUMENT(reg_hash, seed) - LOAD.ARGUMENT(reg_ptr, buffer_base) - LOAD.ARGUMENT(reg_ptr_len, buffer_len) - - imul(reg_hash, k0) - r = GeneralPurposeRegister64() - MOV(r, k2*k0) - ADD(reg_hash, r) - - after32 = Label("after32") - - CMP(reg_ptr_len, 32) - JL(after32) - v = [GeneralPurposeRegister64() for _ in range(4)] - for i in range(4): - MOV(v[i], reg_hash) - - with Loop() as loop: - update32(v[0], reg_ptr, 0, k0, v[2]) - update32(v[1], reg_ptr, 8, k1, v[3]) - update32(v[2], reg_ptr, 16, k2, v[0]) - update32(v[3], reg_ptr, 24, k3, v[1]) - - ADD(reg_ptr, 32) - SUB(reg_ptr_len, 32) - CMP(reg_ptr_len, 32) - JGE(loop.begin) - - final32(v, [2,0,3,1], [k0, k1]) - final32(v, [3,1,2,0], [k1, k0]) - final32(v, [0,0,2,3], [k0, k1]) - final32(v, [1,1,3,2], [k1, k0]) - - XOR(v[0], v[1]) - ADD(reg_hash, v[0]) - - LABEL(after32) - - after16 = Label("after16") - CMP(reg_ptr_len, 16) - JL(after16) - - for i in range(2): - MOV(v[i], [reg_ptr]) - imul(v[i], k2) - ADD(v[i], reg_hash) - - advance(reg_ptr, reg_ptr_len, 8) - - ROR(v[i], 29) - imul(v[i], k3) - - r = GeneralPurposeRegister64() - MOV(r, v[0]) - imul(r, k0) - ROR(r, 21) - ADD(r, v[1]) - XOR(v[0], r) - - MOV(r, v[1]) - imul(r, k3) - ROR(r, 21) - ADD(r, v[0]) - XOR(v[1], r) - - ADD(reg_hash, v[1]) - - LABEL(after16) - - after8 = Label("after8") - CMP(reg_ptr_len, 8) - JL(after8) - - r = GeneralPurposeRegister64() - MOV(r, [reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 8) - - MOV(r, reg_hash) - ROR(r, 55) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after8) - - after4 = Label("after4") - CMP(reg_ptr_len, 4) - JL(after4) - - r = GeneralPurposeRegister64() - XOR(r, r) - MOV(r.as_dword, dword[reg_ptr]) - imul(r, k3) 
- ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 4) - - MOV(r, reg_hash) - ROR(r, 26) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after4) - - after2 = Label("after2") - CMP(reg_ptr_len, 2) - JL(after2) - - r = GeneralPurposeRegister64() - XOR(r,r) - MOV(r.as_word, word[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 2) - - MOV(r, reg_hash) - ROR(r, 48) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after2) - - after1 = Label("after1") - CMP(reg_ptr_len, 1) - JL(after1) - - r = GeneralPurposeRegister64() - MOVZX(r, byte[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - - MOV(r, reg_hash) - ROR(r, 37) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after1) - - r = GeneralPurposeRegister64() - MOV(r, reg_hash) - ROR(r, 28) - XOR(reg_hash, r) - - imul(reg_hash, k0) - - MOV(r, reg_hash) - ROR(r, 29) - XOR(reg_hash, r) - - RETURN(reg_hash) - -makeHash("Hash64", (buffer_base, buffer_len, buffer_cap, seed)) -makeHash("Hash64Str", (buffer_base, buffer_len, seed)) \ No newline at end of file diff --git a/vendor/github.com/dgryski/go-metro/metro128.go b/vendor/github.com/dgryski/go-metro/metro128.go deleted file mode 100644 index e8dd8ddbf5..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro128.go +++ /dev/null @@ -1,94 +0,0 @@ -package metro - -import "encoding/binary" - -func rotate_right(v uint64, k uint) uint64 { - return (v >> k) | (v << (64 - k)) -} - -func Hash128(buffer []byte, seed uint64) (uint64, uint64) { - - const ( - k0 = 0xC83A91E1 - k1 = 0x8648DBDB - k2 = 0x7BDEC03B - k3 = 0x2F5870A5 - ) - - ptr := buffer - - var v [4]uint64 - - v[0] = (seed - k0) * k3 - v[1] = (seed + k1) * k2 - - if len(ptr) >= 32 { - v[2] = (seed + k0) * k2 - v[3] = (seed - k1) * k3 - - for len(ptr) >= 32 { - v[0] += binary.LittleEndian.Uint64(ptr) * k0 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 29) + v[2] - v[1] += binary.LittleEndian.Uint64(ptr) * k1 - ptr = ptr[8:] - v[1] = rotate_right(v[1], 29) + v[3] - v[2] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[2] = rotate_right(v[2], 29) + v[0] - v[3] += binary.LittleEndian.Uint64(ptr) * k3 - ptr = ptr[8:] - v[3] = rotate_right(v[3], 29) + v[1] - } - - v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 21) * k1 - v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 21) * k0 - v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 21) * k1 - v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 21) * k0 - } - - if len(ptr) >= 16 { - v[0] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 33) * k3 - v[1] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[1] = rotate_right(v[1], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 45) * k1 - v[1] ^= rotate_right((v[1]*k3)+v[0], 45) * k0 - } - - if len(ptr) >= 8 { - v[0] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 27) * k1 - } - - if len(ptr) >= 4 { - v[1] += uint64(binary.LittleEndian.Uint32(ptr)) * k2 - ptr = ptr[4:] - v[1] = rotate_right(v[1], 33) * k3 - v[1] ^= rotate_right((v[1]*k3)+v[0], 46) * k0 - } - - if len(ptr) >= 2 { - v[0] += uint64(binary.LittleEndian.Uint16(ptr)) * k2 - ptr = ptr[2:] - v[0] = rotate_right(v[0], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 22) * k1 - } - - if len(ptr) >= 1 { - v[1] += uint64(ptr[0]) * k2 - v[1] = rotate_right(v[1], 33) * k3 - v[1] ^= rotate_right((v[1]*k3)+v[0], 58) * k0 - } - - v[0] += rotate_right((v[0]*k0)+v[1], 13) - v[1] += rotate_right((v[1]*k1)+v[0], 37) - v[0] += rotate_right((v[0]*k2)+v[1], 13) - v[1] += rotate_right((v[1]*k3)+v[0], 37) - - 
return v[0], v[1] -} diff --git a/vendor/github.com/dgryski/go-metro/metro64.go b/vendor/github.com/dgryski/go-metro/metro64.go deleted file mode 100644 index 1c04228a0b..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro64.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build noasm !amd64 gccgo - -package metro - -import ( - "encoding/binary" - "math/bits" -) - -func Hash64(buffer []byte, seed uint64) uint64 { - - const ( - k0 = 0xD6D018F5 - k1 = 0xA2AA033B - k2 = 0x62992FC1 - k3 = 0x30BC5B29 - ) - - ptr := buffer - - hash := (seed + k2) * k0 - - if len(ptr) >= 32 { - v0, v1, v2, v3 := hash, hash, hash, hash - - for len(ptr) >= 32 { - v0 += binary.LittleEndian.Uint64(ptr[:8]) * k0 - v0 = bits.RotateLeft64(v0, -29) + v2 - v1 += binary.LittleEndian.Uint64(ptr[8:16]) * k1 - v1 = bits.RotateLeft64(v1, -29) + v3 - v2 += binary.LittleEndian.Uint64(ptr[16:24]) * k2 - v2 = bits.RotateLeft64(v2, -29) + v0 - v3 += binary.LittleEndian.Uint64(ptr[24:32]) * k3 - v3 = bits.RotateLeft64(v3, -29) + v1 - ptr = ptr[32:] - } - - v2 ^= bits.RotateLeft64(((v0+v3)*k0)+v1, -37) * k1 - v3 ^= bits.RotateLeft64(((v1+v2)*k1)+v0, -37) * k0 - v0 ^= bits.RotateLeft64(((v0+v2)*k0)+v3, -37) * k1 - v1 ^= bits.RotateLeft64(((v1+v3)*k1)+v2, -37) * k0 - hash += v0 ^ v1 - } - - if len(ptr) >= 16 { - v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2) - v0 = bits.RotateLeft64(v0, -29) * k3 - v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2) - v1 = bits.RotateLeft64(v1, -29) * k3 - v0 ^= bits.RotateLeft64(v0*k0, -21) + v1 - v1 ^= bits.RotateLeft64(v1*k3, -21) + v0 - hash += v1 - ptr = ptr[16:] - } - - if len(ptr) >= 8 { - hash += binary.LittleEndian.Uint64(ptr[:8]) * k3 - ptr = ptr[8:] - hash ^= bits.RotateLeft64(hash, -55) * k1 - } - - if len(ptr) >= 4 { - hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3 - hash ^= bits.RotateLeft64(hash, -26) * k1 - ptr = ptr[4:] - } - - if len(ptr) >= 2 { - hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3 - ptr = ptr[2:] - hash ^= bits.RotateLeft64(hash, -48) * k1 - } - - if len(ptr) >= 1 { - hash += uint64(ptr[0]) * k3 - hash ^= bits.RotateLeft64(hash, -37) * k1 - } - - hash ^= bits.RotateLeft64(hash, -28) - hash *= k0 - hash ^= bits.RotateLeft64(hash, -29) - - return hash -} - -func Hash64Str(buffer string, seed uint64) uint64 { - return Hash64([]byte(buffer), seed) -} diff --git a/vendor/github.com/dgryski/go-metro/metro_amd64.s b/vendor/github.com/dgryski/go-metro/metro_amd64.s deleted file mode 100644 index 7fa4730a48..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro_amd64.s +++ /dev/null @@ -1,372 +0,0 @@ -// +build !noasm -// +build !gccgo - -// Generated by PeachPy 0.2.0 from metro.py - -// func Hash64(buffer_base uintptr, buffer_len int64, buffer_cap int64, seed uint64) uint64 -TEXT ·Hash64(SB),4,$0-40 - MOVQ seed+24(FP), AX - MOVQ buffer_base+0(FP), BX - MOVQ buffer_len+8(FP), CX - MOVQ $3603962101, DX - IMULQ DX, AX - MOVQ $5961697176435608501, DX - ADDQ DX, AX - CMPQ CX, $32 - JLT after32 - MOVQ AX, DX - MOVQ AX, DI - MOVQ AX, SI - MOVQ AX, BP -loop_begin: - MOVQ 0(BX), R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ R8, DX - RORQ $29, DX - ADDQ SI, DX - MOVQ 8(BX), R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ R8, DI - RORQ $29, DI - ADDQ BP, DI - MOVQ 16(BX), R8 - MOVQ $1654206401, R9 - IMULQ R9, R8 - ADDQ R8, SI - RORQ $29, SI - ADDQ DX, SI - MOVQ 24(BX), R8 - MOVQ $817650473, R9 - IMULQ R9, R8 - ADDQ R8, BP - RORQ $29, BP - ADDQ DI, BP - ADDQ $32, BX - SUBQ $32, CX - CMPQ CX, $32 - JGE loop_begin - MOVQ DX, R8 - ADDQ BP, R8 - MOVQ 
$3603962101, R9 - IMULQ R9, R8 - ADDQ DI, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, SI - MOVQ DI, R8 - ADDQ SI, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ DX, R8 - RORQ $37, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - XORQ R8, BP - MOVQ DX, R8 - ADDQ SI, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ BP, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, DX - MOVQ DI, R8 - ADDQ BP, R8 - MOVQ $2729050939, BP - IMULQ BP, R8 - ADDQ SI, R8 - RORQ $37, R8 - MOVQ $3603962101, SI - IMULQ SI, R8 - XORQ R8, DI - XORQ DI, DX - ADDQ DX, AX -after32: - CMPQ CX, $16 - JLT after16 - MOVQ 0(BX), DX - MOVQ $1654206401, DI - IMULQ DI, DX - ADDQ AX, DX - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DX - MOVQ $817650473, DI - IMULQ DI, DX - MOVQ 0(BX), DI - MOVQ $1654206401, SI - IMULQ SI, DI - ADDQ AX, DI - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DI - MOVQ $817650473, SI - IMULQ SI, DI - MOVQ DX, SI - MOVQ $3603962101, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DI, SI - XORQ SI, DX - MOVQ DI, SI - MOVQ $817650473, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DX, SI - XORQ SI, DI - ADDQ DI, AX -after16: - CMPQ CX, $8 - JLT after8 - MOVQ 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $8, BX - SUBQ $8, CX - MOVQ AX, DX - RORQ $55, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after8: - CMPQ CX, $4 - JLT after4 - XORQ DX, DX - MOVL 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $4, BX - SUBQ $4, CX - MOVQ AX, DX - RORQ $26, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after4: - CMPQ CX, $2 - JLT after2 - XORQ DX, DX - MOVW 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $2, BX - SUBQ $2, CX - MOVQ AX, DX - RORQ $48, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after2: - CMPQ CX, $1 - JLT after1 - MOVBQZX 0(BX), BX - MOVQ $817650473, CX - IMULQ CX, BX - ADDQ BX, AX - MOVQ AX, BX - RORQ $37, BX - MOVQ $2729050939, CX - IMULQ CX, BX - XORQ BX, AX -after1: - MOVQ AX, BX - RORQ $28, BX - XORQ BX, AX - MOVQ $3603962101, BX - IMULQ BX, AX - MOVQ AX, BX - RORQ $29, BX - XORQ BX, AX - MOVQ AX, ret+32(FP) - RET - -// func Hash64Str(buffer_base uintptr, buffer_len int64, seed uint64) uint64 -TEXT ·Hash64Str(SB),4,$0-32 - MOVQ seed+16(FP), AX - MOVQ buffer_base+0(FP), BX - MOVQ buffer_len+8(FP), CX - MOVQ $3603962101, DX - IMULQ DX, AX - MOVQ $5961697176435608501, DX - ADDQ DX, AX - CMPQ CX, $32 - JLT after32 - MOVQ AX, DX - MOVQ AX, DI - MOVQ AX, SI - MOVQ AX, BP -loop_begin: - MOVQ 0(BX), R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ R8, DX - RORQ $29, DX - ADDQ SI, DX - MOVQ 8(BX), R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ R8, DI - RORQ $29, DI - ADDQ BP, DI - MOVQ 16(BX), R8 - MOVQ $1654206401, R9 - IMULQ R9, R8 - ADDQ R8, SI - RORQ $29, SI - ADDQ DX, SI - MOVQ 24(BX), R8 - MOVQ $817650473, R9 - IMULQ R9, R8 - ADDQ R8, BP - RORQ $29, BP - ADDQ DI, BP - ADDQ $32, BX - SUBQ $32, CX - CMPQ CX, $32 - JGE loop_begin - MOVQ DX, R8 - ADDQ BP, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ DI, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, SI - MOVQ DI, R8 - ADDQ SI, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ DX, R8 - RORQ $37, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - XORQ R8, BP - MOVQ DX, R8 - ADDQ SI, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ BP, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, DX - MOVQ DI, R8 - ADDQ BP, R8 - MOVQ $2729050939, BP - IMULQ BP, R8 - ADDQ SI, R8 - RORQ $37, R8 - MOVQ $3603962101, SI - IMULQ SI, R8 - XORQ R8, DI - 
XORQ DI, DX - ADDQ DX, AX -after32: - CMPQ CX, $16 - JLT after16 - MOVQ 0(BX), DX - MOVQ $1654206401, DI - IMULQ DI, DX - ADDQ AX, DX - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DX - MOVQ $817650473, DI - IMULQ DI, DX - MOVQ 0(BX), DI - MOVQ $1654206401, SI - IMULQ SI, DI - ADDQ AX, DI - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DI - MOVQ $817650473, SI - IMULQ SI, DI - MOVQ DX, SI - MOVQ $3603962101, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DI, SI - XORQ SI, DX - MOVQ DI, SI - MOVQ $817650473, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DX, SI - XORQ SI, DI - ADDQ DI, AX -after16: - CMPQ CX, $8 - JLT after8 - MOVQ 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $8, BX - SUBQ $8, CX - MOVQ AX, DX - RORQ $55, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after8: - CMPQ CX, $4 - JLT after4 - XORQ DX, DX - MOVL 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $4, BX - SUBQ $4, CX - MOVQ AX, DX - RORQ $26, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after4: - CMPQ CX, $2 - JLT after2 - XORQ DX, DX - MOVW 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $2, BX - SUBQ $2, CX - MOVQ AX, DX - RORQ $48, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after2: - CMPQ CX, $1 - JLT after1 - MOVBQZX 0(BX), BX - MOVQ $817650473, CX - IMULQ CX, BX - ADDQ BX, AX - MOVQ AX, BX - RORQ $37, BX - MOVQ $2729050939, CX - IMULQ CX, BX - XORQ BX, AX -after1: - MOVQ AX, BX - RORQ $28, BX - XORQ BX, AX - MOVQ $3603962101, BX - IMULQ BX, AX - MOVQ AX, BX - RORQ $29, BX - XORQ BX, AX - MOVQ AX, ret+24(FP) - RET diff --git a/vendor/github.com/dgryski/go-metro/metro_stub.go b/vendor/github.com/dgryski/go-metro/metro_stub.go deleted file mode 100644 index 86ddcb4705..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro_stub.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !noasm,amd64 -// +build !gccgo - -package metro - -//go:generate python -m peachpy.x86_64 metro.py -S -o metro_amd64.s -mabi=goasm -//go:noescape - -func Hash64(buffer []byte, seed uint64) uint64 -func Hash64Str(buffer string, seed uint64) uint64 diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 54a20e7116..380ec4fdef 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -99,6 +99,7 @@ type Client struct { healthStatus int32 trailingHeaderSupport bool + maxRetries int } // Options for New method @@ -123,12 +124,16 @@ type Options struct { // Custom hash routines. Leave nil to use standard. CustomMD5 func() md5simd.Hasher CustomSHA256 func() md5simd.Hasher + + // Number of times a request is retried. Defaults to 10 retries if this option is not configured. + // Set to 1 to disable retries. + MaxRetries int } // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.79" + libraryVersion = "v7.0.80" ) // User Agent should always following the below style. @@ -278,6 +283,11 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { // healthcheck is not initialized clnt.healthStatus = unknown + clnt.maxRetries = MaxRetry + if opts.MaxRetries > 0 { + clnt.maxRetries = opts.MaxRetries + } + // Return. return clnt, nil } @@ -590,9 +600,9 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, errors.New(c.endpointURL.String() + " is offline.") } - var retryable bool // Indicates if request can be retried. - var bodySeeker io.Seeker // Extracted seeker from io.Reader. 
- reqRetry := MaxRetry // Indicates how many times we can retry the request + var retryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + var reqRetry = c.maxRetries // Indicates how many times we can retry the request if metadata.contentBody != nil { // Check if body is seekable then it is retryable. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index e706b57de6..344af2b780 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -434,12 +434,34 @@ func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElemen return enc.EncodeElement(delMarkerExp(de), start) } +// AllVersionsExpiration represents AllVersionsExpiration actions element in an ILM policy +type AllVersionsExpiration struct { + XMLName xml.Name `xml:"AllVersionsExpiration" json:"-"` + Days int `xml:"Days,omitempty" json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"DeleteMarker,omitempty" json:"DeleteMarker,omitempty"` +} + +// IsNull returns true if days field is 0 +func (e AllVersionsExpiration) IsNull() bool { + return e.Days == 0 +} + +// MarshalXML satisfies xml.Marshaler to provide custom encoding +func (e AllVersionsExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if e.IsNull() { + return nil + } + type allVersionsExp AllVersionsExpiration + return enc.EncodeElement(allVersionsExp(e), start) +} + // MarshalJSON customizes json encoding by omitting empty values func (r Rule) MarshalJSON() ([]byte, error) { type rule struct { AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` Expiration *Expiration `json:"Expiration,omitempty"` DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"` + AllVersionsExpiration *AllVersionsExpiration `json:"AllVersionsExpiration,omitempty"` ID string `json:"ID"` RuleFilter *Filter `json:"Filter,omitempty"` NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` @@ -475,6 +497,9 @@ func (r Rule) MarshalJSON() ([]byte, error) { if !r.NoncurrentVersionTransition.isNull() { newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition } + if !r.AllVersionsExpiration.IsNull() { + newr.AllVersionsExpiration = &r.AllVersionsExpiration + } return json.Marshal(newr) } @@ -485,6 +510,7 @@ type Rule struct { AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"` + AllVersionsExpiration AllVersionsExpiration `xml:"AllVersionsExpiration,omitempty" json:"AllVersionsExpiration,omitempty"` ID string `xml:"ID" json:"ID"` RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index cc5f19dae7..a509f783fa 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ 
b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -534,6 +534,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper for _, el := range vec { f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ Metric: el.Metric, F: f, diff --git a/vendor/github.com/seiflotfy/cuckoofilter/.gitignore b/vendor/github.com/seiflotfy/cuckoofilter/.gitignore deleted file mode 100644 index 11b90db8d9..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea diff --git a/vendor/github.com/seiflotfy/cuckoofilter/LICENSE b/vendor/github.com/seiflotfy/cuckoofilter/LICENSE deleted file mode 100644 index 58393c98c1..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Seif Lotfy - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/seiflotfy/cuckoofilter/README.md b/vendor/github.com/seiflotfy/cuckoofilter/README.md deleted file mode 100644 index 2a77fb393f..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Cuckoo Filter - -[![GoDoc](https://godoc.org/github.com/seiflotfy/cuckoofilter?status.svg)](https://godoc.org/github.com/seiflotfy/cuckoofilter) [![CodeHunt.io](https://img.shields.io/badge/vote-codehunt.io-02AFD1.svg)](http://codehunt.io/sub/cuckoo-filter/?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) - -Cuckoo filter is a Bloom filter replacement for approximated set-membership queries. While Bloom filters are well-known space-efficient data structures to serve queries like "if item x is in a set?", they do not support deletion. Their variances to enable deletion (like counting Bloom filters) usually require much more space. - -Cuckoo filters provide the flexibility to add and remove items dynamically. A cuckoo filter is based on cuckoo hashing (and therefore named as cuckoo filter). It is essentially a cuckoo hash table storing each key's fingerprint. 
Cuckoo hash tables can be highly compact, thus a cuckoo filter could use less space than conventional Bloom filters, for applications that require low false positive rates (< 3%). - -For details about the algorithm and citations please use this article for now - -["Cuckoo Filter: Better Than Bloom" by Bin Fan, Dave Andersen and Michael Kaminsky](https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf) - -## Implementation details - -The paper cited above leaves several parameters to choose. In this implementation - -1. Every element has 2 possible bucket indices -2. Buckets have a static size of 4 fingerprints -3. Fingerprints have a static size of 8 bits - -1 and 2 are suggested to be the optimum by the authors. The choice of 3 comes down to the desired false positive rate. Given a target false positive rate of `r` and a bucket size `b`, they suggest choosing the fingerprint size `f` using - - f >= log2(2b/r) bits - -With the 8 bit fingerprint size in this repository, you can expect `r ~= 0.03`. -[Other implementations](https://github.com/panmari/cuckoofilter) use 16 bit, which correspond to a false positive rate of `r ~= 0.0001`. - -## Example usage: -```go -package main - -import "fmt" -import cuckoo "github.com/seiflotfy/cuckoofilter" - -func main() { - cf := cuckoo.NewFilter(1000) - cf.InsertUnique([]byte("geeky ogre")) - - // Lookup a string (and it a miss) if it exists in the cuckoofilter - cf.Lookup([]byte("hello")) - - count := cf.Count() - fmt.Println(count) // count == 1 - - // Delete a string (and it a miss) - cf.Delete([]byte("hello")) - - count = cf.Count() - fmt.Println(count) // count == 1 - - // Delete a string (a hit) - cf.Delete([]byte("geeky ogre")) - - count = cf.Count() - fmt.Println(count) // count == 0 - - cf.Reset() // reset -} -``` - -## Documentation: -["Cuckoo Filter on GoDoc"](http://godoc.org/github.com/seiflotfy/cuckoofilter) diff --git a/vendor/github.com/seiflotfy/cuckoofilter/bucket.go b/vendor/github.com/seiflotfy/cuckoofilter/bucket.go deleted file mode 100644 index 4a83fc5030..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/bucket.go +++ /dev/null @@ -1,45 +0,0 @@ -package cuckoo - -type fingerprint byte - -type bucket [bucketSize]fingerprint - -const ( - nullFp = 0 - bucketSize = 4 -) - -func (b *bucket) insert(fp fingerprint) bool { - for i, tfp := range b { - if tfp == nullFp { - b[i] = fp - return true - } - } - return false -} - -func (b *bucket) delete(fp fingerprint) bool { - for i, tfp := range b { - if tfp == fp { - b[i] = nullFp - return true - } - } - return false -} - -func (b *bucket) getFingerprintIndex(fp fingerprint) int { - for i, tfp := range b { - if tfp == fp { - return i - } - } - return -1 -} - -func (b *bucket) reset() { - for i := range b { - b[i] = nullFp - } -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go b/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go deleted file mode 100644 index ec0d246de2..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go +++ /dev/null @@ -1,165 +0,0 @@ -package cuckoo - -import ( - "fmt" - "math/bits" - "math/rand" -) - -const maxCuckooCount = 500 - -// Filter is a probabilistic counter -type Filter struct { - buckets []bucket - count uint - bucketPow uint -} - -// NewFilter returns a new cuckoofilter with a given capacity. -// A capacity of 1000000 is a normal default, which allocates -// about ~1MB on 64-bit machines. 
-func NewFilter(capacity uint) *Filter { - capacity = getNextPow2(uint64(capacity)) / bucketSize - if capacity == 0 { - capacity = 1 - } - buckets := make([]bucket, capacity) - return &Filter{ - buckets: buckets, - count: 0, - bucketPow: uint(bits.TrailingZeros(capacity)), - } -} - -// Lookup returns true if data is in the counter -func (cf *Filter) Lookup(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.buckets[i1].getFingerprintIndex(fp) > -1 { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - return cf.buckets[i2].getFingerprintIndex(fp) > -1 -} - -// Reset ... -func (cf *Filter) Reset() { - for i := range cf.buckets { - cf.buckets[i].reset() - } - cf.count = 0 -} - -func randi(i1, i2 uint) uint { - if rand.Intn(2) == 0 { - return i1 - } - return i2 -} - -// Insert inserts data into the counter and returns true upon success -func (cf *Filter) Insert(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.insert(fp, i1) { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - if cf.insert(fp, i2) { - return true - } - return cf.reinsert(fp, randi(i1, i2)) -} - -// InsertUnique inserts data into the counter if not exists and returns true upon success -func (cf *Filter) InsertUnique(data []byte) bool { - if cf.Lookup(data) { - return false - } - return cf.Insert(data) -} - -func (cf *Filter) insert(fp fingerprint, i uint) bool { - if cf.buckets[i].insert(fp) { - cf.count++ - return true - } - return false -} - -func (cf *Filter) reinsert(fp fingerprint, i uint) bool { - for k := 0; k < maxCuckooCount; k++ { - j := rand.Intn(bucketSize) - oldfp := fp - fp = cf.buckets[i][j] - cf.buckets[i][j] = oldfp - - // look in the alternate location for that random element - i = getAltIndex(fp, i, cf.bucketPow) - if cf.insert(fp, i) { - return true - } - } - return false -} - -// Delete data from counter if exists and return if deleted or not -func (cf *Filter) Delete(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.delete(fp, i1) { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - return cf.delete(fp, i2) -} - -func (cf *Filter) delete(fp fingerprint, i uint) bool { - if cf.buckets[i].delete(fp) { - if cf.count > 0 { - cf.count-- - } - return true - } - return false -} - -// Count returns the number of items in the counter -func (cf *Filter) Count() uint { - return cf.count -} - -// Encode returns a byte slice representing a Cuckoofilter -func (cf *Filter) Encode() []byte { - bytes := make([]byte, len(cf.buckets)*bucketSize) - for i, b := range cf.buckets { - for j, f := range b { - index := (i * len(b)) + j - bytes[index] = byte(f) - } - } - return bytes -} - -// Decode returns a Cuckoofilter from a byte slice -func Decode(bytes []byte) (*Filter, error) { - var count uint - if len(bytes)%bucketSize != 0 { - return nil, fmt.Errorf("expected bytes to be multiple of %d, got %d", bucketSize, len(bytes)) - } - if len(bytes) == 0 { - return nil, fmt.Errorf("bytes can not be empty") - } - buckets := make([]bucket, len(bytes)/4) - for i, b := range buckets { - for j := range b { - index := (i * len(b)) + j - if bytes[index] != 0 { - buckets[i][j] = fingerprint(bytes[index]) - count++ - } - } - } - return &Filter{ - buckets: buckets, - count: count, - bucketPow: uint(bits.TrailingZeros(uint(len(buckets)))), - }, nil -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/doc.go b/vendor/github.com/seiflotfy/cuckoofilter/doc.go deleted file mode 100644 index 6f6cbf8281..0000000000 --- 
a/vendor/github.com/seiflotfy/cuckoofilter/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -/* -Package cuckoo provides a Cuckoo Filter, a Bloom filter replacement for approximated set-membership queries. - -While Bloom filters are well-known space-efficient data structures to serve queries like "if item x is in a set?", they do not support deletion. Their variances to enable deletion (like counting Bloom filters) usually require much more space. - -Cuckoo filters provide the flexibility to add and remove items dynamically. A cuckoo filter is based on cuckoo hashing (and therefore named as cuckoo filter). It is essentially a cuckoo hash table storing each key's fingerprint. Cuckoo hash tables can be highly compact, thus a cuckoo filter could use less space than conventional Bloom filters, for applications that require low false positive rates (< 3%). 
- -For details about the algorithm and citations please use this article: - -"Cuckoo Filter: Better Than Bloom" by Bin Fan, Dave Andersen and Michael Kaminsky -(https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf) - -Note: -This implementation uses a a static bucket size of 4 fingerprints and a fingerprint size of 1 byte based on my understanding of an optimal bucket/fingerprint/size ratio from the aforementioned paper.*/ -package cuckoo diff --git a/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go b/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go deleted file mode 100644 index 693184c9d4..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go +++ /dev/null @@ -1,170 +0,0 @@ -package cuckoo - -import ( - "bytes" - "encoding/gob" -) - -const ( - DefaultLoadFactor = 0.9 - DefaultCapacity = 10000 -) - -type ScalableCuckooFilter struct { - filters []*Filter - loadFactor float32 - //when scale(last filter size * loadFactor >= capacity) get new filter capacity - scaleFactor func(capacity uint) uint -} - -type option func(*ScalableCuckooFilter) - -type Store struct { - Bytes [][]byte - LoadFactor float32 -} - -/* - by default option the grow capacity is: - capacity , total - 4096 4096 - 8192 12288 -16384 28672 -32768 61440 -65536 126,976 -*/ -func NewScalableCuckooFilter(opts ...option) *ScalableCuckooFilter { - sfilter := new(ScalableCuckooFilter) - for _, opt := range opts { - opt(sfilter) - } - configure(sfilter) - return sfilter -} - -func (sf *ScalableCuckooFilter) Lookup(data []byte) bool { - for _, filter := range sf.filters { - if filter.Lookup(data) { - return true - } - } - return false -} - -func (sf *ScalableCuckooFilter) Reset() { - for _, filter := range sf.filters { - filter.Reset() - } -} - -func (sf *ScalableCuckooFilter) Insert(data []byte) bool { - needScale := false - lastFilter := sf.filters[len(sf.filters)-1] - if (float32(lastFilter.count) / float32(len(lastFilter.buckets))) > sf.loadFactor { - needScale = true - } else { - b := lastFilter.Insert(data) - needScale = !b - } - if !needScale { - return true - } - newFilter := NewFilter(sf.scaleFactor(uint(len(lastFilter.buckets)))) - sf.filters = append(sf.filters, newFilter) - return newFilter.Insert(data) -} - -func (sf *ScalableCuckooFilter) InsertUnique(data []byte) bool { - if sf.Lookup(data) { - return false - } - return sf.Insert(data) -} - -func (sf *ScalableCuckooFilter) Delete(data []byte) bool { - for _, filter := range sf.filters { - if filter.Delete(data) { - return true - } - } - return false -} - -func (sf *ScalableCuckooFilter) Count() uint { - var sum uint - for _, filter := range sf.filters { - sum += filter.count - } - return sum - -} - -func (sf *ScalableCuckooFilter) Encode() []byte { - slice := make([][]byte, len(sf.filters)) - for i, filter := range sf.filters { - encode := filter.Encode() - slice[i] = encode - } - store := &Store{ - Bytes: slice, - LoadFactor: sf.loadFactor, - } - buf := bytes.NewBuffer(nil) - enc := gob.NewEncoder(buf) - err := enc.Encode(store) - if err != nil { - return nil - } - return buf.Bytes() -} - -func (sf *ScalableCuckooFilter) DecodeWithParam(fBytes []byte, opts ...option) (*ScalableCuckooFilter, error) { - instance, err := DecodeScalableFilter(fBytes) - if err != nil { - return nil, err - } - for _, opt := range opts { - opt(instance) - } - return instance, nil -} - -func DecodeScalableFilter(fBytes []byte) (*ScalableCuckooFilter, error) { - buf := bytes.NewBuffer(fBytes) - dec := gob.NewDecoder(buf) - 
store := &Store{} - err := dec.Decode(store) - if err != nil { - return nil, err - } - filterSize := len(store.Bytes) - instance := NewScalableCuckooFilter(func(filter *ScalableCuckooFilter) { - filter.filters = make([]*Filter, filterSize) - }, func(filter *ScalableCuckooFilter) { - filter.loadFactor = store.LoadFactor - }) - for i, oneBytes := range store.Bytes { - filter, err := Decode(oneBytes) - if err != nil { - return nil, err - } - instance.filters[i] = filter - } - return instance, nil - -} - -func configure(sfilter *ScalableCuckooFilter) { - if sfilter.loadFactor == 0 { - sfilter.loadFactor = DefaultLoadFactor - } - if sfilter.scaleFactor == nil { - sfilter.scaleFactor = func(currentSize uint) uint { - return currentSize * bucketSize * 2 - } - } - if sfilter.filters == nil { - initFilter := NewFilter(DefaultCapacity) - sfilter.filters = []*Filter{initFilter} - } -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/util.go b/vendor/github.com/seiflotfy/cuckoofilter/util.go deleted file mode 100644 index 840932e288..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/util.go +++ /dev/null @@ -1,71 +0,0 @@ -package cuckoo - -import ( - metro "github.com/dgryski/go-metro" -) - -var ( - altHash = [256]uint{} - masks = [65]uint{} -) - -func init() { - for i := 0; i < 256; i++ { - altHash[i] = (uint(metro.Hash64([]byte{byte(i)}, 1337))) - } - for i := uint(0); i <= 64; i++ { - masks[i] = (1 << i) - 1 - } -} - -func getAltIndex(fp fingerprint, i uint, bucketPow uint) uint { - mask := masks[bucketPow] - hash := altHash[fp] & mask - return (i & mask) ^ hash -} - -func getFingerprint(hash uint64) byte { - // Use least significant bits for fingerprint. - fp := byte(hash%255 + 1) - return fp -} - -// getIndicesAndFingerprint returns the 2 bucket indices and fingerprint to be used -func getIndexAndFingerprint(data []byte, bucketPow uint) (uint, fingerprint) { - hash := defaultHasher.Hash64(data) - fp := getFingerprint(hash) - // Use most significant bits for deriving index. 
- i1 := uint(hash>>32) & masks[bucketPow] - return i1, fingerprint(fp) -} - -func getNextPow2(n uint64) uint { - n-- - n |= n >> 1 - n |= n >> 2 - n |= n >> 4 - n |= n >> 8 - n |= n >> 16 - n |= n >> 32 - n++ - return uint(n) -} - -var defaultHasher Hasher = new(metrotHasher) - -func SetDefaultHasher(hasher Hasher) { - defaultHasher = hasher -} - -type Hasher interface { - Hash64([]byte) uint64 -} - -var _ Hasher = new(metrotHasher) - -type metrotHasher struct{} - -func (h *metrotHasher) Hash64(data []byte) uint64 { - hash := metro.Hash64(data, 1337) - return hash -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 00fda38831..64add7fb51 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -130,7 +130,7 @@ func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st } if checkExternalLabels { - if len(meta.Thanos.Labels) == 0 { + if meta.Thanos.Labels == nil || len(meta.Thanos.Labels) == 0 { return errors.New("empty external labels are not allowed for Thanos block.") } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 772d37a48b..883b8e0608 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -257,11 +257,7 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch c mu.Unlock() continue } - select { - case <-ctx.Done(): - return ctx.Err() - case ch <- uid: - } + ch <- uid } return nil }) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go index ff3975663c..e9fe5eb7dc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -32,7 +32,7 @@ func NewReaderPoolMetrics(reg prometheus.Registerer) *ReaderPoolMetrics { } } -// ReaderPool is used to instantiate new index-header readers and keep track of them. +// ReaderPool is used to istantiate new index-header readers and keep track of them. // When the lazy reader is enabled, the pool keeps track of all instantiated readers // and automatically close them once the idle timeout is reached. A closed lazy reader // will be automatically re-opened upon next usage. @@ -73,7 +73,7 @@ func (s IndexHeaderLazyDownloadStrategy) StrategyToDownloadFunc() LazyDownloadIn } } -// LazyDownloadIndexHeaderFunc is used to determine whether to download the index header lazily +// LazyDownloadIndexHeaderFunc is used to determinte whether to download the index header lazily // or not by checking its block metadata. Usecase can be by time or by index file size. type LazyDownloadIndexHeaderFunc func(meta *metadata.Meta) bool diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go index a5b0c5b2a4..8c10a9a874 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go @@ -134,7 +134,7 @@ type MemcachedClientConfig struct { MaxItemSize model.Bytes `yaml:"max_item_size"` // MaxGetMultiBatchSize specifies the maximum number of keys a single underlying - // GetMulti() should run. 
If more keys are specified, internally keys are split + // GetMulti() should run. If more keys are specified, internally keys are splitted // into multiple batches and fetched concurrently, honoring MaxGetMultiConcurrency parallelism. // If set to 0, the max batch size is unlimited. MaxGetMultiBatchSize int `yaml:"max_get_multi_batch_size"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 7f08297671..522e4c9d4c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -894,7 +894,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp _, _ = sb.WriteString(",") } } - rerr = fmt.Errorf("panicked while compacting %s: %v", sb.String(), p) + rerr = fmt.Errorf("paniced while compacting %s: %v", sb.String(), p) } }() diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 46d590186e..6aa2b23dfe 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -149,7 +149,7 @@ func Downsample( // Raw and already downsampled data need different processing. if origMeta.Thanos.Downsample.Resolution == 0 { for _, c := range chks { - // TODO(bwplotka): We can optimize this further by using in WriteSeries iterators of each chunk instead of + // TODO(bwplotka): We can optimze this further by using in WriteSeries iterators of each chunk instead of // samples. Also ensure 120 sample limit, otherwise we have gigantic chunks. // https://github.com/thanos-io/thanos/issues/2542. if err := expandChunkIterator(c.Chunk.Iterator(reuseIt), &all); err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go index 394e33185b..6d7d03eea2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -68,7 +68,7 @@ func (p *tsdbBasedPlanner) plan(noCompactMarked map[ulid.ULID]*metadata.NoCompac } // No overlapping blocks, do compaction the usual way. - // We do not include a recently produced block with max(minTime), so the block which was just uploaded to bucket. + // We do not include a recently producted block with max(minTime), so the block which was just uploaded to bucket. // This gives users a window of a full block size maintenance if needed. if _, excluded := noCompactMarked[metasByMinTime[len(metasByMinTime)-1].ULID]; !excluded { notExcludedMetasByMinTime = notExcludedMetasByMinTime[:len(notExcludedMetasByMinTime)-1] @@ -200,7 +200,7 @@ func splitByRange(metasByMinTime []*metadata.Meta, tr int64) [][]*metadata.Meta t0 = tr * ((m.MinTime - tr + 1) / tr) } - // Skip blocks that don't fall into the range. This can happen via misalignment or + // Skip blocks that don't fall into the range. This can happen via mis-alignment or // by being the multiple of the intended range. 
if m.MaxTime > t0+tr { i++ diff --git a/vendor/github.com/thanos-io/thanos/pkg/component/component.go b/vendor/github.com/thanos-io/thanos/pkg/component/component.go index 6c52aef138..dfbae08289 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/component/component.go +++ b/vendor/github.com/thanos-io/thanos/pkg/component/component.go @@ -3,6 +3,12 @@ package component +import ( + "strings" + + "github.com/thanos-io/thanos/pkg/store/storepb" +) + // Component is a generic component interface. type Component interface { String() string @@ -12,6 +18,7 @@ type Component interface { type StoreAPI interface { implementsStoreAPI() String() string + ToProto() storepb.StoreType } // Source is a Thanos component that produce blocks of metrics. @@ -26,6 +33,7 @@ type SourceStoreAPI interface { implementsStoreAPI() producesBlocks() String() string + ToProto() storepb.StoreType } type component struct { @@ -40,6 +48,14 @@ type storeAPI struct { func (storeAPI) implementsStoreAPI() {} +func (s sourceStoreAPI) ToProto() storepb.StoreType { + return storepb.StoreType(storepb.StoreType_value[strings.ToUpper(s.String())]) +} + +func (s storeAPI) ToProto() storepb.StoreType { + return storepb.StoreType(storepb.StoreType_value[strings.ToUpper(s.String())]) +} + type source struct { component } @@ -52,6 +68,26 @@ type sourceStoreAPI struct { storeAPI } +// FromProto converts from a gRPC StoreType to StoreAPI. +func FromProto(storeType storepb.StoreType) StoreAPI { + switch storeType { + case storepb.StoreType_QUERY: + return Query + case storepb.StoreType_RULE: + return Rule + case storepb.StoreType_SIDECAR: + return Sidecar + case storepb.StoreType_STORE: + return Store + case storepb.StoreType_RECEIVE: + return Receive + case storepb.StoreType_DEBUG: + return Debug + default: + return UnknownStoreAPI + } +} + func FromString(storeType string) StoreAPI { switch storeType { case "query": @@ -89,24 +125,4 @@ var ( Store = storeAPI{component: component{name: "store"}} UnknownStoreAPI = storeAPI{component: component{name: "unknown-store-api"}} Query = storeAPI{component: component{name: "query"}} - - All = []Component{ - Bucket, - Cleanup, - Mark, - Upload, - Rewrite, - Retention, - Compact, - Downsample, - Replicate, - QueryFrontend, - Debug, - Receive, - Rule, - Sidecar, - Store, - UnknownStoreAPI, - Query, - } ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go deleted file mode 100644 index 4e315596df..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package dns - -import ( - "context" - "sync" - "time" - - grpcresolver "google.golang.org/grpc/resolver" -) - -var ( - _ grpcresolver.Builder = &builder{} - _ grpcresolver.Resolver = &resolver{} -) - -type builder struct { - resolveInterval time.Duration - provider *Provider -} - -func RegisterGRPCResolver(provider *Provider, interval time.Duration) { - grpcresolver.Register(&builder{ - resolveInterval: interval, - provider: provider, - }) -} - -func (b *builder) Scheme() string { return "thanos" } - -func (b *builder) Build(t grpcresolver.Target, cc grpcresolver.ClientConn, _ grpcresolver.BuildOptions) (grpcresolver.Resolver, error) { - ctx, cancel := context.WithCancel(context.Background()) - r := &resolver{ - provider: b.provider, - target: t.Endpoint(), - ctx: ctx, - cancel: cancel, - cc: cc, - interval: b.resolveInterval, - } - r.wg.Add(1) - go r.run() - - return r, nil -} - -type resolver struct { - provider *Provider - - target string - ctx context.Context - cancel context.CancelFunc - cc grpcresolver.ClientConn - interval time.Duration - - wg sync.WaitGroup -} - -func (r *resolver) Close() { - r.cancel() - r.wg.Wait() -} - -func (r *resolver) ResolveNow(_ grpcresolver.ResolveNowOptions) {} - -func (r *resolver) resolve() error { - ctx, cancel := context.WithTimeout(r.ctx, r.interval) - defer cancel() - return r.provider.Resolve(ctx, []string{r.target}) -} - -func (r *resolver) addresses() []string { - return r.provider.AddressesForHost(r.target) -} - -func (r *resolver) run() { - defer r.wg.Done() - for { - if err := r.resolve(); err != nil { - r.cc.ReportError(err) - } else { - state := grpcresolver.State{} - for _, addr := range r.addresses() { - raddr := grpcresolver.Address{Addr: addr} - state.Addresses = append(state.Addresses, raddr) - } - _ = r.cc.UpdateState(state) - } - select { - case <-r.ctx.Done(): - return - case <-time.After(r.interval): - } - } -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index 8f42bf4d26..3ec032a654 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -164,16 +164,3 @@ func (p *Provider) Addresses() []string { } return result } - -// AddressesForHost returns the latest addresses present for the host in the Provider. -func (p *Provider) AddressesForHost(host string) []string { - p.RLock() - defer p.RUnlock() - - addrs := p.resolved[host] - - res := make([]string, len(addrs)) - copy(res, addrs) - - return res -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go index 0025178607..d0078aea57 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go @@ -108,9 +108,7 @@ func (s *dnsSD) Resolve(ctx context.Context, name string, qtype QType) ([]string } if qtype == SRVNoA { - // Remove the final dot from rooted DNS names (this is for compatibility with Prometheus) - target := strings.TrimRight(rec.Target, ".") - res = append(res, appendScheme(scheme, net.JoinHostPort(target, resPort))) + res = append(res, appendScheme(scheme, net.JoinHostPort(rec.Target, resPort))) continue } // Do A lookup for the domain in SRV answer. 
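Reviewer note on the two DNS changes above: the thanos:// gRPC resolver (grpc.go plus Provider.AddressesForHost) is removed outright, and SRV answers are no longer stripped of their trailing root dot before being joined with a port. A minimal sketch of the dot-trimming behavior this revert drops — joinSRVTarget is an illustrative name for this note, not part of the vendored API:

package main

import (
	"fmt"
	"net"
	"strings"
)

// joinSRVTarget mimics the compatibility behavior removed above: SRV lookups
// return rooted DNS names such as "db.example.com.", and Prometheus trims the
// final dot before building a dialable host:port address.
func joinSRVTarget(target, port string) string {
	return net.JoinHostPort(strings.TrimRight(target, "."), port)
}

func main() {
	// Prints "db.example.com:5432"; after this revert the rooted form
	// "db.example.com.:5432" is produced instead.
	fmt.Println(joinSRVTarget("db.example.com.", "5432"))
}

Rooted names still resolve, but the rendered address string changes, which can matter to anything that compares or deduplicates endpoints by host:port.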
diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go index ebdda5fbe1..75d33243c7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_client.go @@ -27,8 +27,6 @@ type ClientMetrics struct { // e.g. 1 ClientMetrics should be used for all the clients that talk to Alertmanager. func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { var m ClientMetrics - const maxBucketNumber = 256 - const bucketFactor = 1.1 m.inFlightGauge = promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Subsystem: "http_client", @@ -48,9 +46,6 @@ func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { Name: "dns_duration_seconds", Help: "Trace dns latency histogram.", Buckets: []float64{0.025, .05, .1, .5, 1, 5, 10}, - - NativeHistogramBucketFactor: bucketFactor, - NativeHistogramMaxBucketNumber: maxBucketNumber, }, []string{"event"}, ) @@ -61,9 +56,6 @@ func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { Name: "tls_duration_seconds", Help: "Trace tls latency histogram.", Buckets: []float64{0.025, .05, .1, .5, 1, 5, 10}, - - NativeHistogramBucketFactor: bucketFactor, - NativeHistogramMaxBucketNumber: maxBucketNumber, }, []string{"event"}, ) @@ -74,9 +66,6 @@ func NewClientMetrics(reg prometheus.Registerer) *ClientMetrics { Name: "request_duration_seconds", Help: "A histogram of request latencies.", Buckets: []float64{0.025, .05, .1, .5, 1, 5, 10}, - - NativeHistogramBucketFactor: bucketFactor, - NativeHistogramMaxBucketNumber: maxBucketNumber, }, []string{"code", "method"}, ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/filter/cuckoo.go b/vendor/github.com/thanos-io/thanos/pkg/filter/cuckoo.go deleted file mode 100644 index 0cdce6dc94..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/filter/cuckoo.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filter - -import ( - "sync" - "unsafe" - - "github.com/prometheus/prometheus/model/labels" - cuckoo "github.com/seiflotfy/cuckoofilter" -) - -type CuckooMetricNameStoreFilter struct { - filter *cuckoo.Filter - mtx sync.RWMutex -} - -func NewCuckooMetricNameStoreFilter(capacity uint) *CuckooMetricNameStoreFilter { - return &CuckooMetricNameStoreFilter{ - filter: cuckoo.NewFilter(capacity), - } -} - -func (f *CuckooMetricNameStoreFilter) Matches(matchers []*labels.Matcher) bool { - f.mtx.RLock() - defer f.mtx.RUnlock() - - for _, m := range matchers { - if m.Type == labels.MatchEqual && m.Name == labels.MetricName { - return f.filter.Lookup([]byte(m.Value)) - } - } - - return true -} - -func (f *CuckooMetricNameStoreFilter) ResetAndSet(values ...string) { - f.mtx.Lock() - defer f.mtx.Unlock() - f.filter.Reset() - for _, value := range values { - f.filter.Insert(unsafe.Slice(unsafe.StringData(value), len(value))) - } -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/filter/filter.go b/vendor/github.com/thanos-io/thanos/pkg/filter/filter.go deleted file mode 100644 index f5cc068cf5..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/filter/filter.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filter - -import "github.com/prometheus/prometheus/model/labels" - -type StoreFilter interface { - // Matches returns true if the filter matches the given matchers. 
- Matches(matchers []*labels.Matcher) bool - - // ResetAndSet resets the filter and sets it to the given values. - ResetAndSet(values ...string) -} - -type AllowAllStoreFilter struct{} - -func (f AllowAllStoreFilter) Matches(matchers []*labels.Matcher) bool { - return true -} - -func (f AllowAllStoreFilter) ResetAndSet(values ...string) {} diff --git a/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go index f3481257f2..159966d0cb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go @@ -9,52 +9,51 @@ import ( "github.com/pkg/errors" ) -// Pool is a pool for slices of type T that can be reused. -type Pool[T any] interface { - // Get returns a new T slice that fits the given size. - Get(sz int) (*[]T, error) - // Put returns a T slice to the right bucket in the pool. - Put(b *[]T) +// Bytes is a pool of bytes that can be reused. +type Bytes interface { + // Get returns a new byte slice that fits the given size. + Get(sz int) (*[]byte, error) + // Put returns a byte slice to the right bucket in the pool. + Put(b *[]byte) } -// NoopPool is pool that always allocated required slice on heap and ignore puts. -type NoopPool[T any] struct{} +// NoopBytes is a pool that always allocates the required slice on the heap and ignores puts. +type NoopBytes struct{} -func (p NoopPool[T]) Get(sz int) (*[]T, error) { - b := make([]T, 0, sz) +func (p NoopBytes) Get(sz int) (*[]byte, error) { + b := make([]byte, 0, sz) return &b, nil } -func (p NoopPool[T]) Put(*[]T) {} +func (p NoopBytes) Put(*[]byte) {} -// BucketedPool is a bucketed pool for variably sized T slices. It can be -// configured to not allow more than a maximum number of T items being used at a -// given time. Every slice obtained from the pool must be returned. -type BucketedPool[T any] struct { +// BucketedBytes is a bucketed pool for variably sized byte slices. It can be configured to not allow +// more than a maximum number of bytes being used at a given time. +// Every byte slice obtained from the pool must be returned. +type BucketedBytes struct { buckets []sync.Pool sizes []int maxTotal uint64 usedTotal uint64 mtx sync.RWMutex - new func(s int) *[]T + new func(s int) *[]byte } -// MustNewBucketedPool is like NewBucketedPool but panics if construction fails. +// MustNewBucketedBytes is like NewBucketedBytes but panics if construction fails. // Useful for package internal pools. -func MustNewBucketedPool[T any](minSize, maxSize int, factor float64, maxTotal uint64) *BucketedPool[T] { - pool, err := NewBucketedPool[T](minSize, maxSize, factor, maxTotal) +func MustNewBucketedBytes(minSize, maxSize int, factor float64, maxTotal uint64) *BucketedBytes { + pool, err := NewBucketedBytes(minSize, maxSize, factor, maxTotal) if err != nil { panic(err) } return pool } -// NewBucketedPool returns a new BucketedPool with size buckets for minSize to -// maxSize increasing by the given factor and maximum number of used items. No -// more than maxTotal items can be used at any given time unless maxTotal is set -// to 0. -func NewBucketedPool[T any](minSize, maxSize int, factor float64, maxTotal uint64) (*BucketedPool[T], error) { +// NewBucketedBytes returns a new Bytes with size buckets for minSize to maxSize
// increasing by the given factor and maximum number of used bytes.
// No more than maxTotal bytes can be used at any given time unless maxTotal is set to 0.
+func NewBucketedBytes(minSize, maxSize int, factor float64, maxTotal uint64) (*BucketedBytes, error) { if minSize < 1 { return nil, errors.New("invalid minimum pool size") } @@ -70,23 +69,23 @@ func NewBucketedPool[T any](minSize, maxSize int, factor float64, maxTotal uint6 for s := minSize; s <= maxSize; s = int(float64(s) * factor) { sizes = append(sizes, s) } - p := &BucketedPool[T]{ + p := &BucketedBytes{ buckets: make([]sync.Pool, len(sizes)), sizes: sizes, maxTotal: maxTotal, - new: func(sz int) *[]T { - s := make([]T, 0, sz) + new: func(sz int) *[]byte { + s := make([]byte, 0, sz) return &s }, } return p, nil } -// ErrPoolExhausted is returned if a pool cannot provide the requested slice. +// ErrPoolExhausted is returned if a pool cannot provide the requested bytes. var ErrPoolExhausted = errors.New("pool exhausted") -// Get returns a slice into from the bucket that fits the given size. -func (p *BucketedPool[T]) Get(sz int) (*[]T, error) { +// Get returns a new byte slice that fits the given size. +func (p *BucketedBytes) Get(sz int) (*[]byte, error) { p.mtx.Lock() defer p.mtx.Unlock() @@ -98,7 +97,7 @@ func (p *BucketedPool[T]) Get(sz int) (*[]T, error) { if sz > bktSize { continue } - b, ok := p.buckets[i].Get().(*[]T) + b, ok := p.buckets[i].Get().(*[]byte) if !ok { b = p.new(bktSize) } @@ -112,8 +111,8 @@ func (p *BucketedPool[T]) Get(sz int) (*[]T, error) { return p.new(sz), nil } -// Put returns a slice to the right bucket in the pool. -func (p *BucketedPool[T]) Put(b *[]T) { +// Put returns a byte slice to the right bucket in the pool. +func (p *BucketedBytes) Put(b *[]byte) { if b == nil { return } @@ -139,7 +138,7 @@ func (p *BucketedPool[T]) Put(b *[]T) { } } -func (p *BucketedPool[T]) UsedBytes() uint64 { +func (p *BucketedBytes) UsedBytes() uint64 { p.mtx.RLock() defer p.mtx.RUnlock() diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index 1f96f4c666..5dde62c5ee 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -734,7 +734,7 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string // SeriesInGRPC returns the labels from Prometheus series API. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]map[string]string, error) { +func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]map[string]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/series") q := u.Query() @@ -742,7 +742,6 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*la q.Add("match[]", storepb.PromMatchersToString(matchers...)) q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) - q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -754,7 +753,7 @@ // LabelNamesInGRPC returns all known label names constrained by the given matchers. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus.
-func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { +func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/labels") q := u.Query() @@ -764,7 +763,6 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) - q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -775,7 +773,7 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ // LabelValuesInGRPC returns all known label values for a given label name. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { +func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/label/", label, "/values") q := u.Query() @@ -785,7 +783,6 @@ func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label str } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) - q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go index 4c519bf925..b1faff425b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go @@ -83,9 +83,91 @@ func (es *endpointRef) Metadata(ctx context.Context, infoClient infopb.InfoClien return &endpointMetadata{resp}, nil } } + + // Call Info method of StoreAPI, this way the querier will be able to discover old components not exposing InfoAPI.
+ if storeClient != nil { + metadata, err := es.getMetadataUsingStoreAPI(ctx, storeClient) + if err != nil { + return nil, errors.Wrapf(err, "fallback fetching info from %s", es.addr) + } + return metadata, nil + } + return nil, errors.New(noMetadataEndpointMessage) } +func (es *endpointRef) getMetadataUsingStoreAPI(ctx context.Context, client storepb.StoreClient) (*endpointMetadata, error) { + resp, err := client.Info(ctx, &storepb.InfoRequest{}) + if err != nil { + return nil, err + } + + infoResp := fillExpectedAPIs(component.FromProto(resp.StoreType), resp.MinTime, resp.MaxTime) + infoResp.LabelSets = resp.LabelSets + infoResp.ComponentType = component.FromProto(resp.StoreType).String() + + return &endpointMetadata{ + &infoResp, + }, nil +} + +func fillExpectedAPIs(componentType component.Component, mintime, maxTime int64) infopb.InfoResponse { + switch componentType { + case component.Sidecar: + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Rules: &infopb.RulesInfo{}, + Targets: &infopb.TargetsInfo{}, + MetricMetadata: &infopb.MetricMetadataInfo{}, + Exemplars: &infopb.ExemplarsInfo{}, + } + case component.Query: + { + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Rules: &infopb.RulesInfo{}, + Targets: &infopb.TargetsInfo{}, + MetricMetadata: &infopb.MetricMetadataInfo{}, + Exemplars: &infopb.ExemplarsInfo{}, + Query: &infopb.QueryAPIInfo{}, + } + } + case component.Receive: + { + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Exemplars: &infopb.ExemplarsInfo{}, + } + } + case component.Store: + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + } + case component.Rule: + return infopb.InfoResponse{ + Store: &infopb.StoreInfo{ + MinTime: mintime, + MaxTime: maxTime, + }, + Rules: &infopb.RulesInfo{}, + } + default: + return infopb.InfoResponse{} + } +} + // stringError forces the error to be a string // when marshaled into a JSON. type stringError struct { @@ -117,7 +199,7 @@ type EndpointStatus struct { // TODO(hitanshu-mehta) Currently,only collecting metrics of storeEndpoints. Make this struct generic. type endpointSetNodeCollector struct { mtx sync.Mutex - storeNodes map[string]map[string]int + storeNodes map[component.Component]map[string]int storePerExtLset map[string]int logger log.Logger @@ -131,7 +213,7 @@ func newEndpointSetNodeCollector(logger log.Logger, labels ...string) *endpointS } return &endpointSetNodeCollector{ logger: logger, - storeNodes: map[string]map[string]int{}, + storeNodes: map[component.Component]map[string]int{}, connectionsDesc: prometheus.NewDesc( "thanos_store_nodes_grpc_connections", "Number of gRPC connection to Store APIs. 
Opened connection means healthy store APIs available for Querier.", @@ -154,8 +236,8 @@ func truncateExtLabels(s string, threshold int) string { } return s } -func (c *endpointSetNodeCollector) Update(nodes map[string]map[string]int) { - storeNodes := make(map[string]map[string]int, len(nodes)) +func (c *endpointSetNodeCollector) Update(nodes map[component.Component]map[string]int) { + storeNodes := make(map[component.Component]map[string]int, len(nodes)) storePerExtLset := map[string]int{} for storeType, occurrencesPerExtLset := range nodes { @@ -181,8 +263,12 @@ func (c *endpointSetNodeCollector) Collect(ch chan<- prometheus.Metric) { c.mtx.Lock() defer c.mtx.Unlock() - for k, occurrencesPerExtLset := range c.storeNodes { + for storeType, occurrencesPerExtLset := range c.storeNodes { for externalLabels, occurrences := range occurrencesPerExtLset { + var storeTypeStr string + if storeType != nil { + storeTypeStr = storeType.String() + } // Select only required labels. lbls := []string{} for _, lbl := range c.labels { @@ -190,7 +276,7 @@ case string(ExternalLabels): lbls = append(lbls, externalLabels) case string(StoreType): - lbls = append(lbls, k) + lbls = append(lbls, storeTypeStr) } } select { @@ -368,12 +454,12 @@ func (e *EndpointSet) Update(ctx context.Context) { // All producers that expose StoreAPI should have unique external labels. Check all which connect to our Querier. if er.HasStoreAPI() && (er.ComponentType() == component.Sidecar || er.ComponentType() == component.Rule) && - stats[component.Sidecar.String()][extLset]+stats[component.Rule.String()][extLset] > 0 { + stats[component.Sidecar][extLset]+stats[component.Rule][extLset] > 0 { level.Warn(e.logger).Log("msg", "found duplicate storeEndpoints producer (sidecar or ruler). This is not advised as it will malform data in the same bucket", "address", addr, "extLset", extLset, "duplicates", fmt.Sprintf("%v", stats[component.Sidecar][extLset]+stats[component.Rule][extLset]+1)) } - stats[er.ComponentType().String()][extLset]++ + stats[er.ComponentType()][extLset]++ } e.endpointsMetric.Update(stats) @@ -631,7 +717,7 @@ func (er *endpointRef) updateMetadata(metadata *endpointMetadata, err error) { } // isQueryable returns true if an endpointRef should be used for querying. -// A strict endpointRef is always queryable. A non-strict endpointRef +// A strict endpointRef is always queryable. A non-strict endpointRef // is queryable if the last health check (info call) succeeded. func (er *endpointRef) isQueryable() bool { er.mtx.RLock() @@ -711,7 +797,11 @@ func (er *endpointRef) labelSets() []labels.Labels { labelSet := make([]labels.Labels, 0, len(er.metadata.LabelSets)) for _, ls := range labelpb.ZLabelSetsToPromLabelSets(er.metadata.LabelSets...) { - if ls.Len() == 0 { + if len(ls) == 0 { + continue + } + // Compatibility label for Queriers pre 0.8.1. Filter it out now.
+ if ls[0].Name == store.CompatibilityTypeLabelName { continue } labelSet = append(labelSet, ls.Copy()) @@ -782,7 +872,7 @@ func (er *endpointRef) Addr() (string, bool) { } func (er *endpointRef) Close() { - runutil.CloseWithLogOnErr(er.logger, er.cc, "endpoint %v connection closed", er.addr) + runutil.CloseWithLogOnErr(er.logger, er.cc, fmt.Sprintf("endpoint %v connection closed", er.addr)) } func (er *endpointRef) apisPresent() []string { @@ -815,18 +905,14 @@ func (er *endpointRef) apisPresent() []string { return apisPresent } -func (er *endpointRef) Matches(matchers []*labels.Matcher) bool { - return true -} - type endpointMetadata struct { *infopb.InfoResponse } -func newEndpointAPIStats() map[string]map[string]int { - nodes := make(map[string]map[string]int, len(component.All)) - for _, comp := range component.All { - nodes[comp.String()] = map[string]int{} +func newEndpointAPIStats() map[component.Component]map[string]int { + nodes := make(map[component.Component]map[string]int, len(storepb.StoreType_name)) + for i := range storepb.StoreType_name { + nodes[component.FromProto(storepb.StoreType(i))] = map[string]int{} } return nodes } diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go index e084344ed9..9a1a311097 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go @@ -331,7 +331,6 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . req := storepb.SeriesRequest{ MinTime: hints.Start, MaxTime: hints.End, - Limit: int64(hints.Limit), Matchers: sms, MaxResolutionWindow: q.maxResolutionMillis, Aggregates: aggrs, @@ -374,7 +373,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . } // LabelValues returns all potential values for a label name. -func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_values") defer span.Finish() @@ -385,18 +384,12 @@ func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.L if err != nil { return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } - - if hints == nil { - hints = &storage.LabelHints{} - } - req := &storepb.LabelValuesRequest{ Label: name, PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, - Limit: int64(hints.Limit), } if q.isDedupEnabled() { @@ -418,7 +411,7 @@ func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.L // LabelNames returns all the unique label names present in the block in sorted order constrained // by the given matchers. 
-func (q *querier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_names") defer span.Finish() @@ -430,16 +423,11 @@ func (q *querier) LabelNames(ctx context.Context, hints *storage.LabelHints, mat return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } - if hints == nil { - hints = &storage.LabelHints{} - } - req := &storepb.LabelNamesRequest{ PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, - Limit: int64(hints.Limit), } if q.isDedupEnabled() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go index c625cab5ba..f379d38017 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go @@ -35,7 +35,6 @@ import ( type Opts struct { AutoDownsample bool ReplicaLabels []string - PartitionLabels []string Timeout time.Duration EnablePartialResponse bool } @@ -119,7 +118,7 @@ func (r *remoteEngine) MinT() int64 { hashBuf = make([]byte, 0, 128) highestMintByLabelSet = make(map[uint64]int64) ) - for _, lset := range r.adjustedInfos() { + for _, lset := range r.infosWithoutReplicaLabels() { key, _ := labelpb.ZLabelsToPromLabels(lset.Labels.Labels).HashWithoutLabels(hashBuf) lsetMinT, ok := highestMintByLabelSet[key] if !ok { @@ -153,22 +152,16 @@ func (r *remoteEngine) MaxT() int64 { func (r *remoteEngine) LabelSets() []labels.Labels { r.labelSetsOnce.Do(func() { - r.labelSets = r.adjustedInfos().LabelSets() + r.labelSets = r.infosWithoutReplicaLabels().LabelSets() }) return r.labelSets } -// adjustedInfos strips out replica labels and scopes the remaining labels -// onto the partition labels if they are set. -func (r *remoteEngine) adjustedInfos() infopb.TSDBInfos { +func (r *remoteEngine) infosWithoutReplicaLabels() infopb.TSDBInfos { replicaLabelSet := make(map[string]struct{}) for _, lbl := range r.opts.ReplicaLabels { replicaLabelSet[lbl] = struct{}{} } - partitionLabelsSet := make(map[string]struct{}) - for _, lbl := range r.opts.PartitionLabels { - partitionLabelsSet[lbl] = struct{}{} - } // Strip replica labels from the result. 
infos := make(infopb.TSDBInfos, 0, len(r.client.tsdbInfos)) @@ -179,9 +172,6 @@ func (r *remoteEngine) adjustedInfos() infopb.TSDBInfos { if _, ok := replicaLabelSet[lbl.Name]; ok { continue } - if _, ok := partitionLabelsSet[lbl.Name]; !ok && len(partitionLabelsSet) > 0 { - continue - } builder.Add(lbl.Name, lbl.Value) } infos = append(infos, infopb.NewTSDBInfo( diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go index 298ce03063..941014a06a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go @@ -109,9 +109,6 @@ type RulesRequest struct { Type RulesRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.RulesRequest_Type" json:"type,omitempty"` PartialResponseStrategy storepb.PartialResponseStrategy `protobuf:"varint,2,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` MatcherString []string `protobuf:"bytes,3,rep,name=matcher_string,json=matcherString,proto3" json:"matcher_string,omitempty"` - RuleName []string `protobuf:"bytes,4,rep,name=rule_name,json=ruleName,proto3" json:"rule_name,omitempty"` - RuleGroup []string `protobuf:"bytes,5,rep,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"` - File []string `protobuf:"bytes,6,rep,name=file,proto3" json:"file,omitempty"` } func (m *RulesRequest) Reset() { *m = RulesRequest{} } @@ -557,76 +554,74 @@ func init() { func init() { proto.RegisterFile("rules/rulespb/rpc.proto", fileDescriptor_91b1d28f30eb5efb) } var fileDescriptor_91b1d28f30eb5efb = []byte{ - // 1096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0x47, - 0x10, 0xf6, 0xd8, 0x9e, 0xb1, 0xa7, 0x8c, 0x59, 0xb6, 0x17, 0xc4, 0x00, 0x89, 0x07, 0x59, 0x22, - 0x22, 0x51, 0xd6, 0x8e, 0x40, 0xbb, 0xd1, 0x9e, 0x22, 0xcc, 0xcf, 0x82, 0x84, 0xc8, 0xaa, 0x8d, - 0x72, 0xd8, 0x1c, 0x9c, 0xc6, 0x34, 0x66, 0x94, 0xf1, 0xcc, 0x6c, 0x4f, 0x9b, 0x88, 0xb7, 0xd8, - 0x73, 0x5e, 0x24, 0xca, 0x1b, 0x70, 0xcb, 0x1e, 0x73, 0x72, 0x12, 0xb8, 0xf9, 0x90, 0x67, 0x88, - 0xba, 0x7a, 0xc6, 0x63, 0x08, 0x84, 0xdd, 0x84, 0x5c, 0xdc, 0xdd, 0x5f, 0x7d, 0xd5, 0x3f, 0x55, - 0x5f, 0x95, 0x07, 0xe6, 0xc5, 0xc0, 0xe7, 0x71, 0x13, 0x7f, 0xa3, 0xa3, 0xa6, 0x88, 0xba, 0x8d, - 0x48, 0x84, 0x32, 0x24, 0x96, 0x3c, 0x65, 0x41, 0x18, 0x2f, 0x2e, 0xc4, 0x32, 0x14, 0xbc, 0x89, - 0xbf, 0xd1, 0x51, 0x53, 0x9e, 0x47, 0x3c, 0xd6, 0x94, 0xd4, 0xe4, 0xb3, 0x23, 0xee, 0xdf, 0x30, - 0xcd, 0xf6, 0xc2, 0x5e, 0x88, 0xd3, 0xa6, 0x9a, 0x25, 0xa8, 0xdb, 0x0b, 0xc3, 0x9e, 0xcf, 0x9b, - 0xb8, 0x3a, 0x1a, 0x9c, 0x34, 0xa5, 0xd7, 0xe7, 0xb1, 0x64, 0xfd, 0x48, 0x13, 0xea, 0x3f, 0xe7, - 0x61, 0x8a, 0xaa, 0xab, 0x50, 0xfe, 0x66, 0xc0, 0x63, 0x49, 0x9e, 0x42, 0x51, 0x6d, 0xeb, 0x18, - 0xcb, 0xc6, 0xea, 0xf4, 0xda, 0x42, 0x43, 0x5f, 0xaa, 0x31, 0xc9, 0x69, 0x1c, 0x9e, 0x47, 0x9c, - 0x22, 0x8d, 0x7c, 0x0b, 0x0b, 0x11, 0x13, 0xd2, 0x63, 0x7e, 0x47, 0xf0, 0x38, 0x0a, 0x83, 0x98, - 0x77, 0x62, 0x29, 0x98, 0xe4, 0xbd, 0x73, 0x27, 0x8f, 0x7b, 0xb8, 0xe9, 0x1e, 0xaf, 0x34, 0x91, - 0x26, 0xbc, 0x76, 0x42, 0xa3, 0xf3, 0xd1, 0xed, 0x06, 0xb2, 0x02, 0xd3, 0x7d, 0x26, 0xbb, 0xa7, - 0x5c, 0xa8, 0x3d, 0xbd, 0xa0, 0xe7, 0x14, 0x96, 0x0b, 0xab, 0x36, 0xad, 0x26, 0x68, 0x1b, 0x41, - 0xb2, 0x04, 0xb6, 0x8a, 0x66, 0x27, 0x60, 0x7d, 0xee, 0x14, 0x91, 0x51, 0x56, 0xc0, 0x01, 0xeb, - 0x73, 
0xf2, 0x31, 0x00, 0x1a, 0x7b, 0x22, 0x1c, 0x44, 0x8e, 0x89, 0x56, 0xa4, 0xbf, 0x54, 0x00, - 0x21, 0x50, 0x3c, 0xf1, 0x7c, 0xee, 0x58, 0x68, 0xc0, 0x79, 0xfd, 0x13, 0x28, 0xaa, 0x17, 0x92, - 0x12, 0x14, 0x36, 0xf6, 0xf7, 0x67, 0x72, 0xc4, 0x06, 0x73, 0x63, 0x7f, 0x9b, 0x1e, 0xce, 0x18, - 0x04, 0xc0, 0xa2, 0xdb, 0x9b, 0x5f, 0xd3, 0xad, 0x99, 0x7c, 0xfd, 0x3b, 0xa8, 0x26, 0x61, 0xd1, - 0xf7, 0x26, 0x9f, 0x82, 0xa9, 0x8f, 0x51, 0xc1, 0xab, 0xac, 0x3d, 0x9e, 0x0c, 0x1e, 0x1e, 0xb7, - 0x9b, 0xa3, 0x9a, 0x41, 0x16, 0xa1, 0xf4, 0x03, 0x13, 0x81, 0x7a, 0x93, 0x8a, 0x92, 0xbd, 0x9b, - 0xa3, 0x29, 0xd0, 0x2a, 0x83, 0x25, 0x78, 0x3c, 0xf0, 0x65, 0x7d, 0x13, 0x60, 0xec, 0x1b, 0x93, - 0x67, 0x60, 0xa1, 0x73, 0xec, 0x18, 0xcb, 0x85, 0x5b, 0xf7, 0x6f, 0xc1, 0x68, 0xe8, 0x26, 0x24, - 0x9a, 0x8c, 0xf5, 0x3f, 0x0b, 0x60, 0x8f, 0x19, 0xe4, 0x23, 0x28, 0x62, 0x9c, 0xd4, 0x15, 0xed, - 0x56, 0x79, 0x34, 0x74, 0x71, 0x4d, 0xf1, 0x57, 0x59, 0x31, 0x1c, 0xf9, 0xcc, 0xaa, 0xd6, 0x3a, - 0x30, 0xe4, 0x29, 0x98, 0x28, 0x5b, 0x4c, 0x43, 0x65, 0x6d, 0x6a, 0xf2, 0xfc, 0x96, 0x3d, 0x1a, - 0xba, 0xda, 0x4c, 0xf5, 0x40, 0x56, 0xa1, 0xec, 0x05, 0x92, 0x8b, 0x33, 0xe6, 0x3b, 0xc5, 0x65, - 0x63, 0xd5, 0x68, 0x4d, 0x8d, 0x86, 0xee, 0x18, 0xa3, 0xe3, 0x19, 0xa1, 0xb0, 0xc4, 0xcf, 0x98, - 0x3f, 0x60, 0xd2, 0x0b, 0x83, 0xce, 0xf1, 0x40, 0xe8, 0x49, 0xcc, 0xbb, 0x61, 0x70, 0x1c, 0x3b, - 0x26, 0x3a, 0x93, 0xd1, 0xd0, 0x9d, 0xce, 0x68, 0x87, 0x5e, 0x9f, 0xd3, 0x85, 0x6c, 0xbd, 0x95, - 0x78, 0xb5, 0xb5, 0x13, 0xe9, 0xc0, 0x23, 0x9f, 0xc5, 0xb2, 0x93, 0x31, 0x1c, 0x0b, 0xd3, 0xb2, - 0xd8, 0xd0, 0x45, 0xd1, 0x48, 0x8b, 0xa2, 0x71, 0x98, 0x16, 0x45, 0x6b, 0xf1, 0x62, 0xe8, 0xe6, - 0xd4, 0x39, 0xca, 0x75, 0x7b, 0xec, 0xf9, 0xf6, 0x37, 0xd7, 0xa0, 0x37, 0x30, 0xe2, 0x82, 0xe9, - 0x7b, 0x7d, 0x4f, 0x3a, 0xf6, 0xb2, 0xb1, 0x5a, 0xd0, 0xef, 0x47, 0x80, 0xea, 0x81, 0x9c, 0xc1, - 0xfc, 0x1d, 0x92, 0x77, 0xca, 0xef, 0x55, 0x19, 0xad, 0xa5, 0xd1, 0xd0, 0xbd, 0xab, 0x3a, 0xe8, - 0x5d, 0x9b, 0xd7, 0x03, 0x28, 0xaa, 0x8c, 0x90, 0x67, 0x60, 0x0b, 0xde, 0x0d, 0xc5, 0xb1, 0x52, - 0x99, 0x96, 0xe4, 0xdc, 0x38, 0x65, 0xa9, 0x41, 0x31, 0x77, 0x73, 0x34, 0x63, 0x92, 0x15, 0x30, - 0x99, 0xcf, 0x85, 0x44, 0x11, 0x54, 0xd6, 0xaa, 0xa9, 0xcb, 0x86, 0x02, 0x95, 0x82, 0xd1, 0x3a, - 0xa1, 0xd2, 0x9f, 0x0a, 0x50, 0x45, 0xe3, 0x5e, 0x10, 0x4b, 0x16, 0x74, 0x39, 0x79, 0x01, 0x16, - 0xf6, 0xa8, 0xf8, 0x66, 0x25, 0xbc, 0xde, 0x57, 0x70, 0x9b, 0xcb, 0xd6, 0x74, 0x12, 0xe9, 0x84, - 0x48, 0x93, 0x91, 0xec, 0x42, 0x85, 0x05, 0x41, 0x28, 0x31, 0xc6, 0x71, 0x72, 0x87, 0x5b, 0xfc, - 0x9f, 0x24, 0xfe, 0x93, 0x6c, 0x3a, 0xb9, 0x20, 0xeb, 0x60, 0xc6, 0x92, 0x49, 0xee, 0x14, 0x30, - 0xd8, 0xe4, 0xda, 0x3b, 0xda, 0xca, 0xa2, 0x73, 0x86, 0x24, 0xaa, 0x07, 0xd2, 0x06, 0x9b, 0x75, - 0xa5, 0x77, 0xc6, 0x3b, 0x4c, 0xa2, 0x68, 0xef, 0xd1, 0xcb, 0x68, 0xe8, 0x12, 0xed, 0xb0, 0x21, - 0x3f, 0x0f, 0xfb, 0x9e, 0xe4, 0xfd, 0x48, 0x9e, 0xa3, 0x5e, 0xca, 0x29, 0xae, 0x94, 0xa2, 0x64, - 0xc3, 0x51, 0xc8, 0xb6, 0x3e, 0x15, 0x01, 0xaa, 0x87, 0x7f, 0x52, 0x8a, 0xf5, 0x7f, 0x2a, 0xe5, - 0x17, 0x13, 0x4c, 0x0c, 0x47, 0x16, 0x2c, 0xe3, 0x03, 0x82, 0x95, 0xf6, 0x92, 0xfc, 0xad, 0xbd, - 0xc4, 0x05, 0xf3, 0xcd, 0x80, 0x8b, 0x73, 0x8c, 0x7f, 0xf2, 0x6a, 0x04, 0xa8, 0x1e, 0xc8, 0x97, - 0x30, 0xf3, 0xb7, 0x52, 0x9f, 0xe8, 0x13, 0xa9, 0x8d, 0x3e, 0x3a, 0xbe, 0x51, 0xda, 0x99, 0xbc, - 0xcc, 0xff, 0x28, 0x2f, 0xeb, 0xdf, 0xcb, 0xeb, 0x05, 0x58, 0x58, 0x08, 0xb1, 0x53, 0xc2, 0x6e, - 0x38, 0x77, 0x2d, 0x64, 0x69, 0x29, 0xe8, 0x8e, 0xac, 0x89, 0x34, 0x19, 0x49, 0x1d, 0xac, 0x53, - 0xce, 0x7c, 0x79, 0x8a, 0x7d, 
0xc0, 0xd6, 0x1c, 0x8d, 0xd0, 0x64, 0x24, 0xcf, 0x01, 0x74, 0xfb, - 0x12, 0x22, 0x14, 0xd8, 0x62, 0xec, 0xd6, 0xfc, 0x68, 0xe8, 0x3e, 0xc1, 0x2e, 0xa4, 0xc0, 0x4c, - 0x6e, 0xd4, 0x1e, 0x83, 0xf7, 0xb5, 0x52, 0x78, 0xa0, 0x56, 0x5a, 0x79, 0xd0, 0x56, 0xba, 0x0b, - 0xf3, 0xdf, 0x73, 0x1e, 0x75, 0x4e, 0x3c, 0xf5, 0x87, 0xde, 0x39, 0x09, 0xc5, 0xf8, 0xc2, 0x53, - 0x78, 0xe1, 0xc7, 0xa3, 0xa1, 0x5b, 0x55, 0x94, 0x1d, 0x64, 0xec, 0x84, 0x82, 0xce, 0x5e, 0x5b, - 0x26, 0x57, 0xad, 0xff, 0x58, 0x80, 0xea, 0xb5, 0xde, 0x76, 0xcf, 0x1f, 0xde, 0x58, 0xa4, 0xf9, - 0x3b, 0x44, 0x9a, 0x69, 0xad, 0xf0, 0xa1, 0x5a, 0xcb, 0xd2, 0x5c, 0x7c, 0xcf, 0x34, 0x9b, 0x0f, - 0x95, 0x66, 0xeb, 0x81, 0xd2, 0x5c, 0x7a, 0xc8, 0x34, 0x7f, 0xb6, 0x0e, 0x90, 0xf5, 0x13, 0x32, - 0x05, 0xe5, 0xbd, 0x83, 0x8d, 0xcd, 0xc3, 0xbd, 0x6f, 0xb6, 0x67, 0x72, 0xa4, 0x02, 0xa5, 0x57, - 0xdb, 0x07, 0x5b, 0x7b, 0x07, 0x2f, 0xf5, 0x57, 0xd6, 0xce, 0x1e, 0x55, 0xf3, 0xfc, 0xda, 0x57, - 0x60, 0xe2, 0x57, 0x16, 0x79, 0x9e, 0x4e, 0x66, 0x6f, 0xfb, 0x28, 0x5d, 0x9c, 0xbb, 0x81, 0xea, - 0x56, 0xf7, 0x85, 0xd1, 0x5a, 0xb9, 0xf8, 0xa3, 0x96, 0xbb, 0xb8, 0xac, 0x19, 0xef, 0x2e, 0x6b, - 0xc6, 0xef, 0x97, 0x35, 0xe3, 0xed, 0x55, 0x2d, 0xf7, 0xee, 0xaa, 0x96, 0xfb, 0xf5, 0xaa, 0x96, - 0x7b, 0x5d, 0x4a, 0x3e, 0xc4, 0x8f, 0x2c, 0x7c, 0xdc, 0xfa, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0xa0, 0xc1, 0xf7, 0x10, 0xa0, 0x0b, 0x00, 0x00, + // 1058 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0xc7, + 0x13, 0xf7, 0x60, 0xcf, 0xd8, 0x53, 0xc6, 0x2c, 0xdb, 0x0b, 0x62, 0x60, 0xff, 0xf2, 0x20, 0x4b, + 0xfc, 0x45, 0xa2, 0xac, 0x1d, 0x81, 0x76, 0xa3, 0x3d, 0x45, 0x98, 0x8f, 0x05, 0x09, 0x91, 0x55, + 0x1b, 0xe5, 0xb0, 0x39, 0x38, 0x8d, 0x69, 0xcc, 0x28, 0xe3, 0x99, 0xd9, 0xee, 0x36, 0x11, 0x6f, + 0xb1, 0xe7, 0xbc, 0x48, 0x5e, 0x81, 0x5b, 0xf6, 0x98, 0x93, 0x93, 0xc0, 0x29, 0x3e, 0xe4, 0x19, + 0xa2, 0xae, 0x9e, 0xb1, 0x0d, 0x81, 0xb0, 0x9b, 0x90, 0xcb, 0x54, 0x77, 0xd5, 0xaf, 0x7a, 0xea, + 0xe3, 0xd7, 0x35, 0x03, 0x0b, 0xa2, 0x1f, 0x72, 0xd9, 0xc0, 0x67, 0x72, 0xd4, 0x10, 0x49, 0xa7, + 0x9e, 0x88, 0x58, 0xc5, 0xc4, 0x51, 0xa7, 0x2c, 0x8a, 0xe5, 0xd2, 0xa2, 0x54, 0xb1, 0xe0, 0x0d, + 0x7c, 0x26, 0x47, 0x0d, 0x75, 0x9e, 0x70, 0x69, 0x20, 0x99, 0x29, 0x64, 0x47, 0x3c, 0xbc, 0x61, + 0x9a, 0xeb, 0xc6, 0xdd, 0x18, 0x97, 0x0d, 0xbd, 0x4a, 0xb5, 0x7e, 0x37, 0x8e, 0xbb, 0x21, 0x6f, + 0xe0, 0xee, 0xa8, 0x7f, 0xd2, 0x50, 0x41, 0x8f, 0x4b, 0xc5, 0x7a, 0x89, 0x01, 0xd4, 0x7e, 0xb7, + 0x60, 0x9a, 0xea, 0x50, 0x28, 0x7f, 0xdb, 0xe7, 0x52, 0x91, 0x67, 0x50, 0xd0, 0xc7, 0x7a, 0xd6, + 0xb2, 0xb5, 0x3a, 0xb3, 0xb6, 0x58, 0x37, 0x41, 0xd5, 0x27, 0x31, 0xf5, 0xc3, 0xf3, 0x84, 0x53, + 0x84, 0x91, 0x6f, 0x60, 0x31, 0x61, 0x42, 0x05, 0x2c, 0x6c, 0x0b, 0x2e, 0x93, 0x38, 0x92, 0xbc, + 0x2d, 0x95, 0x60, 0x8a, 0x77, 0xcf, 0xbd, 0x29, 0x3c, 0xc3, 0xcf, 0xce, 0x78, 0x6d, 0x80, 0x34, + 0xc5, 0xb5, 0x52, 0x18, 0x5d, 0x48, 0x6e, 0x37, 0x90, 0x15, 0x98, 0xe9, 0x31, 0xd5, 0x39, 0xe5, + 0x42, 0x9f, 0x19, 0x44, 0x5d, 0x2f, 0xbf, 0x9c, 0x5f, 0x75, 0x69, 0x25, 0xd5, 0xb6, 0x50, 0x59, + 0xfb, 0x3f, 0x14, 0x74, 0x44, 0xa4, 0x08, 0xf9, 0x8d, 0xfd, 0xfd, 0xd9, 0x1c, 0x71, 0xc1, 0xde, + 0xd8, 0xdf, 0xa6, 0x87, 0xb3, 0x16, 0x01, 0x70, 0xe8, 0xf6, 0xe6, 0x57, 0x74, 0x6b, 0x76, 0xaa, + 0xf6, 0x2d, 0x54, 0xd2, 0x34, 0xcc, 0x7b, 0xc8, 0x27, 0x60, 0x77, 0x45, 0xdc, 0x4f, 0x30, 0xd9, + 0xf2, 0xda, 0xe3, 0xc9, 0x64, 0x5f, 0x69, 0xc3, 0x6e, 0x8e, 0x1a, 0x04, 0x59, 0x82, 0xe2, 0xf7, + 0x4c, 0x44, 0x3a, 0x06, 0x9d, 0x95, 0xbb, 0x9b, 0xa3, 
0x99, 0xa2, 0x59, 0x02, 0x47, 0x70, 0xd9, + 0x0f, 0x55, 0x6d, 0x13, 0x60, 0xe4, 0x2b, 0xc9, 0x73, 0x70, 0xd0, 0x59, 0x7a, 0xd6, 0x72, 0xfe, + 0xd6, 0xf3, 0x9b, 0x30, 0x1c, 0xf8, 0x29, 0x88, 0xa6, 0xb2, 0xf6, 0x47, 0x1e, 0xdc, 0x11, 0x82, + 0xfc, 0x0f, 0x0a, 0x11, 0xeb, 0x99, 0x7e, 0xb8, 0xcd, 0xd2, 0x70, 0xe0, 0xe3, 0x9e, 0xe2, 0x53, + 0x5b, 0x4f, 0x82, 0x90, 0x9b, 0x98, 0x8c, 0x55, 0xef, 0x29, 0x3e, 0xc9, 0x33, 0xb0, 0x91, 0x66, + 0x58, 0xb6, 0xf2, 0xda, 0xf4, 0xe4, 0xfb, 0x9b, 0xee, 0x70, 0xe0, 0x1b, 0x33, 0x35, 0x82, 0xac, + 0x42, 0x29, 0x88, 0x14, 0x17, 0x67, 0x2c, 0xf4, 0x0a, 0xcb, 0xd6, 0xaa, 0xd5, 0x9c, 0x1e, 0x0e, + 0xfc, 0x91, 0x8e, 0x8e, 0x56, 0x84, 0xc2, 0x53, 0x7e, 0xc6, 0xc2, 0x3e, 0x53, 0x41, 0x1c, 0xb5, + 0x8f, 0xfb, 0xc2, 0x2c, 0x24, 0xef, 0xc4, 0xd1, 0xb1, 0xf4, 0x6c, 0x74, 0x26, 0xc3, 0x81, 0x3f, + 0x33, 0x86, 0x1d, 0x06, 0x3d, 0x4e, 0x17, 0xc7, 0xfb, 0xad, 0xd4, 0xab, 0x65, 0x9c, 0x48, 0x1b, + 0x1e, 0x85, 0x4c, 0xaa, 0xf6, 0x18, 0xe1, 0x39, 0xd8, 0x96, 0xa5, 0xba, 0x21, 0x71, 0x3d, 0x23, + 0x71, 0xfd, 0x30, 0x23, 0x71, 0x73, 0xe9, 0x62, 0xe0, 0xe7, 0xf4, 0x7b, 0xb4, 0xeb, 0xf6, 0xc8, + 0xf3, 0xdd, 0x2f, 0xbe, 0x45, 0x6f, 0xe8, 0x88, 0x0f, 0x76, 0x18, 0xf4, 0x02, 0xe5, 0xb9, 0xcb, + 0xd6, 0x6a, 0xde, 0xe4, 0x8f, 0x0a, 0x6a, 0x04, 0x39, 0x83, 0x85, 0x3b, 0x28, 0xea, 0x95, 0x3e, + 0x88, 0xc9, 0xcd, 0xa7, 0xc3, 0x81, 0x7f, 0x17, 0x9b, 0xe9, 0x5d, 0x87, 0xd7, 0x22, 0x28, 0xe8, + 0x8e, 0x90, 0xe7, 0xe0, 0x0a, 0xde, 0x89, 0xc5, 0xb1, 0x66, 0x99, 0xa1, 0xe4, 0xfc, 0xa8, 0x65, + 0x99, 0x41, 0x23, 0x77, 0x73, 0x74, 0x8c, 0x24, 0x2b, 0x60, 0xb3, 0x90, 0x0b, 0x85, 0x24, 0x28, + 0xaf, 0x55, 0x32, 0x97, 0x0d, 0xad, 0xd4, 0x0c, 0x46, 0xeb, 0x04, 0x4b, 0x7f, 0xcc, 0x43, 0x05, + 0x8d, 0x7b, 0x91, 0x54, 0x2c, 0xea, 0x70, 0xf2, 0x12, 0x1c, 0x9c, 0x29, 0xf2, 0xe6, 0x4d, 0x78, + 0xb3, 0xaf, 0xd5, 0x2d, 0xae, 0x9a, 0x33, 0x69, 0xa5, 0x53, 0x20, 0x4d, 0x25, 0xd9, 0x85, 0x32, + 0x8b, 0xa2, 0x58, 0x61, 0x8d, 0x65, 0x1a, 0xc3, 0x2d, 0xfe, 0x4f, 0x52, 0xff, 0x49, 0x34, 0x9d, + 0xdc, 0x90, 0x75, 0xb0, 0xa5, 0x62, 0x8a, 0x7b, 0x79, 0x2c, 0x36, 0xb9, 0x96, 0x47, 0x4b, 0x5b, + 0x4c, 0xcf, 0x10, 0x44, 0x8d, 0x20, 0x2d, 0x70, 0x59, 0x47, 0x05, 0x67, 0xbc, 0xcd, 0x14, 0x92, + 0xf6, 0x1e, 0xbe, 0x0c, 0x07, 0x3e, 0x31, 0x0e, 0x1b, 0xea, 0xb3, 0xb8, 0x17, 0x28, 0xde, 0x4b, + 0xd4, 0x39, 0xf2, 0xa5, 0x94, 0xe9, 0x35, 0x53, 0x34, 0x6d, 0x38, 0x12, 0xd9, 0x35, 0x6f, 0x45, + 0x05, 0x35, 0xe2, 0xef, 0x98, 0xe2, 0xfc, 0x97, 0x4c, 0xf9, 0xc9, 0x06, 0x1b, 0xcb, 0x31, 0x2e, + 0x96, 0xf5, 0x11, 0xc5, 0xca, 0x66, 0xc9, 0xd4, 0xad, 0xb3, 0xc4, 0x07, 0xfb, 0x6d, 0x9f, 0x8b, + 0x73, 0xac, 0x7f, 0x9a, 0x35, 0x2a, 0xa8, 0x11, 0xe4, 0x0b, 0x98, 0xfd, 0xcb, 0x55, 0x9f, 0x98, + 0x13, 0x99, 0x8d, 0x3e, 0x3a, 0xbe, 0x71, 0xb5, 0xc7, 0xf4, 0xb2, 0xff, 0x25, 0xbd, 0x9c, 0x7f, + 0x4e, 0xaf, 0x97, 0xe0, 0xe0, 0x45, 0x90, 0x5e, 0x11, 0xa7, 0xe1, 0xfc, 0xb5, 0x92, 0x65, 0x57, + 0xc1, 0x4c, 0x64, 0x03, 0xa4, 0xa9, 0x24, 0x35, 0x70, 0x4e, 0x39, 0x0b, 0xd5, 0x29, 0xce, 0x01, + 0xd7, 0x60, 0x8c, 0x86, 0xa6, 0x92, 0xbc, 0x00, 0x30, 0xe3, 0x4b, 0x88, 0x58, 0xe0, 0x88, 0x71, + 0x9b, 0x0b, 0xc3, 0x81, 0xff, 0x04, 0xa7, 0x90, 0x56, 0x8e, 0xe9, 0x46, 0xdd, 0x91, 0xf2, 0xbe, + 0x51, 0x0a, 0x0f, 0x34, 0x4a, 0xcb, 0x0f, 0x3a, 0x4a, 0x77, 0x61, 0xe1, 0x3b, 0xce, 0x93, 0xf6, + 0x49, 0xa0, 0x3f, 0xc0, 0xed, 0x93, 0x58, 0x8c, 0x02, 0x9e, 0xc6, 0x80, 0x1f, 0x0f, 0x07, 0x7e, + 0x45, 0x43, 0x76, 0x10, 0xb1, 0x13, 0x0b, 0x3a, 0x77, 0x6d, 0x9b, 0x86, 0x5a, 0xfb, 0x21, 0x0f, + 0x95, 0x6b, 0xb3, 0xed, 0x9e, 0x0f, 0xde, 0x88, 0xa4, 0x53, 0x77, 0x90, 0x74, 
0xcc, 0xb5, 0xfc, + 0xc7, 0x72, 0x6d, 0xdc, 0xe6, 0xc2, 0x07, 0xb6, 0xd9, 0x7e, 0xa8, 0x36, 0x3b, 0x0f, 0xd4, 0xe6, + 0xe2, 0x43, 0xb6, 0xf9, 0xd3, 0x75, 0x80, 0xf1, 0x3c, 0x21, 0xd3, 0x50, 0xda, 0x3b, 0xd8, 0xd8, + 0x3c, 0xdc, 0xfb, 0x7a, 0x7b, 0x36, 0x47, 0xca, 0x50, 0x7c, 0xbd, 0x7d, 0xb0, 0xb5, 0x77, 0xf0, + 0xca, 0xfc, 0x65, 0xed, 0xec, 0x51, 0xbd, 0x9e, 0x5a, 0xfb, 0x12, 0x6c, 0xfc, 0xcb, 0x22, 0x2f, + 0xb2, 0xc5, 0xdc, 0x6d, 0x3f, 0x91, 0x4b, 0xf3, 0x37, 0xb4, 0x66, 0xd4, 0x7d, 0x6e, 0x35, 0x57, + 0x2e, 0x7e, 0xab, 0xe6, 0x2e, 0x2e, 0xab, 0xd6, 0xfb, 0xcb, 0xaa, 0xf5, 0xeb, 0x65, 0xd5, 0x7a, + 0x77, 0x55, 0xcd, 0xbd, 0xbf, 0xaa, 0xe6, 0x7e, 0xbe, 0xaa, 0xe6, 0xde, 0x14, 0xd3, 0x1f, 0xe7, + 0x23, 0x07, 0x93, 0x5b, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xed, 0x6d, 0x27, 0x50, 0x0b, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -760,33 +755,6 @@ func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.File) > 0 { - for iNdEx := len(m.File) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.File[iNdEx]) - copy(dAtA[i:], m.File[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.File[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.RuleGroup) > 0 { - for iNdEx := len(m.RuleGroup) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RuleGroup[iNdEx]) - copy(dAtA[i:], m.RuleGroup[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleGroup[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.RuleName) > 0 { - for iNdEx := len(m.RuleName) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RuleName[iNdEx]) - copy(dAtA[i:], m.RuleName[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleName[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } if len(m.MatcherString) > 0 { for iNdEx := len(m.MatcherString) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.MatcherString[iNdEx]) @@ -1358,24 +1326,6 @@ func (m *RulesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if len(m.RuleName) > 0 { - for _, s := range m.RuleName { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.RuleGroup) > 0 { - for _, s := range m.RuleGroup { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.File) > 0 { - for _, s := range m.File { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } return n } @@ -1714,102 +1664,6 @@ func (m *RulesRequest) Unmarshal(dAtA []byte) error { } m.MatcherString = append(m.MatcherString, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuleName = append(m.RuleName, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuleGroup = append(m.RuleGroup, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.File = append(m.File, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto index f5fc8a038b..25d809ede9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto @@ -41,9 +41,6 @@ message RulesRequest { Type type = 1; PartialResponseStrategy partial_response_strategy = 2; repeated string matcher_string = 3; - repeated string rule_name = 4; - repeated string rule_group = 5; - repeated string file = 6; } message RulesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index 9aaeeca615..809dfce36b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -3,7 +3,7 @@ // Package runutil provides helpers to advanced function scheduling control like repeat or retry. // -// It's very often the case when you need to executes some code every fixed intervals or have it retried automatically. +// It's very often the case that you need to execute some code at fixed intervals or have it retried automatically. // To make it reliably with proper timeout, you need to carefully arrange some boilerplate for this. // Below function does it for you. // diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 865ea3878d..75a85dd9fb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -42,11 +42,11 @@ import ( "google.golang.org/grpc/status" "github.com/thanos-io/objstore" - "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/indexheader" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" + "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" "github.com/thanos-io/thanos/pkg/info/infopb" @@ -117,9 +117,6 @@ const ( // SeriesBatchSize is the default batch size when fetching series from object storage. SeriesBatchSize = 10000 - - // checkContextEveryNIterations is used in some tight loops to check if the context is done.
- checkContextEveryNIterations = 128 ) var ( @@ -381,7 +378,7 @@ type BucketStore struct { indexCache storecache.IndexCache indexReaderPool *indexheader.ReaderPool buffers sync.Pool - chunkPool pool.Pool[byte] + chunkPool pool.Bytes seriesBatchSize int // Sets of blocks that have the same labels. They are indexed by a hash over their label set. @@ -501,7 +498,7 @@ func WithQueryGate(queryGate gate.Gate) BucketStoreOption { } // WithChunkPool sets a pool.Bytes to use for chunks. -func WithChunkPool(chunkPool pool.Pool[byte]) BucketStoreOption { +func WithChunkPool(chunkPool pool.Bytes) BucketStoreOption { return func(s *BucketStore) { s.chunkPool = chunkPool } @@ -597,7 +594,7 @@ func NewBucketStore( b := make([]byte, 0, initialBufSize) return &b }}, - chunkPool: pool.NoopPool[byte]{}, + chunkPool: pool.NoopBytes{}, blocks: map[ulid.ULID]*bucketBlock{}, blockSets: map[uint64]*bucketBlockSet{}, blockSyncConcurrency: blockSyncConcurrency, @@ -950,6 +947,19 @@ func (s *BucketStore) LabelSet() []labelpb.ZLabelSet { return labelSets } +// Info implements the storepb.StoreServer interface. +func (s *BucketStore) Info(context.Context, *storepb.InfoRequest) (*storepb.InfoResponse, error) { + mint, maxt := s.TimeRange() + res := &storepb.InfoResponse{ + StoreType: component.Store.ToProto(), + MinTime: mint, + MaxTime: maxt, + LabelSets: s.LabelSet(), + } + + return res, nil +} + func (s *BucketStore) limitMinTime(mint int64) int64 { if s.filterConfig == nil { return mint @@ -995,7 +1005,6 @@ type blockSeriesClient struct { mint int64 maxt int64 - seriesLimit int indexr *bucketIndexReader chunkr *bucketChunkReader loadAggregates []storepb.Aggr @@ -1071,7 +1080,6 @@ func newBlockSeriesClient( mint: req.MinTime, maxt: req.MaxTime, - seriesLimit: int(req.Limit), indexr: b.indexReader(logger), chunkr: chunkr, seriesLimiter: seriesLimiter, @@ -1151,20 +1159,14 @@ func (b *blockSeriesClient) ExpandPostings( b.expandedPostings = make([]storage.SeriesRef, 0, len(b.lazyPostings.postings)/2) b.lazyExpandedPostingsCount.Inc() } else { - // If seriesLimit is set, it can be applied here to limit the amount of series. - // Note: This can only be done when postings are not expanded lazily. - if b.seriesLimit > 0 && len(b.lazyPostings.postings) > b.seriesLimit { - b.lazyPostings.postings = b.lazyPostings.postings[:b.seriesLimit] - } - // Apply series limiter eargerly if lazy postings not enabled. - if err := seriesLimiter.Reserve(uint64(len(b.lazyPostings.postings))); err != nil { + if err := seriesLimiter.Reserve(uint64(len(ps.postings))); err != nil { return httpgrpc.Errorf(int(codes.ResourceExhausted), "exceeded series limit: %s", err) } } - if b.batchSize > len(b.lazyPostings.postings) { - b.batchSize = len(b.lazyPostings.postings) + if b.batchSize > len(ps.postings) { + b.batchSize = len(ps.postings) } b.entries = make([]seriesEntry, 0, b.batchSize) @@ -1286,11 +1288,6 @@ OUTER: } seriesMatched++ - if b.seriesLimit > 0 && seriesMatched > b.seriesLimit { - // Exit early if seriesLimit is set. 
- b.hasMorePostings = false - break - } s := seriesEntry{lset: completeLabelset} if b.skipChunks { b.entries = append(b.entries, s) @@ -1694,12 +1691,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store tracing.DoInSpan(ctx, "bucket_store_merge_all", func(ctx context.Context) { begin := time.Now() set := NewResponseDeduplicator(NewProxyResponseLoserTree(respSets...)) - i := 0 for set.Next() { - i++ - if req.Limit > 0 && i > int(req.Limit) { - break - } at := set.At() warn := at.GetWarning() if warn != "" { @@ -1855,7 +1847,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq } }) - result = strutil.MergeSlices(int(req.Limit), res, extRes) + result = strutil.MergeSlices(res, extRes) } else { seriesReq := &storepb.SeriesRequest{ MinTime: req.Start, @@ -1950,10 +1942,8 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label names response hints").Error()) } - names := strutil.MergeSlices(int(req.Limit), sets...) - return &storepb.LabelNamesResponse{ - Names: names, + Names: strutil.MergeSlices(sets...), Hints: anyHints, }, nil } @@ -1967,7 +1957,7 @@ func (b *bucketBlock) FilterExtLabelsMatchers(matchers []*labels.Matcher) ([]*la // If value is empty string the matcher is a valid one since it's not part of external labels. if v == "" { result = append(result, m) - } else if v != "" && !m.Matches(v) { + } else if v != "" && v != m.Value { // If matcher is external label but value is different we don't want to look in block anyway. return []*labels.Matcher{}, false } @@ -2069,7 +2059,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR // Add the external label value as well. if extLabelValue := b.extLset.Get(req.Label); extLabelValue != "" { - res = strutil.MergeSlices(int(req.Limit), res, []string{extLabelValue}) + res = strutil.MergeSlices(res, []string{extLabelValue}) } result = res } else { @@ -2167,10 +2157,8 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label values response hints").Error()) } - vals := strutil.MergeSlices(int(req.Limit), sets...) - return &storepb.LabelValuesResponse{ - Values: vals, + Values: strutil.MergeSlices(sets...), Hints: anyHints, }, nil } @@ -2318,7 +2306,7 @@ type bucketBlock struct { meta *metadata.Meta dir string indexCache storecache.IndexCache - chunkPool pool.Pool[byte] + chunkPool pool.Bytes extLset labels.Labels indexHeaderReader indexheader.Reader @@ -2344,7 +2332,7 @@ func newBucketBlock( bkt objstore.BucketReader, dir string, indexCache storecache.IndexCache, - chunkPool pool.Pool[byte], + chunkPool pool.Bytes, indexHeadReader indexheader.Reader, p Partitioner, maxSeriesSizeFunc BlockEstimator, @@ -2617,15 +2605,10 @@ func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatch } // ExpandPostingsWithContext returns the postings expanded as a slice and considers context. 
-func ExpandPostingsWithContext(ctx context.Context, p index.Postings) ([]storage.SeriesRef, error) { - res := make([]storage.SeriesRef, 0, 1024) // Pre-allocate slice with initial capacity - i := 0 +func ExpandPostingsWithContext(ctx context.Context, p index.Postings) (res []storage.SeriesRef, err error) { for p.Next() { - i++ - if i%checkContextEveryNIterations == 0 { - if err := ctx.Err(); err != nil { - return nil, err - } + if ctx.Err() != nil { + return nil, ctx.Err() } res = append(res, p.At()) } @@ -2848,8 +2831,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er return nil, nil, err } - for i, val := range vals { - if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { + for _, val := range vals { + if ctx.Err() != nil { return nil, nil, ctx.Err() } if !m.Matches(val) { @@ -2877,8 +2860,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er } var toAdd []string - for i, val := range vals { - if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { + for _, val := range vals { + if ctx.Err() != nil { return nil, nil, ctx.Err() } if m.Matches(val) { @@ -2981,10 +2964,8 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab // If we have a miss, mark key to be fetched in `ptrs` slice. // Overlaps are well handled by partitioner, so we don't need to deduplicate keys. for ix, key := range keys { - if (ix+1)%checkContextEveryNIterations == 0 { - if err := ctx.Err(); err != nil { - return nil, closeFns, err - } + if err := ctx.Err(); err != nil { + return nil, closeFns, err } // Get postings for the given key from cache first. if b, ok := fromCache[key]; ok { @@ -3586,10 +3567,10 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a bufPooled, err := r.block.chunkPool.Get(r.block.estimatedMaxChunkSize) if err == nil { buf = *bufPooled - defer r.block.chunkPool.Put(&buf) } else { buf = make([]byte, r.block.estimatedMaxChunkSize) } + defer r.block.chunkPool.Put(&buf) for i, pIdx := range pIdxs { // Fast forward range reader to the next chunk start in case of sparse (for our purposes) byte range. @@ -3865,6 +3846,6 @@ func (s *queryStats) toHints() *hintspb.QueryStats { } // NewDefaultChunkBytesPool returns a chunk bytes pool with default settings. -func NewDefaultChunkBytesPool(maxChunkPoolBytes uint64) (pool.Pool[byte], error) { - return pool.NewBucketedPool[byte](chunkBytesPoolMinSize, chunkBytesPoolMaxSize, 2, maxChunkPoolBytes) +func NewDefaultChunkBytesPool(maxChunkPoolBytes uint64) (pool.Bytes, error) { + return pool.NewBucketedBytes(chunkBytesPoolMinSize, chunkBytesPoolMaxSize, 2, maxChunkPoolBytes) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go index 3a8ddbb86d..42e6de55a7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go @@ -30,12 +30,7 @@ var ( } ) -const ( - maxInt = int(^uint(0) >> 1) - - // checkContextEveryNIterations is used in some tight loops to check if the context is done. 
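// The constant removed here amortized cancellation checks: instead of
// calling ctx.Err() on every iteration (as the reverted loops do), the
// newer code polled it once every 128 iterations. A minimal standalone
// sketch of that pattern, with illustrative names:
package main

import (
	"context"
	"fmt"
)

const checkEveryN = 128 // same cadence as the removed constant

func sumWithCancel(ctx context.Context, xs []int) (int, error) {
	total := 0
	for i, x := range xs {
		// Poll for cancellation once per checkEveryN elements; ctx.Err()
		// is cheap, but not free inside a very hot loop.
		if (i+1)%checkEveryN == 0 {
			if err := ctx.Err(); err != nil {
				return 0, err
			}
		}
		total += x
	}
	return total, nil
}

func main() {
	xs := make([]int, 1000)
	for i := range xs {
		xs[i] = i
	}
	total, err := sumWithCancel(context.Background(), xs)
	fmt.Println(total, err)
}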
-	checkContextEveryNIterations = 128
-)
+const maxInt = int(^uint(0) >> 1)

type InMemoryIndexCache struct {
	mtx sync.Mutex
@@ -307,13 +302,11 @@ func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID uli
	blockIDKey := blockID.String()
	requests := 0
	hit := 0
-	for i, key := range keys {
-		if (i+1)%checkContextEveryNIterations == 0 {
-			if ctx.Err() != nil {
-				c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests))
-				c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit))
-				return hits, misses
-			}
+	for _, key := range keys {
+		if ctx.Err() != nil {
+			c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests))
+			c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit))
+			return hits, misses
		}
		requests++
		if b, ok := c.get(CacheKey{blockIDKey, CacheKeyPostings(key), ""}); ok {
@@ -370,13 +363,11 @@ func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.
	blockIDKey := blockID.String()
	requests := 0
	hit := 0
-	for i, id := range ids {
-		if (i+1)%checkContextEveryNIterations == 0 {
-			if ctx.Err() != nil {
-				c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests))
-				c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit))
-				return hits, misses
-			}
+	for _, id := range ids {
+		if ctx.Err() != nil {
+			c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests))
+			c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit))
+			return hits, misses
		}
		requests++
		if b, ok := c.get(CacheKey{blockIDKey, CacheKeySeries(id), ""}); ok {
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go
index 38a0f61822..a72ce0d664 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go
@@ -66,8 +66,8 @@ func (c *TracingIndexCache) FetchExpandedPostings(ctx context.Context, blockID u
	return data, exists
}

-// StoreSeries stores a single series. Skip instrumenting this method
-// excessive spans as a single request can store millions of series.
+// StoreSeries stores a single series. Skip instrumenting this method with
+// excessive spans, as a single request can store millions of series.
func (c *TracingIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) {
	c.cache.StoreSeries(blockID, id, v, tenant)
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/io.go b/vendor/github.com/thanos-io/thanos/pkg/store/io.go
index f2356e6759..657f3134d2 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/io.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/io.go
@@ -109,7 +109,7 @@ func readByteRanges(src io.Reader, dst []byte, byteRanges byteRanges) ([]byte, e
	if err != nil {
		// We get an ErrUnexpectedEOF if EOF is reached before we fill the slice.
		// Due to how the reading logic works in the bucket store, we may try to overread
-		// the last byte range so, if the error occurs on the last one, we consider it legit.
+		// the last byte range, so if the error occurs on the last one, we consider it legit.
if err == io.ErrUnexpectedEOF && idx == len(byteRanges)-1 { return dst, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go index f8363ab477..1858b7dee4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go @@ -308,8 +308,8 @@ func fetchAndExpandPostingGroups(ctx context.Context, r *bucketIndexReader, post result := index.Without(index.Intersect(groupAdds...), index.Merge(ctx, groupRemovals...)) - if err := ctx.Err(); err != nil { - return nil, nil, err + if ctx.Err() != nil { + return nil, nil, ctx.Err() } ps, err := ExpandPostingsWithContext(ctx, result) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/local.go b/vendor/github.com/thanos-io/thanos/pkg/store/local.go index cb80f8f8cb..4e88c0a7e3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/local.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/local.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "io" + "math" "sort" "github.com/go-kit/log" @@ -32,7 +33,8 @@ type LocalStore struct { logger log.Logger extLabels labels.Labels - c io.Closer + info *storepb.InfoResponse + c io.Closer // TODO(bwplotka): This is very naive in-memory DB. We can support much larger files, by // indexing labels, symbolizing strings and get chunk refs only without storing protobufs in memory. @@ -65,6 +67,14 @@ func NewLocalStoreFromJSONMmappableFile( logger: logger, extLabels: extLabels, c: f, + info: &storepb.InfoResponse{ + LabelSets: []labelpb.ZLabelSet{ + {Labels: labelpb.ZLabelsFromPromLabels(extLabels)}, + }, + StoreType: component.ToProto(), + MinTime: math.MaxInt64, + MaxTime: math.MinInt64, + }, } // Do quick pass for in-mem index. @@ -91,7 +101,13 @@ func NewLocalStoreFromJSONMmappableFile( } chks := make([]int, 0, len(series.Chunks)) // Sort chunks in separate slice by MinTime for easier lookup. Find global max and min. - for ci := range series.Chunks { + for ci, c := range series.Chunks { + if s.info.MinTime > c.MinTime { + s.info.MinTime = c.MinTime + } + if s.info.MaxTime < c.MaxTime { + s.info.MaxTime = c.MaxTime + } chks = append(chks, ci) } @@ -105,7 +121,7 @@ func NewLocalStoreFromJSONMmappableFile( if err := skanner.Err(); err != nil { return nil, errors.Wrapf(err, "scanning file %s", path) } - level.Info(logger).Log("msg", "loading JSON file succeeded", "file", path, "series", len(s.series)) + level.Info(logger).Log("msg", "loading JSON file succeeded", "file", path, "info", s.info.String(), "series", len(s.series)) return s, nil } @@ -127,6 +143,11 @@ func ScanGRPCCurlProtoStreamMessages(data []byte, atEOF bool) (advance int, toke return len(delim), nil, nil } +// Info returns store information about the Prometheus instance. +func (s *LocalStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + return s.info, nil +} + // Series returns all series for a requested time range and label matcher. The returned data may // exceed the requested time bounds. 
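// NewLocalStoreFromJSONMmappableFile above folds every chunk's
// MinTime/MaxTime into a store-wide time range, seeded with
// math.MaxInt64/math.MinInt64. The same fold as a standalone sketch;
// chunk is a stand-in type:
package main

import (
	"fmt"
	"math"
)

type chunk struct{ MinTime, MaxTime int64 }

func timeRange(chunks []chunk) (mint, maxt int64) {
	mint, maxt = math.MaxInt64, math.MinInt64
	for _, c := range chunks {
		if c.MinTime < mint {
			mint = c.MinTime
		}
		if c.MaxTime > maxt {
			maxt = c.MaxTime
		}
	}
	return mint, maxt
}

func main() {
	fmt.Println(timeRange([]chunk{{10, 20}, {5, 15}})) // 5 20
}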
func (s *LocalStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go index e5e472b675..f1f89fbd44 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/index" - extsnappy "github.com/thanos-io/thanos/pkg/extgrpc/snappy" "github.com/thanos-io/thanos/pkg/pool" ) @@ -193,7 +192,7 @@ func maximumDecodedLenSnappyStreamed(in []byte) (int, error) { return maxDecodedLen, nil } -var decodedBufPool = pool.MustNewBucketedPool[byte](1024, 65536, 2, 0) +var decodedBufPool = pool.MustNewBucketedBytes(1024, 65536, 2, 0) func newStreamedDiffVarintPostings(input []byte, disablePooling bool) (closeablePostings, error) { // We can't use the regular s2.Reader because it assumes a stream. @@ -450,7 +449,7 @@ func diffVarintEncodeNoHeader(p index.Postings, length int) ([]byte, error) { } // Creating 15 buckets from 1k to 32mb. -var snappyDecodePool = pool.MustNewBucketedPool[byte](1024, 32*1024*1024, 2, 0) +var snappyDecodePool = pool.MustNewBucketedBytes(1024, 32*1024*1024, 2, 0) type closeablePostings interface { index.Postings diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 11d7f1ff77..721e9ed51e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -24,7 +24,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -110,6 +109,31 @@ func (p *PrometheusStore) labelCallsSupportMatchers() bool { return parseErr == nil && version.GTE(baseVer) } +// Info returns store information about the Prometheus instance. +// NOTE(bwplotka): MaxTime & MinTime are not accurate nor adjusted dynamically. +// This is fine for now, but might be needed in future. +func (p *PrometheusStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + lset := p.externalLabelsFn() + mint, maxt := p.timestamps() + + res := &storepb.InfoResponse{ + Labels: labelpb.ZLabelsFromPromLabels(lset), + StoreType: p.component.ToProto(), + MinTime: mint, + MaxTime: maxt, + } + + // Until we deprecate the single labels in the reply, we just duplicate + // them here for migration/compatibility purposes. 
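// The migration shim that follows mirrors the deprecated single label
// slice into label_sets so that newer clients see the same data. The same
// idea as a standalone sketch, with stand-in types for labelpb.ZLabelSet:
package main

import "fmt"

type label struct{ Name, Value string }
type labelSet struct{ Labels []label }

// compatLabelSets duplicates the flat labels into a one-element label-set
// list, but only when there is something to announce.
func compatLabelSets(labels []label) []labelSet {
	sets := []labelSet{}
	if len(labels) > 0 {
		sets = append(sets, labelSet{Labels: labels})
	}
	return sets
}

func main() {
	fmt.Println(compatLabelSets([]label{{"cluster", "eu1"}})) // [{[{cluster eu1}]}]
}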
+ res.LabelSets = []labelpb.ZLabelSet{} + if len(res.Labels) > 0 { + res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{ + Labels: res.Labels, + }) + } + return res, nil +} + func (p *PrometheusStore) getBuffer() *[]byte { b := p.buffers.Get() return b.(*[]byte) @@ -149,7 +173,7 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Sto if r.SkipChunks { finalExtLset := rmLabels(extLset.Copy(), extLsetToRemove) - labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime, int(r.Limit)) + labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime) if err != nil { return err } @@ -296,7 +320,7 @@ func (p *PrometheusStore) handleStreamedPrometheusResponse( seriesStats := &storepb.SeriesStatsCounter{} // TODO(bwplotka): Put read limit as a flag. - stream := remote.NewChunkedReader(bodySizer, config.DefaultChunkedReadLimit, *data) + stream := remote.NewChunkedReader(bodySizer, remote.DefaultChunkedReadLimit, *data) hasher := hashPool.Get().(hash.Hash64) defer hashPool.Put(hasher) for { @@ -547,12 +571,12 @@ func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesR var lbls []string if len(matchers) == 0 || p.labelCallsSupportMatchers() { - lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } } else { - sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } @@ -618,7 +642,7 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue if len(matchers) == 0 { return &storepb.LabelValuesResponse{Values: []string{val}}, nil } - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } @@ -629,12 +653,12 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue } if len(matchers) == 0 || p.labelCallsSupportMatchers() { - vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End, int(r.Limit)) + vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End) if err != nil { return nil, err } } else { - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index 498c80e2e7..0ac1fc659c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -71,9 +71,6 @@ type Client interface { // Addr returns address of the store client. If second parameter is true, the client // represents a local client (server-as-client) and has no remote address. Addr() (addr string, isLocalClient bool) - - // Matches returns true if provided label matchers are allowed in the store. - Matches(matches []*labels.Matcher) bool } // ProxyStore implements the store API that proxies request to all given underlying stores. 
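// The ProxyStore hunks below use the functional-options pattern
// (ProxyStoreOption here, BucketStoreOption earlier in this diff): options
// are closures applied over defaults in the constructor. A minimal
// standalone sketch with made-up fields:
package main

import "fmt"

type server struct {
	dedup     bool
	batchSize int
}

type option func(*server)

// withoutDedup mirrors the shape of the removed WithoutDedup option.
func withoutDedup() option       { return func(s *server) { s.dedup = false } }
func withBatchSize(n int) option { return func(s *server) { s.batchSize = n } }

func newServer(opts ...option) *server {
	s := &server{dedup: true, batchSize: 100} // defaults first
	for _, o := range opts {
		o(s) // each option mutates the configured server
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", *newServer(withoutDedup(), withBatchSize(64))) // {dedup:false batchSize:64}
}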
@@ -89,7 +86,6 @@ type ProxyStore struct {
	retrievalStrategy RetrievalStrategy
	debugLogging      bool
	tsdbSelector      *TSDBSelector
-	enableDedup       bool
}

type proxyStoreMetrics struct {
@@ -130,13 +126,6 @@ func WithTSDBSelector(selector *TSDBSelector) ProxyStoreOption {
	}
}

-// WithoutDedup disabled chunk deduplication when streaming series.
-func WithoutDedup() ProxyStoreOption {
-	return func(s *ProxyStore) {
-		s.enableDedup = false
-	}
-}
-
// NewProxyStore returns a new ProxyStore that uses the given clients that implements storeAPI to fan-in all series to the client.
// Note that there is no deduplication support. Deduplication should be done on the highest level (just before PromQL).
func NewProxyStore(
@@ -167,7 +156,6 @@ func NewProxyStore(
		metrics:           metrics,
		retrievalStrategy: retrievalStrategy,
		tsdbSelector:      DefaultSelector,
-		enableDedup:       true,
	}

	for _, option := range options {
@@ -177,6 +165,62 @@ func NewProxyStore(
	return s
}

+// Info returns store information about the external labels this store has.
+func (s *ProxyStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) {
+	res := &storepb.InfoResponse{
+		StoreType: s.component.ToProto(),
+		Labels:    labelpb.ZLabelsFromPromLabels(s.selectorLabels),
+	}
+
+	minTime := int64(math.MaxInt64)
+	maxTime := int64(0)
+	stores := s.stores()
+
+	// Edge case: we have no data if there are no stores.
+	if len(stores) == 0 {
+		res.MaxTime = 0
+		res.MinTime = 0
+
+		return res, nil
+	}
+
+	for _, s := range stores {
+		mint, maxt := s.TimeRange()
+		if mint < minTime {
+			minTime = mint
+		}
+		if maxt > maxTime {
+			maxTime = maxt
+		}
+	}
+
+	res.MaxTime = maxTime
+	res.MinTime = minTime
+
+	labelSets := make(map[uint64]labelpb.ZLabelSet, len(stores))
+	for _, st := range stores {
+		for _, lset := range st.LabelSets() {
+			mergedLabelSet := labelpb.ExtendSortedLabels(lset, s.selectorLabels)
+			labelSets[mergedLabelSet.Hash()] = labelpb.ZLabelSet{Labels: labelpb.ZLabelsFromPromLabels(mergedLabelSet)}
+		}
+	}
+
+	res.LabelSets = make([]labelpb.ZLabelSet, 0, len(labelSets))
+	for _, v := range labelSets {
+		res.LabelSets = append(res.LabelSets, v)
+	}
+
+	// We always want to enforce announcing the subset of data that
+	// selector-labels represents. If no label-sets are announced by the
+	// store-proxy's discovered stores, then we still want to enforce
+	// announcing this subset by announcing the selector as the label-set.
+	if len(res.LabelSets) == 0 && len(res.Labels) > 0 {
+		res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{Labels: res.Labels})
+	}
+
+	return res, nil
+}
+
func (s *ProxyStore) LabelSet() []labelpb.ZLabelSet {
	stores := s.stores()
	if len(stores) == 0 {
@@ -242,7 +286,7 @@ func (s *ProxyStore) TSDBInfos() []infopb.TSDBInfo {
func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error {
	// TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be
-	// triggered by tracing span to reduce cognitive load.
+	// triggered by a tracing span to reduce cognitive load.
	reqLogger := log.With(s.logger, "component", "proxy")
	if s.debugLogging {
		reqLogger = log.With(reqLogger, "request", originalRequest.String())
@@ -283,7 +327,6 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.
r := &storepb.SeriesRequest{
		MinTime:             originalRequest.MinTime,
		MaxTime:             originalRequest.MaxTime,
-		Limit:               originalRequest.Limit,
		Matchers:            append(storeMatchers, MatchersForLabelSets(storeLabelSets)...),
		Aggregates:          originalRequest.Aggregates,
		MaxResolutionWindow: originalRequest.MaxResolutionWindow,
@@ -319,17 +362,8 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.

	level.Debug(reqLogger).Log("msg", "Series: started fanout streams", "status", strings.Join(storeDebugMsgs, ";"))

-	var respHeap seriesStream = NewProxyResponseLoserTree(storeResponses...)
-	if s.enableDedup {
-		respHeap = NewResponseDeduplicator(respHeap)
-	}
-
-	i := 0
+	respHeap := NewResponseDeduplicator(NewProxyResponseLoserTree(storeResponses...))
	for respHeap.Next() {
-		i++
-		if r.Limit > 0 && i > int(r.Limit) {
-			break
-		}
		resp := respHeap.At()

		if resp.GetWarning() != "" && (r.PartialResponseDisabled || r.PartialResponseStrategy == storepb.PartialResponseStrategy_ABORT) {
@@ -337,7 +371,6 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.
		}

		if err := srv.Send(resp); err != nil {
-			level.Error(reqLogger).Log("msg", "failed to stream response", "error", err)
			return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error())
		}
	}
@@ -348,7 +381,7 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.

// LabelNames returns all known label names.
func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) {
	// TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be
-	// triggered by tracing span to reduce cognitive load.
+	// triggered by a tracing span to reduce cognitive load.
	reqLogger := log.With(s.logger, "component", "proxy")
	if s.debugLogging {
		reqLogger = log.With(reqLogger, "request", originalRequest.String())
@@ -386,7 +419,6 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La
		End:                  originalRequest.End,
		Matchers:             append(storeMatchers, MatchersForLabelSets(storeLabelSets)...),
		WithoutReplicaLabels: originalRequest.WithoutReplicaLabels,
-		Hints:                originalRequest.Hints,
	}

	var (
@@ -433,10 +465,8 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La
		return nil, err
	}

-	result := strutil.MergeUnsortedSlices(int(originalRequest.Limit), names...)
-
	return &storepb.LabelNamesResponse{
-		Names:    result,
+		Names:    strutil.MergeUnsortedSlices(names...),
		Warnings: warnings,
	}, nil
}

@@ -446,7 +476,7 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L
	*storepb.LabelValuesResponse, error,
) {
	// TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be
-	// triggered by tracing span to reduce cognitive load.
+	// triggered by a tracing span to reduce cognitive load.
reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -490,7 +520,6 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L End: originalRequest.End, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), WithoutReplicaLabels: originalRequest.WithoutReplicaLabels, - Limit: originalRequest.Limit, } var ( @@ -538,10 +567,8 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L return nil, err } - vals := strutil.MergeUnsortedSlices(int(originalRequest.Limit), all...) - return &storepb.LabelValuesResponse{ - Values: vals, + Values: strutil.MergeUnsortedSlices(all...), Warnings: warnings, }, nil } @@ -565,32 +592,26 @@ func (s *ProxyStore) matchingStores(ctx context.Context, minTime, maxTime int64, ) for _, st := range s.stores() { // We might be able to skip the store if its meta information indicates it cannot have series matching our query. - if ok, reason := storeMatches(ctx, s.debugLogging, st, minTime, maxTime, matchers...); !ok { - if s.debugLogging { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, reason)) - } + if ok, reason := storeMatches(ctx, st, minTime, maxTime, matchers...); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, reason)) continue } matches, extraMatchers := s.tsdbSelector.MatchLabelSets(st.LabelSets()...) if !matches { - if s.debugLogging { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, "tsdb selector")) - } + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, "tsdb selector")) continue } storeLabelSets = append(storeLabelSets, extraMatchers...) stores = append(stores, st) - if s.debugLogging { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) - } + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) } return stores, storeLabelSets, storeDebugMsgs } // storeMatches returns boolean if the given store may hold data for the given label matchers, time ranges and debug store matches gathered from context. -func storeMatches(ctx context.Context, debugLogging bool, s Client, mint, maxt int64, matchers ...*labels.Matcher) (ok bool, reason string) { +func storeMatches(ctx context.Context, s Client, mint, maxt int64, matchers ...*labels.Matcher) (ok bool, reason string) { var storeDebugMatcher [][]*labels.Matcher if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { if value, ok := ctxVal.([][]*labels.Matcher); ok { @@ -600,35 +621,22 @@ func storeMatches(ctx context.Context, debugLogging bool, s Client, mint, maxt i storeMinTime, storeMaxTime := s.TimeRange() if mint > storeMaxTime || maxt < storeMinTime { - const s string = "does not have data within this time period" - if debugLogging { - return false, fmt.Sprintf("%s: [%v,%v]. Store time ranges: [%v,%v]", s, mint, maxt, storeMinTime, storeMaxTime) - } - return false, s + return false, fmt.Sprintf("does not have data within this time period: [%v,%v]. Store time ranges: [%v,%v]", mint, maxt, storeMinTime, storeMaxTime) } - if ok, reason := storeMatchDebugMetadata(s, debugLogging, storeDebugMatcher); !ok { + if ok, reason := storeMatchDebugMetadata(s, storeDebugMatcher); !ok { return false, reason } extLset := s.LabelSets() if !LabelSetsMatch(matchers, extLset...) 
{ - const s string = "external labels does not match request label matchers" - if debugLogging { - return false, fmt.Sprintf("external labels %v does not match request label matchers: %v", extLset, matchers) - } - return false, s - } - - if !s.Matches(matchers) { - return false, fmt.Sprintf("store does not match filter for matchers: %v", matchers) + return false, fmt.Sprintf("external labels %v does not match request label matchers: %v", extLset, matchers) } - return true, "" } // storeMatchDebugMetadata return true if the store's address match the storeDebugMatchers. -func storeMatchDebugMetadata(s Client, debugLogging bool, storeDebugMatchers [][]*labels.Matcher) (ok bool, reason string) { +func storeMatchDebugMetadata(s Client, storeDebugMatchers [][]*labels.Matcher) (ok bool, reason string) { if len(storeDebugMatchers) == 0 { return true, "" } @@ -643,11 +651,7 @@ func storeMatchDebugMetadata(s Client, debugLogging bool, storeDebugMatchers [][ match = match || LabelSetsMatch(sm, labels.FromStrings("__address__", addr)) } if !match { - const s string = "__address__ does not match debug store metadata matchers" - if debugLogging { - return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", addr, storeDebugMatchers) - } - return false, s + return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", addr, storeDebugMatchers) } return true, "" } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go index 29d1e6560a..e2764d574a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go @@ -26,13 +26,8 @@ import ( "github.com/thanos-io/thanos/pkg/tracing" ) -type seriesStream interface { - Next() bool - At() *storepb.SeriesResponse -} - type responseDeduplicator struct { - h seriesStream + h *losertree.Tree[*storepb.SeriesResponse, respSet] bufferedSameSeries []*storepb.SeriesResponse @@ -41,23 +36,20 @@ type responseDeduplicator struct { prev *storepb.SeriesResponse ok bool - - chunkDedupMap map[uint64]storepb.AggrChunk } // NewResponseDeduplicator returns a wrapper around a loser tree that merges duplicated series messages into one. // It also deduplicates identical chunks identified by the same checksum from each series message. 
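// The deduplicator described above keys identical chunks by a checksum of
// their raw bytes (xxhash upstream). A dependency-free sketch of the same
// idea using the standard library's FNV-1a instead:
package main

import (
	"fmt"
	"hash/fnv"
)

func dedupChunks(chunks [][]byte) [][]byte {
	seen := make(map[uint64]struct{}, len(chunks))
	out := make([][]byte, 0, len(chunks))
	for _, c := range chunks {
		h := fnv.New64a()
		h.Write(c) // hash the raw chunk payload, as the dedup map keys on data
		sum := h.Sum64()
		if _, ok := seen[sum]; ok {
			continue // identical bytes already kept once
		}
		seen[sum] = struct{}{}
		out = append(out, c)
	}
	return out
}

func main() {
	fmt.Println(len(dedupChunks([][]byte{[]byte("a"), []byte("a"), []byte("b")}))) // 2
}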
-func NewResponseDeduplicator(h seriesStream) *responseDeduplicator { +func NewResponseDeduplicator(h *losertree.Tree[*storepb.SeriesResponse, respSet]) *responseDeduplicator { ok := h.Next() var prev *storepb.SeriesResponse if ok { prev = h.At() } return &responseDeduplicator{ - h: h, - ok: ok, - prev: prev, - chunkDedupMap: make(map[uint64]storepb.AggrChunk), + h: h, + ok: ok, + prev: prev, } } @@ -81,7 +73,7 @@ func (d *responseDeduplicator) Next() bool { d.ok = d.h.Next() if !d.ok { if len(d.bufferedSameSeries) > 0 { - d.bufferedResp = append(d.bufferedResp, d.chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) + d.bufferedResp = append(d.bufferedResp, chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) } return len(d.bufferedResp) > 0 } @@ -109,15 +101,15 @@ func (d *responseDeduplicator) Next() bool { continue } - d.bufferedResp = append(d.bufferedResp, d.chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) + d.bufferedResp = append(d.bufferedResp, chainSeriesAndRemIdenticalChunks(d.bufferedSameSeries)) d.prev = s return true } } -func (d *responseDeduplicator) chainSeriesAndRemIdenticalChunks(series []*storepb.SeriesResponse) *storepb.SeriesResponse { - clear(d.chunkDedupMap) +func chainSeriesAndRemIdenticalChunks(series []*storepb.SeriesResponse) *storepb.SeriesResponse { + chunkDedupMap := map[uint64]*storepb.AggrChunk{} for _, s := range series { for _, chk := range s.GetSeries().Chunks { @@ -132,9 +124,9 @@ func (d *responseDeduplicator) chainSeriesAndRemIdenticalChunks(series []*storep hash = xxhash.Sum64(field.Data) } - if _, ok := d.chunkDedupMap[hash]; !ok { + if _, ok := chunkDedupMap[hash]; !ok { chk := chk - d.chunkDedupMap[hash] = chk + chunkDedupMap[hash] = &chk break } } @@ -142,13 +134,13 @@ func (d *responseDeduplicator) chainSeriesAndRemIdenticalChunks(series []*storep } // If no chunks were requested. - if len(d.chunkDedupMap) == 0 { + if len(chunkDedupMap) == 0 { return series[0] } - finalChunks := make([]storepb.AggrChunk, 0, len(d.chunkDedupMap)) - for _, chk := range d.chunkDedupMap { - finalChunks = append(finalChunks, chk) + finalChunks := make([]storepb.AggrChunk, 0, len(chunkDedupMap)) + for _, chk := range chunkDedupMap { + finalChunks = append(finalChunks, *chk) } sort.Slice(finalChunks, func(i, j int) bool { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go index d5461a5947..faed79bc7b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go @@ -329,10 +329,7 @@ func (m *Chunk) Compare(b *Chunk) int { func (x *PartialResponseStrategy) UnmarshalJSON(entry []byte) error { fieldStr, err := strconv.Unquote(string(entry)) if err != nil { - return errors.Wrapf( - err, - "failed to unqote %v, in order to unmarshal as 'partial_response_strategy'. Possible values are %s", string(entry), strings.Join(PartialResponseStrategyValues, ","), - ) + return errors.Wrapf(err, fmt.Sprintf("failed to unqote %v, in order to unmarshal as 'partial_response_strategy'. Possible values are %s", string(entry), strings.Join(PartialResponseStrategyValues, ","))) } if fieldStr == "" { @@ -343,11 +340,7 @@ func (x *PartialResponseStrategy) UnmarshalJSON(entry []byte) error { strategy, ok := PartialResponseStrategy_value[strings.ToUpper(fieldStr)] if !ok { - return errors.Errorf( - "failed to unmarshal %v as 'partial_response_strategy'. 
Possible values are %s", - string(entry), - strings.Join(PartialResponseStrategyValues, ","), - ) + return errors.Errorf(fmt.Sprintf("failed to unmarshal %v as 'partial_response_strategy'. Possible values are %s", string(entry), strings.Join(PartialResponseStrategyValues, ","))) } *x = PartialResponseStrategy(strategy) return nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go index a5b792bca1..e09210d442 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go @@ -6,97 +6,99 @@ package storepb import ( "context" "io" - "iter" "google.golang.org/grpc" ) -type inProcessServer struct { - Store_SeriesServer - ctx context.Context - yield func(response *SeriesResponse, err error) bool +func ServerAsClient(srv StoreServer) StoreClient { + return &serverAsClient{srv: srv} } -func newInProcessServer(ctx context.Context, yield func(*SeriesResponse, error) bool) *inProcessServer { - return &inProcessServer{ - ctx: ctx, - yield: yield, - } +// serverAsClient allows to use servers as clients. +// NOTE: Passing CallOptions does not work - it would be needed to be implemented in grpc itself (before, after are private). +type serverAsClient struct { + srv StoreServer } -func (s *inProcessServer) Send(resp *SeriesResponse) error { - s.yield(resp, nil) - return nil +func (s serverAsClient) Info(ctx context.Context, in *InfoRequest, _ ...grpc.CallOption) (*InfoResponse, error) { + return s.srv.Info(ctx, in) } -func (s *inProcessServer) Context() context.Context { - return s.ctx +func (s serverAsClient) LabelNames(ctx context.Context, in *LabelNamesRequest, _ ...grpc.CallOption) (*LabelNamesResponse, error) { + return s.srv.LabelNames(ctx, in) } -type inProcessClient struct { - Store_SeriesClient - ctx context.Context - next func() (*SeriesResponse, error, bool) - stop func() +func (s serverAsClient) LabelValues(ctx context.Context, in *LabelValuesRequest, _ ...grpc.CallOption) (*LabelValuesResponse, error) { + return s.srv.LabelValues(ctx, in) } -func newInProcessClient(ctx context.Context, next func() (*SeriesResponse, error, bool), stop func()) *inProcessClient { - return &inProcessClient{ - ctx: ctx, - next: next, - stop: stop, - } +func (s serverAsClient) Series(ctx context.Context, in *SeriesRequest, _ ...grpc.CallOption) (Store_SeriesClient, error) { + inSrv := &inProcessStream{recv: make(chan *SeriesResponse), err: make(chan error)} + inSrv.ctx, inSrv.cancel = context.WithCancel(ctx) + go func() { + if err := s.srv.Series(in, inSrv); err != nil { + inSrv.err <- err + } + close(inSrv.err) + close(inSrv.recv) + }() + return &inProcessClientStream{srv: inSrv}, nil } -func (c *inProcessClient) Recv() (*SeriesResponse, error) { - resp, err, ok := c.next() - if err != nil { - c.stop() - return nil, err - } - if !ok { - return nil, io.EOF - } - return resp, err -} +// TODO(bwplotka): Add streaming attributes, metadata etc. Currently those are disconnected. Follow up on https://github.com/grpc/grpc-go/issues/906. +// TODO(bwplotka): Use this in proxy.go and receiver multi tenant proxy. 
+type inProcessStream struct {
	grpc.ServerStream

-func (c *inProcessClient) Context() context.Context {
-	return c.ctx
+	ctx    context.Context
+	cancel context.CancelFunc
+	recv   chan *SeriesResponse
+	err    chan error
}

-func (c *inProcessClient) CloseSend() error {
-	c.stop()
-	return nil
+func NewInProcessStream(ctx context.Context, bufferSize int) *inProcessStream {
+	return &inProcessStream{
+		ctx:  ctx,
+		recv: make(chan *SeriesResponse, bufferSize),
+		err:  make(chan error),
+	}
}

-func ServerAsClient(srv StoreServer) StoreClient {
-	return &serverAsClient{srv: srv}
-}
+func (s *inProcessStream) Context() context.Context { return s.ctx }

-// serverAsClient allows to use servers as clients.
-// NOTE: Passing CallOptions does not work - it would be needed to be implemented in grpc itself (before, after are private).
-type serverAsClient struct {
-	srv StoreServer
+func (s *inProcessStream) Send(r *SeriesResponse) error {
+	select {
+	case <-s.ctx.Done():
+		return s.ctx.Err()
+	case s.recv <- r:
+		return nil
+	}
}

-func (s serverAsClient) LabelNames(ctx context.Context, in *LabelNamesRequest, _ ...grpc.CallOption) (*LabelNamesResponse, error) {
-	return s.srv.LabelNames(ctx, in)
+type inProcessClientStream struct {
+	grpc.ClientStream
+
+	srv *inProcessStream
}

-func (s serverAsClient) LabelValues(ctx context.Context, in *LabelValuesRequest, _ ...grpc.CallOption) (*LabelValuesResponse, error) {
-	return s.srv.LabelValues(ctx, in)
+func (s *inProcessClientStream) Context() context.Context { return s.srv.ctx }
+
+func (s *inProcessClientStream) CloseSend() error {
+	s.srv.cancel()
+	return nil
}

-func (s serverAsClient) Series(ctx context.Context, in *SeriesRequest, _ ...grpc.CallOption) (Store_SeriesClient, error) {
-	var srvIter iter.Seq2[*SeriesResponse, error] = func(yield func(*SeriesResponse, error) bool) {
-		srv := newInProcessServer(ctx, yield)
-		err := s.srv.Series(in, srv)
-		if err != nil {
-			yield(nil, err)
-			return
+func (s *inProcessClientStream) Recv() (*SeriesResponse, error) {
+	select {
+	case r, ok := <-s.srv.recv:
+		if !ok {
+			return nil, io.EOF
+		}
+		return r, nil
+	case err, ok := <-s.srv.err:
+		if !ok {
+			return nil, io.EOF
		}
+		return nil, err
	}
-
-	clientIter, stop := iter.Pull2(srvIter)
-	return newInProcessClient(ctx, clientIter, stop), nil
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go
index 0da00daf4d..050b8e912f 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go
@@ -61,7 +61,7 @@ func SamplesFromPromqlSeries(series promql.Series) ([]Sample, []Histogram) {

// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
// provided proto message. The caller has to make sure that the proto message
-// represents an integer histogram and not a float histogram.
+// represents an integer histogram rather than a float histogram.
// Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L626-L645 func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram { if hp.IsFloatHistogram() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go index 0c2d67dfb1..b5e85d69d8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go @@ -13,6 +13,8 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" types "github.com/gogo/protobuf/types" + github_com_thanos_io_thanos_pkg_store_labelpb "github.com/thanos-io/thanos/pkg/store/labelpb" + labelpb "github.com/thanos-io/thanos/pkg/store/labelpb" prompb "github.com/thanos-io/thanos/pkg/store/storepb/prompb" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -30,6 +32,48 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// Deprecated. Use `thanos.info` instead. +type StoreType int32 + +const ( + StoreType_UNKNOWN StoreType = 0 + StoreType_QUERY StoreType = 1 + StoreType_RULE StoreType = 2 + StoreType_SIDECAR StoreType = 3 + StoreType_STORE StoreType = 4 + StoreType_RECEIVE StoreType = 5 + // DEBUG represents some debug StoreAPI components e.g. thanos tools store-api-serve. + StoreType_DEBUG StoreType = 6 +) + +var StoreType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "QUERY", + 2: "RULE", + 3: "SIDECAR", + 4: "STORE", + 5: "RECEIVE", + 6: "DEBUG", +} + +var StoreType_value = map[string]int32{ + "UNKNOWN": 0, + "QUERY": 1, + "RULE": 2, + "SIDECAR": 3, + "STORE": 4, + "RECEIVE": 5, + "DEBUG": 6, +} + +func (x StoreType) String() string { + return proto.EnumName(StoreType_name, int32(x)) +} + +func (StoreType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a938d55a388af629, []int{0} +} + type Aggr int32 const ( @@ -64,7 +108,7 @@ func (x Aggr) String() string { } func (Aggr) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{0} + return fileDescriptor_a938d55a388af629, []int{1} } type WriteResponse struct { @@ -142,6 +186,87 @@ func (m *WriteRequest) XXX_DiscardUnknown() { var xxx_messageInfo_WriteRequest proto.InternalMessageInfo +// Deprecated. Use `thanos.info` instead. +type InfoRequest struct { +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (m *InfoRequest) String() string { return proto.CompactTextString(m) } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a938d55a388af629, []int{2} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo + +// Deprecated. Use `thanos.info` instead. +type InfoResponse struct { + // Deprecated. 
Use label_sets instead. + Labels []github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel" json:"labels"` + MinTime int64 `protobuf:"varint,2,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,3,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + StoreType StoreType `protobuf:"varint,4,opt,name=storeType,proto3,enum=thanos.StoreType" json:"storeType,omitempty"` + // label_sets is an unsorted list of `ZLabelSet`s. + LabelSets []labelpb.ZLabelSet `protobuf:"bytes,5,rep,name=label_sets,json=labelSets,proto3" json:"label_sets"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a938d55a388af629, []int{3} +} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) +} +func (m *InfoResponse) XXX_Size() int { + return m.Size() +} +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo + type SeriesRequest struct { MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` @@ -180,15 +305,13 @@ type SeriesRequest struct { // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let client knows // server supports it. 
WithoutReplicaLabels []string `protobuf:"bytes,14,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` - // limit is used to limit the number of results returned - Limit int64 `protobuf:"varint,15,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } func (m *SeriesRequest) String() string { return proto.CompactTextString(m) } func (*SeriesRequest) ProtoMessage() {} func (*SeriesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{2} + return fileDescriptor_a938d55a388af629, []int{4} } func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -236,7 +359,7 @@ func (m *QueryHints) Reset() { *m = QueryHints{} } func (m *QueryHints) String() string { return proto.CompactTextString(m) } func (*QueryHints) ProtoMessage() {} func (*QueryHints) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{3} + return fileDescriptor_a938d55a388af629, []int{5} } func (m *QueryHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -281,7 +404,7 @@ func (m *ShardInfo) Reset() { *m = ShardInfo{} } func (m *ShardInfo) String() string { return proto.CompactTextString(m) } func (*ShardInfo) ProtoMessage() {} func (*ShardInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{4} + return fileDescriptor_a938d55a388af629, []int{6} } func (m *ShardInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -319,7 +442,7 @@ func (m *Func) Reset() { *m = Func{} } func (m *Func) String() string { return proto.CompactTextString(m) } func (*Func) ProtoMessage() {} func (*Func) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{5} + return fileDescriptor_a938d55a388af629, []int{7} } func (m *Func) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +482,7 @@ func (m *Grouping) Reset() { *m = Grouping{} } func (m *Grouping) String() string { return proto.CompactTextString(m) } func (*Grouping) ProtoMessage() {} func (*Grouping) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{6} + return fileDescriptor_a938d55a388af629, []int{8} } func (m *Grouping) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,7 +519,7 @@ func (m *Range) Reset() { *m = Range{} } func (m *Range) String() string { return proto.CompactTextString(m) } func (*Range) ProtoMessage() {} func (*Range) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{7} + return fileDescriptor_a938d55a388af629, []int{9} } func (m *Range) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -437,7 +560,7 @@ func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } func (m *SeriesResponse) String() string { return proto.CompactTextString(m) } func (*SeriesResponse) ProtoMessage() {} func (*SeriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{8} + return fileDescriptor_a938d55a388af629, []int{10} } func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -536,15 +659,13 @@ type LabelNamesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,6,rep,name=matchers,proto3" json:"matchers"` // same as in series request. 
WithoutReplicaLabels []string `protobuf:"bytes,7,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` - // limit is used to limit the number of results returned - Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } func (m *LabelNamesRequest) String() string { return proto.CompactTextString(m) } func (*LabelNamesRequest) ProtoMessage() {} func (*LabelNamesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{9} + return fileDescriptor_a938d55a388af629, []int{11} } func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -586,7 +707,7 @@ func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } func (m *LabelNamesResponse) String() string { return proto.CompactTextString(m) } func (*LabelNamesResponse) ProtoMessage() {} func (*LabelNamesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{10} + return fileDescriptor_a938d55a388af629, []int{12} } func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -629,15 +750,13 @@ type LabelValuesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,7,rep,name=matchers,proto3" json:"matchers"` // same as in series request. WithoutReplicaLabels []string `protobuf:"bytes,8,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` - // limit is used to limit the number of results returned - Limit int64 `protobuf:"varint,9,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) } func (*LabelValuesRequest) ProtoMessage() {} func (*LabelValuesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{11} + return fileDescriptor_a938d55a388af629, []int{13} } func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -679,7 +798,7 @@ func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) } func (*LabelValuesResponse) ProtoMessage() {} func (*LabelValuesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a938d55a388af629, []int{12} + return fileDescriptor_a938d55a388af629, []int{14} } func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -709,9 +828,12 @@ func (m *LabelValuesResponse) XXX_DiscardUnknown() { var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo func init() { + proto.RegisterEnum("thanos.StoreType", StoreType_name, StoreType_value) proto.RegisterEnum("thanos.Aggr", Aggr_name, Aggr_value) proto.RegisterType((*WriteResponse)(nil), "thanos.WriteResponse") proto.RegisterType((*WriteRequest)(nil), "thanos.WriteRequest") + proto.RegisterType((*InfoRequest)(nil), "thanos.InfoRequest") + proto.RegisterType((*InfoResponse)(nil), "thanos.InfoResponse") proto.RegisterType((*SeriesRequest)(nil), "thanos.SeriesRequest") proto.RegisterType((*QueryHints)(nil), "thanos.QueryHints") proto.RegisterType((*ShardInfo)(nil), "thanos.ShardInfo") @@ -728,79 +850,91 @@ func init() { func init() { proto.RegisterFile("store/storepb/rpc.proto", fileDescriptor_a938d55a388af629) } var fileDescriptor_a938d55a388af629 = []byte{ - // 1149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 
- // (previous gzipped FileDescriptorProto removed; its raw byte values are elided here for readability)
+ // 1331 bytes of a gzipped FileDescriptorProto
+ // (raw byte values of the regenerated descriptor elided here for readability)
} // Reference imports to suppress errors if they are not otherwise used. @@ -815,6 +949,10 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type StoreClient interface { + /// Info returns meta information about a store, e.g. labels that make that store unique, as well as the time range that is + /// available. + /// Deprecated. Use `thanos.info` instead. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. /// /// Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain @@ -839,6 +977,15 @@ func NewStoreClient(cc *grpc.ClientConn) StoreClient { return &storeClient{cc} } +func (c *storeClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/thanos.Store/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *storeClient) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Store_SeriesClient, error) { stream, err := c.cc.NewStream(ctx, &_Store_serviceDesc.Streams[0], "/thanos.Store/Series", opts...) if err != nil { @@ -891,6 +1038,10 @@ func (c *storeClient) LabelValues(ctx context.Context, in *LabelValuesRequest, o // StoreServer is the server API for Store service. type StoreServer interface { + /// Info returns meta information about a store, e.g. labels that make that store unique, as well as the time range that is + /// available. + /// Deprecated. Use `thanos.info` instead. + Info(context.Context, *InfoRequest) (*InfoResponse, error) /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. /// /// Series should strictly stream full series after series, optionally split by time.
This means that a single frame can contain @@ -911,6 +1062,9 @@ type StoreServer interface { type UnimplementedStoreServer struct { } +func (*UnimplementedStoreServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} func (*UnimplementedStoreServer) Series(req *SeriesRequest, srv Store_SeriesServer) error { return status.Errorf(codes.Unimplemented, "method Series not implemented") } @@ -925,6 +1079,24 @@ func RegisterStoreServer(s *grpc.Server, srv StoreServer) { s.RegisterService(&_Store_serviceDesc, srv) } +func _Store_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/thanos.Store/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Store_Series_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SeriesRequest) if err := stream.RecvMsg(m); err != nil { @@ -986,6 +1158,10 @@ var _Store_serviceDesc = grpc.ServiceDesc{ ServiceName: "thanos.Store", HandlerType: (*StoreServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _Store_Info_Handler, + }, { MethodName: "LabelNames", Handler: _Store_LabelNames_Handler, @@ -1151,6 +1327,95 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelSets) > 0 { + for iNdEx := len(m.LabelSets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LabelSets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.StoreType != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.StoreType)) + i-- + dAtA[i] = 0x20 + } + if m.MaxTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x18 + } + if m.MinTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x10 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = 
encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *SeriesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1171,11 +1436,6 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x78 - } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1630,11 +1890,6 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x40 - } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1771,11 +2026,6 @@ func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x48 - } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1941,6 +2191,45 @@ func (m *WriteRequest) Size() (n int) { return n } +func (m *InfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *InfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.MinTime != 0 { + n += 1 + sovRpc(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovRpc(uint64(m.MaxTime)) + } + if m.StoreType != 0 { + n += 1 + sovRpc(uint64(m.StoreType)) + } + if len(m.LabelSets) > 0 { + for _, e := range m.LabelSets { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + func (m *SeriesRequest) Size() (n int) { if m == nil { return 0 @@ -2002,9 +2291,6 @@ func (m *SeriesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } return n } @@ -2179,9 +2465,6 @@ func (m *LabelNamesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } return n } @@ -2248,9 +2531,6 @@ func (m *LabelValuesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } return n } @@ -2470,6 +2750,231 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreType", wireType) + } + m.StoreType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreType |= StoreType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSets = append(m.LabelSets, labelpb.ZLabelSet{}) + if err := m.LabelSets[len(m.LabelSets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 
0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SeriesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2896,25 +3401,6 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -3866,25 +4352,6 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -4296,25 +4763,6 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto index 2a6313ec02..2a9e9e3eaf 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto @@ -7,6 +7,7 @@ package thanos; import "store/storepb/types.proto"; import "gogoproto/gogo.proto"; import "store/storepb/prompb/types.proto"; +import "store/labelpb/types.proto"; import "google/protobuf/any.proto"; option go_package = "storepb"; @@ -24,6 +25,11 @@ option (gogoproto.goproto_sizecache_all) = false; /// Store represents an API against an instance that stores XOR-encoded values with label set metadata (e.g. Prometheus metrics). service Store { + /// Info returns meta information about a store, e.g. labels that make that store unique, as well as the time range that is + /// available. + /// Deprecated. Use `thanos.info` instead. + rpc Info(InfoRequest) returns (InfoResponse); + /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. /// /// Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain @@ -57,6 +63,32 @@ message WriteRequest { int64 replica = 3; } +// Deprecated. Use `thanos.info` instead. +message InfoRequest {} + +// Deprecated. Use `thanos.info` instead.
+enum StoreType { + UNKNOWN = 0; + QUERY = 1; + RULE = 2; + SIDECAR = 3; + STORE = 4; + RECEIVE = 5; + // DEBUG represents some debug StoreAPI components e.g. thanos tools store-api-serve. + DEBUG = 6; +} + +// Deprecated. Use `thanos.info` instead. +message InfoResponse { + // Deprecated. Use label_sets instead. + repeated Label labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel"]; + int64 min_time = 2; + int64 max_time = 3; + StoreType storeType = 4; + // label_sets is an unsorted list of `ZLabelSet`s. + repeated ZLabelSet label_sets = 5 [(gogoproto.nullable) = false]; +} + message SeriesRequest { int64 min_time = 1; int64 max_time = 2; @@ -105,9 +137,6 @@ message SeriesRequest { // NOTE(bwplotka): the thanos.info.store.supports_without_replica_labels field has to return true to let the client know the // server supports it. repeated string without_replica_labels = 14; - - // limit is used to limit the number of results returned - int64 limit = 15; } // QueryHints represents hints from PromQL that might help to @@ -206,9 +235,6 @@ message LabelNamesRequest { // same as in series request. repeated string without_replica_labels = 7; - - // limit is used to limit the number of results returned - int64 limit = 8; } message LabelNamesResponse { @@ -242,9 +268,6 @@ message LabelValuesRequest { // same as in series request. repeated string without_replica_labels = 8; - - // limit is used to limit the number of results returned - int64 limit = 9; } message LabelValuesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index 08c3122e32..7e2c472350 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -11,12 +11,9 @@ import ( "sort" "strings" "sync" - "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/pkg/errors" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "google.golang.org/grpc" @@ -24,35 +21,19 @@ import ( "google.golang.org/grpc/status" "github.com/thanos-io/thanos/pkg/component" - "github.com/thanos-io/thanos/pkg/filter" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" ) -const ( - RemoteReadFrameLimit = 1048576 - cuckooStoreFilterCapacity = 1000000 - storeFilterUpdateInterval = 15 * time.Second -) +const RemoteReadFrameLimit = 1048576 type TSDBReader interface { storage.ChunkQueryable StartTime() (int64, error) } -// TSDBStoreOption is a functional option for TSDBStore. -type TSDBStoreOption func(s *TSDBStore) - -// WithCuckooMetricNameStoreFilter returns a TSDBStoreOption that enables the Cuckoo filter for metric names. -func WithCuckooMetricNameStoreFilter() TSDBStoreOption { - return func(s *TSDBStore) { - s.storeFilter = filter.NewCuckooMetricNameStoreFilter(cuckooStoreFilterCapacity) - s.startStoreFilterUpdate = true - } -} - // TSDBStore implements the store API against a local TSDB instance. // It attaches the provided external labels to all results. It only responds with raw data // and does not support downsampling.
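The generated rpc.pb.go code above never calls into a varint library: MarshalToSizedBuffer fills the buffer back-to-front, writing each field's value and then its tag byte of the form (field_number << 3) | wire_type (so 0x10 marks min_time, field 2 as a varint, and 0x2a marks label_sets, field 5 as length-delimited; because the buffer is filled in reverse, the tag lands in front on the wire), while every Unmarshal case repeats the same 7-bit shift loop. The sketch below is a minimal, self-contained illustration of that base-128 varint scheme; the file and helper names (sov, encode, decode) are mine and mirror, rather than quote, the generated sovRpc and encodeVarintRpc helpers.

// varint_sketch.go: illustrative only, not part of this diff.
package main

import "fmt"

// sov mirrors sovRpc: the number of bytes x occupies as a varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// encode appends x as a little-endian base-128 varint: 7 payload bits per
// byte, with the high bit set on every byte except the last.
func encode(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

// decode is the loop the generated Unmarshal methods inline for each field,
// minus the length and 64-bit overflow guards (io.ErrUnexpectedEOF,
// ErrIntOverflowRpc) that the generated code adds around it.
func decode(buf []byte) (x uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, n
		}
	}
}

func main() {
	buf := encode(nil, 1331)
	fmt.Println(buf, sov(1331)) // [179 10] 2
	v, n := decode(buf)
	fmt.Println(v, n) // 1331 2
}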
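In the same generated file, _Store_Info_Handler shows the standard shape of a gogoprotobuf/grpc-go unary handler: decode the request, then either call the server method directly or hand it to an installed interceptor along with a grpc.UnaryServerInfo naming "/thanos.Store/Info". The sketch below shows the interceptor side of that contract; it assumes nothing beyond the stock google.golang.org/grpc API, and the logger and file name are mine.

// interceptor_sketch.go: illustrative only, not part of this diff.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logUnary is invoked by generated handlers such as _Store_Info_Handler when
// installed; info.FullMethod carries the path the handler hard-codes.
func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req) // falls through to StoreServer.Info for Info calls
	log.Printf("method=%s took=%s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Installed once, the interceptor wraps every unary method in the
	// service descriptor, including the reinstated Info.
	_ = grpc.NewServer(grpc.UnaryInterceptor(logUnary))
}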
@@ -63,16 +44,8 @@ type TSDBStore struct { buffers sync.Pool maxBytesPerFrame int - extLset labels.Labels - startStoreFilterUpdate bool - storeFilter filter.StoreFilter - mtx sync.RWMutex - close func() - storepb.UnimplementedStoreServer -} - -func (s *TSDBStore) Close() { - s.close() + extLset labels.Labels + mtx sync.RWMutex } func RegisterWritableStoreServer(storeSrv storepb.WriteableStoreServer) func(*grpc.Server) { @@ -89,68 +62,21 @@ type ReadWriteTSDBStore struct { // NewTSDBStore creates a new TSDBStore. // NOTE: Given lset has to be sorted. -func NewTSDBStore( - logger log.Logger, - db TSDBReader, - component component.StoreAPI, - extLset labels.Labels, - options ...TSDBStoreOption, -) *TSDBStore { +func NewTSDBStore(logger log.Logger, db TSDBReader, component component.StoreAPI, extLset labels.Labels) *TSDBStore { if logger == nil { logger = log.NewNopLogger() } - - st := &TSDBStore{ + return &TSDBStore{ logger: logger, db: db, component: component, extLset: extLset, maxBytesPerFrame: RemoteReadFrameLimit, - storeFilter: filter.AllowAllStoreFilter{}, - close: func() {}, buffers: sync.Pool{New: func() interface{} { b := make([]byte, 0, initialBufSize) return &b }}, } - - for _, option := range options { - option(st) - } - - if st.startStoreFilterUpdate { - ctx, cancel := context.WithCancel(context.Background()) - - updateFilter := func(ctx context.Context) { - vals, err := st.LabelValues(ctx, &storepb.LabelValuesRequest{ - Label: model.MetricNameLabel, - End: math.MaxInt64, - }) - if err != nil { - level.Error(logger).Log("msg", "failed to update metric names", "err", err) - return - } - - st.storeFilter.ResetAndSet(vals.Values...) - } - st.close = cancel - updateFilter(ctx) - - t := time.NewTicker(storeFilterUpdateInterval) - - go func() { - for { - select { - case <-t.C: - updateFilter(ctx) - case <-ctx.Done(): - return - } - } - }() - } - - return st } func (s *TSDBStore) SetExtLset(extLset labels.Labels) { @@ -167,11 +93,38 @@ func (s *TSDBStore) getExtLset() labels.Labels { return s.extLset } +// Info returns store information about the Prometheus instance. +func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + minTime, err := s.db.StartTime() + if err != nil { + return nil, errors.Wrap(err, "TSDB min Time") + } + + res := &storepb.InfoResponse{ + Labels: labelpb.ZLabelsFromPromLabels(s.getExtLset()), + StoreType: s.component.ToProto(), + MinTime: minTime, + MaxTime: math.MaxInt64, + } + + // Until we deprecate the single labels in the reply, we just duplicate + // them here for migration/compatibility purposes. + res.LabelSets = []labelpb.ZLabelSet{} + if len(res.Labels) > 0 { + res.LabelSets = append(res.LabelSets, labelpb.ZLabelSet{ + Labels: res.Labels, + }) + } + return res, nil +} + func (s *TSDBStore) LabelSet() []labelpb.ZLabelSet { - labels := labelpb.ZLabelSetsFromPromLabels(s.getExtLset()) + labels := labelpb.ZLabelsFromPromLabels(s.getExtLset()) labelSets := []labelpb.ZLabelSet{} if len(labels) > 0 { - labelSets = append(labelSets, labels...) + labelSets = append(labelSets, labelpb.ZLabelSet{ + Labels: labels, + }) } return labelSets @@ -207,10 +160,6 @@ func (s *TSDBStore) TimeRange() (int64, int64) { return minTime, math.MaxInt64 } -func (s *TSDBStore) Matches(matchers []*labels.Matcher) bool { - return s.storeFilter.Matches(matchers) -} - // CloseDelegator allows to delegate close (releasing resources used by request to the server). 
// This is useful when we invoke StoreAPI within another StoreAPI and results are ephemeral until copied. type CloseDelegator interface { @@ -271,12 +220,7 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_Ser defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb chunk querier series") } - hints := &storage.SelectHints{ - Start: r.MinTime, - End: r.MaxTime, - Limit: int(r.Limit), - } - set := q.Select(srv.Context(), true, hints, matchers...) + set := q.Select(srv.Context(), true, nil, matchers...) shardMatcher := r.ShardInfo.Matcher(&s.buffers) defer shardMatcher.Close() @@ -384,10 +328,7 @@ func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest } defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label names") - hints := &storage.LabelHints{ - Limit: int(r.Limit), - } - res, _, err := q.LabelNames(ctx, hints, matchers...) + res, _, err := q.LabelNames(ctx, matchers...) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -455,7 +396,6 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque Start: r.Start, End: r.End, Func: "series", - Limit: int(r.Limit), } set := q.Select(ctx, false, hints, matchers...) @@ -465,10 +405,7 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque return &storepb.LabelValuesResponse{}, nil } - hints := &storage.LabelHints{ - Limit: int(r.Limit), - } - res, _, err := q.LabelValues(ctx, r.Label, hints, matchers...) + res, _, err := q.LabelValues(ctx, r.Label, matchers...) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go index a84f1ca673..d6108771f4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go @@ -10,39 +10,33 @@ import ( // MergeSlices merges a set of sorted string slices into a single one // while removing all duplicates. -// If limit is set, only the first limit results will be returned. 0 to disable. -func MergeSlices(limit int, a ...[]string) []string { +func MergeSlices(a ...[]string) []string { if len(a) == 0 { return nil } if len(a) == 1 { - return truncateToLimit(limit, a[0]) + return a[0] } l := len(a) / 2 - return mergeTwoStringSlices(limit, MergeSlices(limit, a[:l]...), MergeSlices(limit, a[l:]...)) + return mergeTwoStringSlices(MergeSlices(a[:l]...), MergeSlices(a[l:]...)) } // MergeUnsortedSlices behaves like MergeSlices, but input slices are validated // for sortedness and are sorted first if they are not ordered yet. -// If limit is set, only the first limit results will be returned. 0 to disable. -func MergeUnsortedSlices(limit int, a ...[]string) []string { +func MergeUnsortedSlices(a ...[]string) []string { for _, s := range a { if !sort.StringsAreSorted(s) { sort.Strings(s) } } - return MergeSlices(limit, a...) + return MergeSlices(a...) } -func mergeTwoStringSlices(limit int, a, b []string) []string { - a = truncateToLimit(limit, a) - b = truncateToLimit(limit, b) - +func mergeTwoStringSlices(a, b []string) []string { maxl := len(a) if len(b) > len(a) { maxl = len(b) } - res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { @@ -62,13 +56,5 @@ func mergeTwoStringSlices(limit int, a, b []string) []string { // Append all remaining elements. res = append(res, a...) res = append(res, b...)
- res = truncateToLimit(limit, res) return res } - -func truncateToLimit(limit int, a []string) []string { - if limit > 0 && len(a) > limit { - return a[:limit] - } - return a -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2eb463c967..ec416562d1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -312,9 +312,6 @@ github.com/davecgh/go-spew/spew # github.com/dennwc/varint v1.0.0 ## explicit; go 1.12 github.com/dennwc/varint -# github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 -## explicit -github.com/dgryski/go-metro # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous @@ -674,7 +671,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.79 +# github.com/minio/minio-go/v7 v7.0.80 ## explicit; go 1.22 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/cors @@ -837,7 +834,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.55.0 +# github.com/prometheus/prometheus v0.55.1 ## explicit; go 1.22.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -913,9 +910,6 @@ github.com/sean-/seed # github.com/segmentio/fasthash v1.0.3 ## explicit; go 1.11 github.com/segmentio/fasthash/fnv1a -# github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 -## explicit; go 1.15 -github.com/seiflotfy/cuckoofilter # github.com/sercand/kuberesolver/v4 v4.0.0 => github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v4 @@ -982,8 +976,8 @@ github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/ringbuffer github.com/thanos-io/promql-engine/storage github.com/thanos-io/promql-engine/storage/prometheus -# github.com/thanos-io/thanos v0.35.2-0.20241011111532-af0900bfd290 -## explicit; go 1.23.0 +# github.com/thanos-io/thanos v0.36.1 +## explicit; go 1.21 github.com/thanos-io/thanos/pkg/api/query/querypb github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -1007,7 +1001,6 @@ github.com/thanos-io/thanos/pkg/extkingpin github.com/thanos-io/thanos/pkg/extprom github.com/thanos-io/thanos/pkg/extprom/http github.com/thanos-io/thanos/pkg/extpromql -github.com/thanos-io/thanos/pkg/filter github.com/thanos-io/thanos/pkg/gate github.com/thanos-io/thanos/pkg/info/infopb github.com/thanos-io/thanos/pkg/losertree @@ -1144,7 +1137,7 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector/pdata v1.18.0 +# go.opentelemetry.io/collector/pdata v1.19.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data @@ -1327,7 +1320,7 @@ golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.8.0 +# golang.org/x/sync v0.9.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore @@ -1350,7 +1343,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.7.0 +# golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.24.0
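With the limit parameter reverted, the strutil helpers above go back to taking only the slices to merge. A small usage sketch under those post-revert signatures (the file name is mine):

// merge_sketch.go: illustrative only, not part of this diff.
package main

import (
	"fmt"

	"github.com/thanos-io/thanos/pkg/strutil"
)

func main() {
	// Inputs may be unsorted; each slice is sorted first, then the slices
	// are merged pairwise with duplicates dropped, so this prints [a b c].
	fmt.Println(strutil.MergeUnsortedSlices([]string{"b", "a"}, []string{"c", "a"}))
}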
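Taken together, the update reinstates Store.Info end to end: the rpc.proto definition, the generated client and server stubs, and the TSDBStore implementation that reports external labels and the [StartTime, math.MaxInt64] time range. A hedged sketch of calling it through the generated client; the address is a placeholder and the plaintext dial option is for illustration only.

// info_client_sketch.go: illustrative only, not part of this diff.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/thanos-io/thanos/pkg/store/storepb"
)

func main() {
	// "localhost:10901" stands in for a Store API gRPC endpoint. WithInsecure
	// is deprecated in newer grpc-go but matches the vendored API era here.
	conn, err := grpc.Dial("localhost:10901", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Info is deprecated in favor of the thanos.info service, but this vendor
	// update wires it back in, so older clients keep working.
	resp, err := storepb.NewStoreClient(conn).Info(context.Background(), &storepb.InfoRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.StoreType, resp.MinTime, resp.MaxTime, resp.LabelSets)
}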