diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk
index a71b1613c3..3a016cbd69 100644
--- a/.bingo/Variables.mk
+++ b/.bingo/Variables.mk
@@ -1,4 +1,4 @@
-# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.6. DO NOT EDIT.
+# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.7. DO NOT EDIT.
 # All tools are designed to be build inside $GOBIN.
 BINGO_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
 GOPATH ?= $(shell go env GOPATH)
@@ -21,125 +21,125 @@ ALERTMANAGER := $(GOBIN)/alertmanager-v0.24.0
 $(ALERTMANAGER): $(BINGO_DIR)/alertmanager.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/alertmanager-v0.24.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=alertmanager.mod -o=$(GOBIN)/alertmanager-v0.24.0 "github.com/prometheus/alertmanager/cmd/alertmanager"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=alertmanager.mod -o=$(GOBIN)/alertmanager-v0.24.0 "github.com/prometheus/alertmanager/cmd/alertmanager"
 
 BINGO := $(GOBIN)/bingo-v0.6.0
 $(BINGO): $(BINGO_DIR)/bingo.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/bingo-v0.6.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.6.0 "github.com/bwplotka/bingo"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.6.0 "github.com/bwplotka/bingo"
 
 FAILLINT := $(GOBIN)/faillint-v1.11.0
 $(FAILLINT): $(BINGO_DIR)/faillint.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/faillint-v1.11.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=faillint.mod -o=$(GOBIN)/faillint-v1.11.0 "github.com/fatih/faillint"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=faillint.mod -o=$(GOBIN)/faillint-v1.11.0 "github.com/fatih/faillint"
 
 GO_BINDATA := $(GOBIN)/go-bindata-v3.1.1+incompatible
 $(GO_BINDATA): $(BINGO_DIR)/go-bindata.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/go-bindata-v3.1.1+incompatible"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=go-bindata.mod -o=$(GOBIN)/go-bindata-v3.1.1+incompatible "github.com/go-bindata/go-bindata/go-bindata"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=go-bindata.mod -o=$(GOBIN)/go-bindata-v3.1.1+incompatible "github.com/go-bindata/go-bindata/go-bindata"
 
 GOIMPORTS := $(GOBIN)/goimports-v0.1.11
 $(GOIMPORTS): $(BINGO_DIR)/goimports.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/goimports-v0.1.11"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=goimports.mod -o=$(GOBIN)/goimports-v0.1.11 "golang.org/x/tools/cmd/goimports"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goimports.mod -o=$(GOBIN)/goimports-v0.1.11 "golang.org/x/tools/cmd/goimports"
 
 GOJSONTOYAML := $(GOBIN)/gojsontoyaml-v0.1.0
 $(GOJSONTOYAML): $(BINGO_DIR)/gojsontoyaml.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/gojsontoyaml-v0.1.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=gojsontoyaml.mod -o=$(GOBIN)/gojsontoyaml-v0.1.0 "github.com/brancz/gojsontoyaml"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=gojsontoyaml.mod -o=$(GOBIN)/gojsontoyaml-v0.1.0 "github.com/brancz/gojsontoyaml"
 
-GOLANGCI_LINT := $(GOBIN)/golangci-lint-v1.49.0
+GOLANGCI_LINT := $(GOBIN)/golangci-lint-v1.51.1
 $(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/golangci-lint-v1.49.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v1.49.0 "github.com/golangci/golangci-lint/cmd/golangci-lint"
+	@echo "(re)installing $(GOBIN)/golangci-lint-v1.51.1"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v1.51.1 "github.com/golangci/golangci-lint/cmd/golangci-lint"
 
 GOTESPLIT := $(GOBIN)/gotesplit-v0.2.1
 $(GOTESPLIT): $(BINGO_DIR)/gotesplit.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/gotesplit-v0.2.1"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=gotesplit.mod -o=$(GOBIN)/gotesplit-v0.2.1 "github.com/Songmu/gotesplit/cmd/gotesplit"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=gotesplit.mod -o=$(GOBIN)/gotesplit-v0.2.1 "github.com/Songmu/gotesplit/cmd/gotesplit"
 
 HUGO := $(GOBIN)/hugo-v0.101.0
 $(HUGO): $(BINGO_DIR)/hugo.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/hugo-v0.101.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=hugo.mod -o=$(GOBIN)/hugo-v0.101.0 "github.com/gohugoio/hugo"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=hugo.mod -o=$(GOBIN)/hugo-v0.101.0 "github.com/gohugoio/hugo"
 
 JB := $(GOBIN)/jb-v0.5.1
 $(JB): $(BINGO_DIR)/jb.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/jb-v0.5.1"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=jb.mod -o=$(GOBIN)/jb-v0.5.1 "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=jb.mod -o=$(GOBIN)/jb-v0.5.1 "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb"
 
 JSONNET_LINT := $(GOBIN)/jsonnet-lint-v0.18.0
 $(JSONNET_LINT): $(BINGO_DIR)/jsonnet-lint.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/jsonnet-lint-v0.18.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=jsonnet-lint.mod -o=$(GOBIN)/jsonnet-lint-v0.18.0 "github.com/google/go-jsonnet/cmd/jsonnet-lint"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=jsonnet-lint.mod -o=$(GOBIN)/jsonnet-lint-v0.18.0 "github.com/google/go-jsonnet/cmd/jsonnet-lint"
 
 JSONNET := $(GOBIN)/jsonnet-v0.18.0
 $(JSONNET): $(BINGO_DIR)/jsonnet.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/jsonnet-v0.18.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=jsonnet.mod -o=$(GOBIN)/jsonnet-v0.18.0 "github.com/google/go-jsonnet/cmd/jsonnet"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=jsonnet.mod -o=$(GOBIN)/jsonnet-v0.18.0 "github.com/google/go-jsonnet/cmd/jsonnet"
 
 JSONNETFMT := $(GOBIN)/jsonnetfmt-v0.18.0
 $(JSONNETFMT): $(BINGO_DIR)/jsonnetfmt.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/jsonnetfmt-v0.18.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=jsonnetfmt.mod -o=$(GOBIN)/jsonnetfmt-v0.18.0 "github.com/google/go-jsonnet/cmd/jsonnetfmt"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=jsonnetfmt.mod -o=$(GOBIN)/jsonnetfmt-v0.18.0 "github.com/google/go-jsonnet/cmd/jsonnetfmt"
 
 MDOX := $(GOBIN)/mdox-v0.9.1-0.20220713110358-25b9abcf90a0
 $(MDOX): $(BINGO_DIR)/mdox.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/mdox-v0.9.1-0.20220713110358-25b9abcf90a0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=mdox.mod -o=$(GOBIN)/mdox-v0.9.1-0.20220713110358-25b9abcf90a0 "github.com/bwplotka/mdox"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=mdox.mod -o=$(GOBIN)/mdox-v0.9.1-0.20220713110358-25b9abcf90a0 "github.com/bwplotka/mdox"
 
 MINIO := $(GOBIN)/minio-v0.0.0-20220720015624-ce8397f7d944
 $(MINIO): $(BINGO_DIR)/minio.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/minio-v0.0.0-20220720015624-ce8397f7d944"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=minio.mod -o=$(GOBIN)/minio-v0.0.0-20220720015624-ce8397f7d944 "github.com/minio/minio"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=minio.mod -o=$(GOBIN)/minio-v0.0.0-20220720015624-ce8397f7d944 "github.com/minio/minio"
 
 PROMDOC := $(GOBIN)/promdoc-v0.8.0
 $(PROMDOC): $(BINGO_DIR)/promdoc.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/promdoc-v0.8.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=promdoc.mod -o=$(GOBIN)/promdoc-v0.8.0 "github.com/plexsystems/promdoc"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=promdoc.mod -o=$(GOBIN)/promdoc-v0.8.0 "github.com/plexsystems/promdoc"
 
 PROMETHEUS := $(GOBIN)/prometheus-v0.37.0
 $(PROMETHEUS): $(BINGO_DIR)/prometheus.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/prometheus-v0.37.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=prometheus.mod -o=$(GOBIN)/prometheus-v0.37.0 "github.com/prometheus/prometheus/cmd/prometheus"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=prometheus.mod -o=$(GOBIN)/prometheus-v0.37.0 "github.com/prometheus/prometheus/cmd/prometheus"
 
 PROMTOOL := $(GOBIN)/promtool-v0.37.0
 $(PROMTOOL): $(BINGO_DIR)/promtool.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/promtool-v0.37.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=promtool.mod -o=$(GOBIN)/promtool-v0.37.0 "github.com/prometheus/prometheus/cmd/promtool"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=promtool.mod -o=$(GOBIN)/promtool-v0.37.0 "github.com/prometheus/prometheus/cmd/promtool"
 
 PROMU := $(GOBIN)/promu-v0.5.0
 $(PROMU): $(BINGO_DIR)/promu.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/promu-v0.5.0"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=promu.mod -o=$(GOBIN)/promu-v0.5.0 "github.com/prometheus/promu"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=promu.mod -o=$(GOBIN)/promu-v0.5.0 "github.com/prometheus/promu"
 
 PROTOC_GEN_GOGOFAST := $(GOBIN)/protoc-gen-gogofast-v1.3.2
 $(PROTOC_GEN_GOGOFAST): $(BINGO_DIR)/protoc-gen-gogofast.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/protoc-gen-gogofast-v1.3.2"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=protoc-gen-gogofast.mod -o=$(GOBIN)/protoc-gen-gogofast-v1.3.2 "github.com/gogo/protobuf/protoc-gen-gogofast"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=protoc-gen-gogofast.mod -o=$(GOBIN)/protoc-gen-gogofast-v1.3.2 "github.com/gogo/protobuf/protoc-gen-gogofast"
 
 SHFMT := $(GOBIN)/shfmt-v3.5.1
 $(SHFMT): $(BINGO_DIR)/shfmt.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
 	@echo "(re)installing $(GOBIN)/shfmt-v3.5.1"
-	@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=shfmt.mod -o=$(GOBIN)/shfmt-v3.5.1 "mvdan.cc/sh/v3/cmd/shfmt"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=shfmt.mod -o=$(GOBIN)/shfmt-v3.5.1 "mvdan.cc/sh/v3/cmd/shfmt"
diff --git a/.bingo/golangci-lint.mod b/.bingo/golangci-lint.mod
index ce87937a50..79765e7dcf 100644
--- a/.bingo/golangci-lint.mod
+++ b/.bingo/golangci-lint.mod
@@ -2,4 +2,4 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
 
 go 1.14
 
-require github.com/golangci/golangci-lint v1.49.0 // cmd/golangci-lint
+require github.com/golangci/golangci-lint v1.51.1 // cmd/golangci-lint
diff --git a/.bingo/golangci-lint.sum b/.bingo/golangci-lint.sum
index dcf464cba8..a54388f0ce 100644
--- a/.bingo/golangci-lint.sum
+++ b/.bingo/golangci-lint.sum
@@ -1,7 +1,11 @@
+4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA=
+4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw=
 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
 4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0=
 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
+4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc=
+4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
 bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -65,6 +69,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
 contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Abirdcfly/dupword v0.0.9 h1:MxprGjKq3yDBICXDgEEsyGirIXfMYXkLNT/agPsE1tk=
+github.com/Abirdcfly/dupword v0.0.9/go.mod h1:PzmHVLLZ27MvHSzV7eFmMXSFArWXZPZmfuuziuUrf2g=
 github.com/Antonboom/errname v0.1.6 h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24=
 github.com/Antonboom/errname v0.1.6/go.mod h1:7lz79JAnuoMNDAWE9MeeIr1/c/VpSUWatBv2FH9NYpI=
 github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako=
@@ -78,6 +84,8 @@ github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I
 github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
 github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
@@ -99,6 +107,8 @@ github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQ
 github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
 github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o=
 github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
+github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA=
+github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
 github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -170,6 +180,8 @@ github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4
 github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU=
 github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 h1:E7LT642ysztPWE0dfz43cWOvMiF42DyTRC+eZIaO4yI=
 github.com/chavacava/garif v0.0.0-20220630083739-93517212f375/go.mod h1:4m1Rv7xfuwWPNKXlThldNuJvutYM6J95wNuuVmn55To=
+github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348 h1:cy5GCEZLUCshCGCRRUjxHrDUqkB4l5cuUt3ShEckQEo=
+github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348/go.mod h1:f/miWtG3SSuTxKsNK3o58H1xl+XV6ZIfbC6p7lPPB8U=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -207,8 +219,11 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cristalhq/acmd v0.7.0/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ=
+github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ=
 github.com/curioswitch/go-reassign v0.1.2 h1:ekM07+z+VFT560Exz4mTv0/s1yU9gem6CJc/tlYpkmI=
 github.com/curioswitch/go-reassign v0.1.2/go.mod h1:bFJIHgtTM3hRm2sKXSPkbwNjSFyGURQXyn4IXD2qwfQ=
+github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
+github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
 github.com/daixiang0/gci v0.2.8 h1:1mrIGMBQsBu0P7j7m1M8Lb+ZeZxsZL+jyGX4YoMJJpg=
 github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
 github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4=
@@ -217,6 +232,8 @@ github.com/daixiang0/gci v0.4.3 h1:wf7x0xRjQqTlA2dzHTI0A/xPyp7VcBatBG9nwGatwbQ=
 github.com/daixiang0/gci v0.4.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
 github.com/daixiang0/gci v0.6.3 h1:wUAqXChk8HbwXn8AfxD9DYSCp9Bpz1L3e6Q4Roe+q9E=
 github.com/daixiang0/gci v0.6.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
+github.com/daixiang0/gci v0.9.0 h1:t8XZ0vK6l0pwPoOmoGyqW2NwQlvbpAQNVvu/GRBgykM=
+github.com/daixiang0/gci v0.9.0/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
 github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -254,6 +271,8 @@ github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
 github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
+github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
 github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
 github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
 github.com/firefart/nonamedreturns v1.0.1 h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag=
@@ -287,6 +306,8 @@ github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+Dx
 github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k=
 github.com/go-critic/go-critic v0.6.4 h1:tucuG1pvOyYgpBIrVxw0R6gwO42lNa92Aq3VaDoIs+E=
 github.com/go-critic/go-critic v0.6.4/go.mod h1:qL5SOlk7NtY6sJPoVCTKDIgzNOxHkkkOCVDyi9wJe1U=
+github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo=
+github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -301,6 +322,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
 github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=
@@ -309,12 +332,17 @@ github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+D
 github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
 github.com/go-toolsmith/astcopy v1.0.1 h1:l09oBhAPyV74kLJ3ZO31iBU8htZGTwr9LTjuMCyL8go=
 github.com/go-toolsmith/astcopy v1.0.1/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
+github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
+github.com/go-toolsmith/astcopy v1.0.3 h1:r0bgSRlMOAgO+BdQnVAcpMSMkrQCnV6ZJmIkrJgcJj0=
+github.com/go-toolsmith/astcopy v1.0.3/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
 github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ=
 github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
 github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc=
 github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw=
 github.com/go-toolsmith/astequal v1.0.2 h1:+XvaV8zNxua+9+Oa4AHmgmpo4RYAbwr/qjNppLfX2yM=
 github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
+github.com/go-toolsmith/astequal v1.0.3 h1:+LVdyRatFS+XO78SGV4I3TCEA0AC7fKEGma+fH+674o=
+github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
 github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=
 github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
 github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
@@ -329,6 +357,8 @@ github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYw
 github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
 github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
 github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
+github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -388,6 +418,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6
 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
+github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
+github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
 github.com/golangci/golangci-lint v1.39.1-0.20210330125642-6844f6abf817 h1:/cj909lXJ48yCH1dmtF7mrm8qH/+aUR15PsC5Oz9SA4=
 github.com/golangci/golangci-lint v1.39.1-0.20210330125642-6844f6abf817/go.mod h1:tUZzmjq/sO4QmwvxzN5LhVvStPu3OgHtijV8Oqoc/fQ=
 github.com/golangci/golangci-lint v1.45.2 h1:9I3PzkvscJkFAQpTQi5Ga0V4qWdJERajX1UZ7QqkW+I=
@@ -398,12 +430,16 @@ github.com/golangci/golangci-lint v1.47.1 h1:hbubHskV2Ppwz4ZZE2lc0/Pw9ZhqLuzm2dT
 github.com/golangci/golangci-lint v1.47.1/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg=
 github.com/golangci/golangci-lint v1.49.0 h1:I8WHOavragDttlLHtSraHn/h39C+R60bEQ5NoGcHQr8=
 github.com/golangci/golangci-lint v1.49.0/go.mod h1:+V/7lLv449R6w9mQ3WdV0EKh7Je/jTylMeSwBZcLeWE=
+github.com/golangci/golangci-lint v1.51.1 h1:N5HD/x0ZrhJYsgKWyz7yJxxQ8JKR0Acc+FOP7QtGSAA=
+github.com/golangci/golangci-lint v1.51.1/go.mod h1:hnyNNO3fJ2Rjwo6HM+VXvcmLkKDOuBAnR9gVlS1mW1E=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
 github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
 github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
 github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo=
 github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0=
+github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
 github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 h1:c9Mqqrm/Clj5biNaG7rABrmwUq88nHh0uABo2b/WYmc=
 github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
 github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI=
@@ -433,6 +469,8 @@ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -473,12 +511,15 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8
 github.com/gookit/color v1.3.8/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
 github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
 github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM=
+github.com/gookit/color v1.5.2/go.mod h1:w8h4bGiHeeBpvQVePTutdbERIUf3oJE5lZ8HM0UgXyg=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
 github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 h1:Nb2aRlC404yz7gQIfRZxX9/MLvQiqXyiBTJtgAy6yrI=
 github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
 github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U=
 github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
+github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 h1:9alfqbrhuD+9fLZ4iaAVwhlp5PEhmnBt7yvK2Oy5C1U=
+github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
 github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -574,6 +615,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
 github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jgautheron/goconst v1.4.0 h1:hp9XKUpe/MPyDamUbfsrGpe+3dnY2whNK4EtB86dvLM=
 github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
 github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
@@ -589,6 +631,7 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw=
@@ -609,6 +652,8 @@ github.com/julz/importas v0.0.0-20210228071311-d0bf5cb4e1db h1:ZmwBthGFMVAieuVpL
 github.com/julz/importas v0.0.0-20210228071311-d0bf5cb4e1db/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
 github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
 github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/junk1tm/musttag v0.4.4 h1:VK4L7v7lvWAhKDDx0cUJgbb0UBNipYinv8pPeHJzH9Q=
+github.com/junk1tm/musttag v0.4.4/go.mod h1:XkcL/9O6RmD88JBXb+I15nYRl9W4ExhgQeCBEhfMC8U=
 github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -619,8 +664,12 @@ github.com/kisielk/errcheck v1.6.1 h1:cErYo+J4SmEjdXZrVXGwLJCE2sB06s23LpkcyWNrT+
 github.com/kisielk/errcheck v1.6.1/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
 github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c=
 github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
+github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8=
+github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
 github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR0uvmw=
+github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo=
 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
@@ -653,6 +702,8 @@ github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
 github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
+github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
+github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
 github.com/ldez/gomoddirectives v0.2.1 h1:9pAcW9KRZW7HQjFwbozNvFMcNVwdCBufU7os5QUwLIY=
 github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4=
 github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg=
@@ -661,20 +712,27 @@ github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUc
 github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
 github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM=
 github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
+github.com/ldez/tagliatelle v0.4.0 h1:sylp7d9kh6AdXN2DpVGHBRb5guTVAgOxqNGhbqc4b1c=
+github.com/ldez/tagliatelle v0.4.0/go.mod h1:mNtTfrHy2haaBAw+VT7IBV6VXBThS7TCreYWbBcJ87I=
 github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg=
 github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
+github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
+github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
 github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
 github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
 github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
 github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
 github.com/magefile/mage v1.13.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
+github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -682,6 +740,8 @@ github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaW
 github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
 github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
+github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
 github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=
 github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
 github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q=
@@ -710,12 +770,15 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -732,6 +795,8 @@ github.com/mgechev/revive v1.2.1 h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4=
 github.com/mgechev/revive v1.2.1/go.mod h1:+Ro3wqY4vakcYNtkBWdZC7dBg1xSB6sp054wWwmeFm0=
 github.com/mgechev/revive v1.2.3 h1:NzIEEa9+WimQ6q2Ov7OcNeySS/IOcwtkQ8RAh0R5UJ4=
 github.com/mgechev/revive v1.2.3/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q=
+github.com/mgechev/revive v1.2.5 h1:UF9AR8pOAuwNmhXj2odp4mxv9Nx2qUIwVz8ZsU+Mbec=
+github.com/mgechev/revive v1.2.5/go.mod h1:nFOXent79jMTISAfOAasKfy0Z2Ejq0WX7Qn/KAdYopI=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
@@ -786,11 +851,15 @@ github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCr
 github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI=
 github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo=
 github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg=
+github.com/nishanths/exhaustive v0.9.5 h1:TzssWan6orBiLYVqewCG8faud9qlFntJE30ACpzmGME=
+github.com/nishanths/exhaustive v0.9.5/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA=
 github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
 github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw=
 github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE=
 github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
 github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
+github.com/nunnatsa/ginkgolinter v0.8.1 h1:/y4o/0hV+ruUHj4xXh89xlFjoaitnI4LnkpuYs02q1c=
+github.com/nunnatsa/ginkgolinter v0.8.1/go.mod h1:FYYLtszIdmzCH8XMaMPyxPVXZ7VCaIm55bA+gugx+14=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@@ -807,6 +876,9 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
 github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.3.1/go.mod h1:Sv4yQXwG5VmF7tm3Q5Z+RWUpPo24LF1mpnz2crUb8Ys=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
@@ -814,6 +886,10 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl
 github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
 github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
 github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.0/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
 github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
@@ -834,6 +910,8 @@ github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZO
 github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
 github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw=
 github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI=
+github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
+github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
 github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
@@ -855,6 +933,8 @@ github.com/polyfloyd/go-errorlint v1.0.0 h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjL
 github.com/polyfloyd/go-errorlint v1.0.0/go.mod h1:KZy4xxPJyy88/gldCe5OdW6OQRtNO3EZE7hXzmnebgA=
 github.com/polyfloyd/go-errorlint v1.0.2 h1:kp1yvHflYhTmw5m3MmBy8SCyQkKPjwDthVuMH0ug6Yk=
 github.com/polyfloyd/go-errorlint v1.0.2/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI=
+github.com/polyfloyd/go-errorlint v1.0.6 h1:ZevdyEGxDoHAMQUVvdTT06hnYuKULe8TQkOmIYx6s1c=
+github.com/polyfloyd/go-errorlint v1.0.6/go.mod h1:NcnNncnm8fVV7vfQWiI4HZrzWFzGp24C262IQutNcMs=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
@@ -905,6 +985,8 @@ github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a h1:sWFav
 github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a/go.mod h1:VMX+OnnSw4LicdiEGtRSD/1X8kW7GuEscjYNr4cOIT4=
 github.com/quasilyte/go-ruleguard v0.3.17 h1:cDdoaSbQg11LXPDQqiCK54QmQXsEQQCTIgdcpeULGSI=
 github.com/quasilyte/go-ruleguard v0.3.17/go.mod h1:sST5PvaR7yb/Az5ksX8oc88usJ4EGjmJv7cK7y3jyig=
+github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE=
+github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs=
 github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
 github.com/quasilyte/go-ruleguard/dsl v0.3.1/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
 github.com/quasilyte/go-ruleguard/dsl v0.3.12-0.20220101150716-969a394a9451/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
@@ -913,6 +995,7 @@ github.com/quasilyte/go-ruleguard/dsl v0.3.16/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQ
 github.com/quasilyte/go-ruleguard/dsl v0.3.17/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
 github.com/quasilyte/go-ruleguard/dsl v0.3.19/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
 github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
 github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
 github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
 github.com/quasilyte/go-ruleguard/rules v0.0.0-20210221215616-dfcc94e3dffd/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
@@ -921,6 +1004,8 @@ github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 h1:P4QPNn+TK49zJj
 github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM=
 github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8=
 github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM=
+github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f h1:6Gtn2i04RD0gVyYf2/IUMTIs+qYleBt4zxDqkLTcu4U=
+github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
 github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
 github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
 github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
@@ -934,6 +1019,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -944,8 +1030,12 @@ github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcT
 github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg=
 github.com/ryancurrah/gomodguard v1.2.4 h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1rEOtBp4=
 github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M=
+github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw=
+github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
 github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
 github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
+github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI=
+github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
 github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
@@ -956,10 +1046,14 @@ github.com/sanposhiho/wastedassign v0.2.0 h1:0vycy8D/Ky55U5ub8oJFqyDv9M4ICM/wte9
 github.com/sanposhiho/wastedassign v0.2.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE=
 github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
 github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
+github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
+github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
 github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
 github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
 github.com/sashamelentyev/usestdlibvars v1.13.0 h1:uObNudVEEHf6JbOJy5bgKJloA1bWjxR9fwgNFpPzKnI=
 github.com/sashamelentyev/usestdlibvars v1.13.0/go.mod h1:D2Wb7niIYmTB+gB8z7kh8tyP5ccof1dQ+SFk+WW5NtY=
+github.com/sashamelentyev/usestdlibvars v1.21.1 h1:GQGlReyL9Ek8DdJmwtwhHbhwHnuPfsKaprpjnrPcjxc=
+github.com/sashamelentyev/usestdlibvars v1.21.1/go.mod h1:MPI52Qq99iO9sFZZcKJ2y/bx6BNjs+/2bw3PCggIbew=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/securego/gosec/v2 v2.7.0 h1:mOhJv5w6UyNLpSssQOQCc7eGkKLuicAxvf66Ey/X4xk=
 github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM=
@@ -971,6 +1065,8 @@ github.com/securego/gosec/v2 v2.12.0 h1:CQWdW7ATFpvLSohMVsajscfyHJ5rsGmEXmsNcsDN
 github.com/securego/gosec/v2 v2.12.0/go.mod h1:iTpT+eKTw59bSgklBHlSnH5O2tNygHMDxfvMubA4i7I=
 github.com/securego/gosec/v2 v2.13.1 h1:7mU32qn2dyC81MH9L2kefnQyRMUarfDER3iQyMHcjYM=
 github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo=
+github.com/securego/gosec/v2 v2.14.0 h1:U1hfs0oBackChXA72plCYVA4cOlQ4gO+209dHiSNZbI=
+github.com/securego/gosec/v2 v2.14.0/go.mod h1:Ff03zEi5NwSOfXj9nFpBfhbWTtROCkg9N+9goggrYn4=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
 github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
@@ -979,6 +1075,7 @@ github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtS
 github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
 github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs=
 github.com/shirou/gopsutil/v3 v3.22.7/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
+github.com/shirou/gopsutil/v3 v3.23.1/go.mod h1:NN6mnm5/0k8jw4cBfCnJtr5L7ErOTg18tMNpgFkn0hA=
 github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
 github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -1002,6 +1099,8 @@ github.com/sivchari/tenv v1.5.0 h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ=
 github.com/sivchari/tenv v1.5.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
 github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE=
 github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
+github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak=
+github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -1009,6 +1108,8 @@ github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY=
 github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
 github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ=
 github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
+github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
+github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
@@ -1032,6 +1133,8 @@ github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
 github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
 github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
 github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
 github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
@@ -1063,6 +1166,8 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -1078,15 +1183,20 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
 github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
 github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs=
 github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo=
+github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
+github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
 github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=
 github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
 github.com/sylvia7788/contextcheck v1.0.6 h1:o2EZgVPyMKE/Mtoqym61DInKEjwEbsmyoxg3VrmjNO4=
 github.com/sylvia7788/contextcheck v1.0.6/go.mod h1:9XDxwvxyuKD+8N+a7Gs7bfWLityh5t70g/GjdEt2N2M=
+github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
 github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U=
 github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
 github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A=
@@ -1101,14 +1211,20 @@ github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWh
 github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
 github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=
 github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e h1:MV6KaVu/hzByHP0UvJ4HcMGE/8a6A4Rggc/0wx2AvJo=
+github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
+github.com/timonwong/loggercheck v0.9.3 h1:ecACo9fNiHxX4/Bc02rW2+kaJIAMAes7qJ7JKxt0EZI=
+github.com/timonwong/loggercheck v0.9.3/go.mod h1:wUqnk9yAOIKtGA39l1KLE9Iz0QiTocu/YZoOf+OzFdw=
 github.com/timonwong/logrlint v0.1.0 h1:phZCcypL/vtx6cGxObJgWZ5wexZF5SXFPLOM+ru0e/M=
 github.com/timonwong/logrlint v0.1.0/go.mod h1:Zleg4Gw+kRxNej+Ra7o+tEaW5k1qthTaYKU7rSD39LU=
 github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek=
 github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
 github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
+github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
 github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8=
 github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
 github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
+github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -1120,11 +1236,15 @@ github.com/tomarrell/wrapcheck/v2 v2.6.1 h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgA
 github.com/tomarrell/wrapcheck/v2 v2.6.1/go.mod h1:Eo+Opt6pyMW1b6cNllOcDSSoHO0aTJ+iF6BfCUbHltA=
 github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M=
 github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg=
+github.com/tomarrell/wrapcheck/v2 v2.8.0 h1:qDzbir0xmoE+aNxGCPrn+rUSxAX+nG6vREgbbXAR81I=
+github.com/tomarrell/wrapcheck/v2 v2.8.0/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg=
 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
 github.com/tommy-muehle/go-mnd/v2 v2.3.1 h1:a1S4+4HSXDJMgeODJH/t0EEKxcVla6Tasw+Zx9JJMog=
 github.com/tommy-muehle/go-mnd/v2 v2.3.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
 github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=
 github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
+github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
+github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
@@ -1242,6 +1362,8 @@ golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1261,6 +1383,9 @@ golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic= golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE= +golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1293,6 +1418,9 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdx golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1357,6 +1485,10 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod 
h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1394,6 +1526,9 @@ golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1514,9 +1649,21 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1528,6 
+1675,11 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1650,6 +1802,12 @@ golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 h1:NHLFZ56qCjD+0hYY3kE5 golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.4.1-0.20221208213631-3f74d914ae6d/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1866,6 +2024,8 @@ gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -1904,12 +2064,16 @@ honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= +honnef.co/go/tools v0.4.0 h1:lyXVV1c8wUBJRKqI8JgIpT8TW1VDagfYYaxbKa/HoL8= +honnef.co/go/tools v0.4.0/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= mvdan.cc/gofumpt v0.1.1 
h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4= mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= +mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= @@ -1920,6 +2084,8 @@ mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZW mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk= mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= +mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w= +mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/.bingo/variables.env b/.bingo/variables.env index df892f66d2..a6ff6b2e5f 100644 --- a/.bingo/variables.env +++ b/.bingo/variables.env @@ -1,4 +1,4 @@ -# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.6. DO NOT EDIT. +# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.7. DO NOT EDIT. # All tools are designed to be build inside $GOBIN. # Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk. GOBIN=${GOBIN:=$(go env GOBIN)} @@ -20,7 +20,7 @@ GOIMPORTS="${GOBIN}/goimports-v0.1.11" GOJSONTOYAML="${GOBIN}/gojsontoyaml-v0.1.0" -GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.49.0" +GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.51.1" GOTESPLIT="${GOBIN}/gotesplit-v0.2.1" diff --git a/.busybox-versions b/.busybox-versions index c7de0d0043..4d7eb55850 100644 --- a/.busybox-versions +++ b/.busybox-versions @@ -1,6 +1,6 @@ # Auto generated by busybox-updater.sh. 
DO NOT EDIT -amd64=dca9fb7b1ee7f3ff8707b8169358768768c48cc27ea057b1b221ab76623c1cf5 -arm64=1ee93555a9a3ca7f47efe6d426112becaa74e28cc9a991fed76fe065fd0a5f0d -arm=da43f4293d6bb46349f1384f0695e38b15205c97b5e895328c823745762c5524 -ppc64le=c7e9e969d12af1f7af99ad2708e0d2abd0e956b83dfe889db2162981a419935f -s390x=bfc2eaffe1e60d48b6beabea7c46f8c3765f5f311de80596e1685b5c94d5791e +amd64=998961e8aa5447941cb3eaca24a27df16750a615de95f8e8b6fee94168352c3d +arm64=73740c260d7e6f7d2b4c5ee3a7bb3b296530bad63f0ba555254307ffbedf0aff +arm=56956f7c87f67c75f5c67317b98e924caca4792899624cb173e04dbd9a854b76 +ppc64le=6a18cc4a594973b0d2191f5207ae1b69536fe9a27bfb8a93ebbfd78cdb6deaf9 +s390x=52a6f651c4049b3b5fd517ccb3e15100190c8ed646451d48b8de8ed3c230a433 diff --git a/.circleci/config.yml b/.circleci/config.yml index d2c464ec8a..97cdc78e28 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,16 +2,16 @@ version: 2.1 orbs: - go: circleci/go@1.7.1 - git-shallow-clone: guitarrapc/git-shallow-clone@2.4.0 + go: circleci/go@1.7.2 + git-shallow-clone: guitarrapc/git-shallow-clone@2.5.0 executors: golang: docker: - - image: cimg/go:1.19-node + - image: cimg/go:1.20-node golang-test: docker: - - image: cimg/go:1.19-node + - image: cimg/go:1.20-node - image: quay.io/thanos/docker-swift-onlyone-authv2-keystone:v0.1 jobs: diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000000..88e4c15cee --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,16 @@ +# For details, see https://github.com/devcontainers/images/tree/main/src/go +FROM mcr.microsoft.com/devcontainers/go:0-1.20 + +RUN echo "Downloading prometheus..." \ + && curl -sSL -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/prometheus/prometheus/tags" -o /tmp/tags.json \ + && VERSION_LIST="$(jq -r '.[] | select(.name | contains("rc") | not) | .name | split("v") | .[1]' /tmp/tags.json | tr -d '"' | sort -rV)" \ + && PROMETHEUS_LATEST_VERSION="$(echo "${VERSION_LIST}" | head -n 1)" \ + && PROMETHEUS_FILE_NAME="prometheus-${PROMETHEUS_LATEST_VERSION}.linux-amd64" \ + && curl -fsSLO "https://github.com/prometheus/prometheus/releases/download/v${PROMETHEUS_LATEST_VERSION}/${PROMETHEUS_FILE_NAME}.tar.gz" \ + && tar -xzf "${PROMETHEUS_FILE_NAME}.tar.gz" \ + && rm "${PROMETHEUS_FILE_NAME}.tar.gz" \ + && mv ${PROMETHEUS_FILE_NAME}/prometheus /go/bin/ + +ENV GOPROXY "https://proxy.golang.org" + +COPY .devcontainer/welcome-message.txt /usr/local/etc/vscode-dev-containers/first-run-notice.txt diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 88eb7140b9..e54fd06548 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,22 +1,68 @@ +// For more details, see https://aka.ms/devcontainer.json. 
{ - "name": "Codespaces for Thanos", - "customizations": { - "vscode": { - "settings": { - "go.toolsManagement.checkForUpdates": "local", - "go.useLanguageServer": true, - "go.gopath": "/go" - } - } - }, - "extensions": [ - "ms-azuretools.vscode-docker", - "ms-kubernetes-tools.vscode-kubernetes-tools", - "davidanson.vscode-markdownlint", - "timonwong.shellcheck", - "docsmsft.docs-yaml", - "Grafana.vscode-jsonnet", - "codezombiech.gitignore", - "golang.Go" - ] + "name": "Codespaces for Thanos", + "build": { + "context": "..", + "dockerfile": "Dockerfile" + }, + + "workspaceFolder": "/go/src/github.com/thanos-io/thanos", + "workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/thanos-io/thanos,type=bind,consistency=cached", + + // Features to add to the dev container. More info: https://containers.dev/features. + "features": { + "ghcr.io/devcontainers/features/docker-in-docker:2": {}, + "ghcr.io/devcontainers/features/node:1": {}, + "ghcr.io/devcontainers/features/kubectl-helm-minikube:1": {} + }, + + "onCreateCommand": "make build", + "postAttachCommand": { + "Run quickstart": "bash scripts/quickstart.sh" + }, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + "forwardPorts": [ + 1313, + 9090, + 9091, + 9092, + 10904, + 10914 + ], + "portsAttributes": { + "1313": { + "label": "Website" + }, + "9090": { + "label": "Prometheus Instance 1" + }, + "9091": { + "label": "Prometheus Instance 2" + }, + "9092": { + "label": "Prometheus Instance 3" + }, + "10904": { + "label": "Thanos Query 1" + }, + "10914": { + "label": "Thanos Query 2" + } + }, + + // Configure tool-specific properties. + "customizations": { + "vscode": { + "extensions": [ + "ms-azuretools.vscode-docker", + "ms-kubernetes-tools.vscode-kubernetes-tools", + "davidanson.vscode-markdownlint", + "timonwong.shellcheck", + "docsmsft.docs-yaml", + "Grafana.vscode-jsonnet", + "codezombiech.gitignore" + ] + } + } } diff --git a/.devcontainer/welcome-message.txt b/.devcontainer/welcome-message.txt new file mode 100644 index 0000000000..bafd1016d1 --- /dev/null +++ b/.devcontainer/welcome-message.txt @@ -0,0 +1,10 @@ +👋 Welcome to "Thanos" in GitHub Codespaces! + +🛠️ Your environment is fully setup with all the required software. + +🚀 All the components of Thanos are spinning up with the "quickstart" script, navigate over to the "Ports" tab to access it. + +ℹ️ Note: Change the visibility of "Prometheus instances" to "public" so that the API endpoints can be updated. + +🔍 To explore VS Code to its fullest, search using the Command Palette (Cmd/Ctrl + Shift + P or F1). 
+ diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d751f8c96c..58255b5f70 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,6 +6,18 @@ updates: schedule: interval: "weekly" labels: ["dependencies"] + ignore: + - dependency-name: "objstore" + - dependency-name: "promql-engine" + - package-ecosystem: "gomod" + directory: "/" + vendor: false + schedule: + interval: "daily" + labels: ["dependencies"] + allow: + - dependency-name: "objstore" + - dependency-name: "promql-engine" - package-ecosystem: "docker" directory: "/" schedule: @@ -18,4 +30,4 @@ updates: interval: weekly labels: - "dependencies" - + \ No newline at end of file diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index eeaedfe0c5..a129e83763 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -34,17 +34,17 @@ jobs: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - - name: Checkout repository + - name: Checkout code uses: actions/checkout@v3 - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: 1.20.x # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v2 with: languages: ${{ matrix.language }} config-file: ./.github/codeql/codeql-config.yml @@ -56,7 +56,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v1 + uses: github/codeql-action/autobuild@v2 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -70,4 +70,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/container-version.yaml b/.github/workflows/container-version.yaml index 6d35aa8e91..e4a443cd62 100644 --- a/.github/workflows/container-version.yaml +++ b/.github/workflows/container-version.yaml @@ -1,18 +1,21 @@ on: workflow_dispatch: schedule: - - cron: '37 13 * * *' + - cron: '0 * * * *' name: busybox-update workflow jobs: checkVersionAndCreatePR: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - name: Checkout code + uses: actions/checkout@v3 + - name: Run busybox updater run: | chmod +x ./scripts/busybox-updater.sh ./scripts/busybox-updater.sh shell: bash + - name: Create Pull Request uses: peter-evans/create-pull-request@v3 with: diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index b4d0e66c43..f722d7fb16 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -14,13 +14,13 @@ jobs: env: GOBIN: /tmp/.bin steps: - - name: Checkout code into the Go module directory. + - name: Checkout code uses: actions/checkout@v3 - name: Install Go uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.20.x - uses: actions/cache@v3 with: @@ -38,5 +38,6 @@ jobs: - name: Check docs run: make check-docs + - name: Check examples run: make check-examples diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml index ee555720ff..fa32c4951e 100644 --- a/.github/workflows/go.yaml +++ b/.github/workflows/go.yaml @@ -15,40 +15,47 @@ jobs: env: GOBIN: /tmp/.bin steps: - - name: Install Go. 
+ - name: Checkout code + uses: actions/checkout@v3 + + - name: Install Go uses: actions/setup-go@v3 with: - go-version: 1.19.x - - - name: Check out code into the Go module directory - uses: actions/checkout@v1 + go-version: 1.20.x - uses: actions/cache@v3 with: - path: ~/go/pkg/mod + path: | + ~/.cache/go-build + ~/.cache/golangci-lint + ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go- - - name: Cross build check. + - name: Cross build check run: make crossbuild + lint: runs-on: ubuntu-latest name: Linters (Static Analysis) for Go env: GOBIN: /tmp/.bin steps: - - name: Checkout code into the Go module directory. + - name: Checkout code uses: actions/checkout@v3 - name: Install Go uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.20.x - uses: actions/cache@v3 with: - path: ~/go/pkg/mod + path: | + ~/.cache/go-build + ~/.cache/golangci-lint + ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go- @@ -56,8 +63,9 @@ jobs: - name: Format. run: make format - - name: Linting & vetting. + - name: Linting & vetting run: make go-lint + e2e: strategy: fail-fast: false @@ -69,18 +77,21 @@ jobs: env: GOBIN: /tmp/.bin steps: + - name: Checkout code + uses: actions/checkout@v3 + - name: Install Go. uses: actions/setup-go@v3 with: - go-version: 1.19.x - - - name: Check out code into the Go module directory. - uses: actions/checkout@v3 + go-version: 1.20.x - uses: actions/cache@v3 with: - path: ~/go/pkg/mod + path: | + ~/.cache/go-build + ~/.cache/golangci-lint + ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - - name: Run e2e docker-based tests. + - name: Run e2e docker-based tests run: make test-e2e GH_PARALLEL=${{ matrix.parallelism }} GH_INDEX=${{ matrix.index }} diff --git a/.github/workflows/mixin.yaml b/.github/workflows/mixin.yaml index b745fdb1de..a06cf91956 100644 --- a/.github/workflows/mixin.yaml +++ b/.github/workflows/mixin.yaml @@ -10,13 +10,13 @@ jobs: build: runs-on: ubuntu-latest steps: - - name: Check out code into the Go module directory + - name: Checkout code uses: actions/checkout@v3 - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.20.x - name: Generate run: make examples @@ -28,13 +28,13 @@ jobs: runs-on: ubuntu-latest name: Linters (Static Analysis) for Jsonnet (mixin) steps: - - name: Checkout code into the Go module directory. + - name: Checkout code uses: actions/checkout@v3 - name: Install Go uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.20.x - name: Format run: | diff --git a/.github/workflows/react.yml b/.github/workflows/react.yml index 38eee69652..97a818917d 100644 --- a/.github/workflows/react.yml +++ b/.github/workflows/react.yml @@ -14,15 +14,19 @@ jobs: node: [ '14' ] name: React UI test on Node ${{ matrix.node }} steps: - - uses: actions/checkout@v2 - - name: install nodejs - uses: actions/setup-node@v2 + - name: Checkout code + uses: actions/checkout@v3 + + - name: Install nodejs + uses: actions/setup-node@v3 with: node-version: ${{ matrix.node }} + - uses: actions/cache@v3 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} restore-keys: | ${{ runner.os }}-node- + - run: make react-app-test diff --git a/.gitignore b/.gitignore index b11d066506..87538edf29 100644 --- a/.gitignore +++ b/.gitignore @@ -14,8 +14,9 @@ kube/.minikube .mdoxcache # Ignore e2e working dirs. 
-data/ +/data/ test/e2e/e2e_* +scripts/data/ # Ignore benchmarks dir. benchmarks/ diff --git a/.go-version b/.go-version index bc4493477a..5fb5a6b4f5 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.19 +1.20 diff --git a/.promu.yml b/.promu.yml index 347e01d714..f5313b0a6b 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,5 +1,5 @@ go: - version: 1.19 + version: 1.20 repository: path: github.com/thanos-io/thanos build: diff --git a/CHANGELOG.md b/CHANGELOG.md index 782f020a99..f6a03a5ae9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,28 +10,144 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ## Unreleased +### Added + +- [#6185](https://github.com/thanos-io/thanos/pull/6185) Tracing: tracing in OTLP supports configuring service_name. +- [#6192](https://github.com/thanos-io/thanos/pull/6192) Store: add flag `bucket-web-label` to select the label to use as timeline title in web UI. +- [#6195](https://github.com/thanos-io/thanos/pull/6195) Receive: add flag `tsdb.too-far-in-future.time-window` to prevent clock-skewed samples from polluting the TSDB head and blocking all valid incoming samples. +- [#6273](https://github.com/thanos-io/thanos/pull/6273) Mixin: Allow specifying an instance name filter in dashboards. +- [#6163](https://github.com/thanos-io/thanos/pull/6163) Receiver: Add hidden flag `--receive-forward-max-backoff` to configure the max backoff for forwarding requests. + ### Fixed + +- [#6172](https://github.com/thanos-io/thanos/pull/6172) query-frontend: return JSON formatted errors for invalid PromQL expression in the split by interval middleware. +- [#6171](https://github.com/thanos-io/thanos/pull/6171) Store: fix error handling on limits. +- [#6183](https://github.com/thanos-io/thanos/pull/6183) Receiver: fix off-by-one in multitsdb flush that would result in empty blocks if the head only contains one sample. +- [#6197](https://github.com/thanos-io/thanos/pull/6197) Exemplar OTel: Fix exemplar for otel to use traceId instead of spanId and sample only if trace is sampled. +- [#6207](https://github.com/thanos-io/thanos/pull/6207) Receive: Remove the shipper once a tenant has been pruned. +- [#6216](https://github.com/thanos-io/thanos/pull/6216) Receiver: removed hard-coded value of EnableExemplarStorage flag and set it according to max-exemplar value. +- [#6222](https://github.com/thanos-io/thanos/pull/6222) mixin(Receive): Fix tenant series received charts. +- [#6218](https://github.com/thanos-io/thanos/pull/6218) mixin(Store): handle ResourceExhausted as a non-server error. As a consequence, this error won't contribute to Store's grpc errors alerts. +- [#6271](https://github.com/thanos-io/thanos/pull/6271) Receive: Fix segfault in `LabelValues` during head compaction. +- [#6306](https://github.com/thanos-io/thanos/pull/6306) Tracing: tracing in OTLP utilizes the OTEL_TRACES_SAMPLER env variable. +- [#6330](https://github.com/thanos-io/thanos/pull/6330) Store: Fix inconsistent error for series limits. +- [#6342](https://github.com/thanos-io/thanos/pull/6342) Cache/Redis: Upgrade `rueidis` to v1.0.2 to improve error handling while shrinking a redis cluster. +- [#6325](https://github.com/thanos-io/thanos/pull/6325) Store: return gRPC resource exhausted error for byte limiter. + +### Changed +- [#6168](https://github.com/thanos-io/thanos/pull/6168) Receiver: Make ketama hashring fail early when configured with number of nodes lower than the replication factor.
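The `tsdb.too-far-in-future.time-window` flag added by #6195 above amounts to a guard at ingest time: a sample whose timestamp lies further than the configured window beyond the receiver's clock is rejected. A minimal sketch of such a check; the names are illustrative, not the actual Thanos implementation:

```go
// Illustrative guard for rejecting clock-skewed samples; function and
// parameter names are hypothetical, not taken from the Thanos codebase.
package ingest

import "time"

// tooFarInFuture reports whether a sample timestamp (milliseconds since
// epoch, as in remote write) lies beyond the allowed window from "now".
// A window of 0 disables the check, matching the flag's default.
func tooFarInFuture(sampleTsMs int64, now time.Time, window time.Duration) bool {
	if window == 0 {
		return false
	}
	limit := now.Add(window).UnixMilli()
	return sampleTsMs > limit
}
```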
+- [#6201](https://github.com/thanos-io/thanos/pull/6201) Query-Frontend: Disable absent and absent_over_time for vertical sharding. +- [#6212](https://github.com/thanos-io/thanos/pull/6212) Query-Frontend: Disable scalar for vertical sharding. +- [#6107](https://github.com/thanos-io/thanos/pull/6107) Change default user id in container image from 0 (root) to 1001. +- [#6228](https://github.com/thanos-io/thanos/pull/6228) Conditionally generate debug messages in ProxyStore to avoid memory bloat. +- [#6231](https://github.com/thanos-io/thanos/pull/6231) mixins: Add code/grpc-code dimension to error widgets. +- [#6244](https://github.com/thanos-io/thanos/pull/6244) mixin(Rule): Add rule evaluation failures to the Rule dashboard. +- [#6303](https://github.com/thanos-io/thanos/pull/6303) Store: added and started using streamed snappy encoding for postings lists instead of the block-based one. This leads to constant memory usage during decompression. This approximately halves memory usage when decompressing a postings list in index cache. +- [#6071](https://github.com/thanos-io/thanos/pull/6071) Query Frontend: *breaking :warning:* Add experimental native histogram support for which we updated and aligned with the [Prometheus common](https://github.com/prometheus/common) model, which is used for caching, so a cache reset is required. +- [#6163](https://github.com/thanos-io/thanos/pull/6163) Receiver: changed max backoff from 30s to 5s for forwarding requests. Can be configured with `--receive-forward-max-backoff`. +- [#6327](https://github.com/thanos-io/thanos/pull/6327) *: *breaking :warning:* Use histograms instead of summaries for instrumented handlers. +- [#6322](https://github.com/thanos-io/thanos/pull/6322) Logging: Avoid expensive log.Valuer evaluation for disallowed levels. +- [#6358](https://github.com/thanos-io/thanos/pull/6358) Query: Add +Inf bucket to query duration metrics. +- [#6363](https://github.com/thanos-io/thanos/pull/6363) Store: Check context error when expanding postings. + +### Removed + +## [v0.31.0](https://github.com/thanos-io/thanos/tree/release-0.31) - 22.03.2023 + +### Added + +- [#5990](https://github.com/thanos-io/thanos/pull/5990) Cache/Redis: add support for Redis Sentinel via new option `master_name`. +- [#6008](https://github.com/thanos-io/thanos/pull/6008) *: Add counter metric `gate_queries_total` to gate. +- [#5926](https://github.com/thanos-io/thanos/pull/5926) Receiver: Add experimental string interning in writer. Can be enabled with a hidden flag `--writer.intern`. +- [#5773](https://github.com/thanos-io/thanos/pull/5773) Store: Support disabling cache index header file by setting `--no-cache-index-header`. When toggled, Stores can run without needing persistent disks. +- [#5653](https://github.com/thanos-io/thanos/pull/5653) Receive: Allow setting hashing algorithm per tenant in hashrings config. +- [#6074](https://github.com/thanos-io/thanos/pull/6074) *: Add histogram metrics `thanos_store_server_series_requested` and `thanos_store_server_chunks_requested` to all Stores. +- [#6074](https://github.com/thanos-io/thanos/pull/6074) *: Allow configuring series and sample limits per `Series` request for all Stores. +- [#6104](https://github.com/thanos-io/thanos/pull/6104) Objstore: Support S3 session token. +- [#5548](https://github.com/thanos-io/thanos/pull/5548) Query: Add experimental support for load balancing across multiple Store endpoints. +- [#6148](https://github.com/thanos-io/thanos/pull/6148) Query-frontend: Add `traceID` to slow query detected log line.
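The #6303 entry above swaps the block-based snappy format for the streaming one, which is what keeps decompression memory constant: the streaming format works on fixed-size chunks instead of materializing the whole postings list. A sketch of the two formats using `github.com/golang/snappy` (Thanos' index cache has its own codec layer, so this is illustrative only):

```go
// Sketch contrasting block vs. streaming snappy with github.com/golang/snappy.
// Illustrative only; not the actual Thanos index-cache codec.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy"
)

func main() {
	payload := bytes.Repeat([]byte("posting"), 1024)

	// Block format: the whole payload must be materialized to decode,
	// so memory usage grows with the size of the postings list.
	block := snappy.Encode(nil, payload)
	decoded, _ := snappy.Decode(nil, block)
	fmt.Println(len(decoded))

	// Streaming format: data is (de)compressed in fixed-size chunks,
	// which keeps decompression memory roughly constant.
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	_, _ = w.Write(payload)
	_ = w.Close()

	r := snappy.NewReader(&buf)
	streamed, _ := io.ReadAll(r)
	fmt.Println(len(streamed))
}
```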
+- [#6153](https://github.com/thanos-io/thanos/pull/6153) Query-frontend: Add `remote_user` (from http basic auth) and `remote_addr` to slow query detected log line. + +### Fixed + +- [#5995](https://github.com/thanos-io/thanos/pull/5995) Sidecar: Loads TLS certificate during startup. +- [#6044](https://github.com/thanos-io/thanos/pull/6044) Receive: Mark out-of-window errors as conflict when out-of-window samples ingestion is used. +- [#6050](https://github.com/thanos-io/thanos/pull/6050) Store: Re-try bucket store initial sync upon failure. +- [#6067](https://github.com/thanos-io/thanos/pull/6067) Receive: Fix panic when querying uninitialized TSDBs. +- [#6082](https://github.com/thanos-io/thanos/pull/6082) Query: Don't error when no stores are matched. +- [#6098](https://github.com/thanos-io/thanos/pull/6098) Cache/Redis: Upgrade `rueidis` to v0.0.93 to fix potential panic when the client-side caching is disabled. +- [#6103](https://github.com/thanos-io/thanos/pull/6103) Mixins(Rule): Fix expression for long rule evaluations. +- [#6121](https://github.com/thanos-io/thanos/pull/6121) Receive: Deduplicate meta-monitoring queries for [Active Series Limiting](https://thanos.io/tip/components/receive.md/#active-series-limiting-experimental). +- [#6137](https://github.com/thanos-io/thanos/pull/6137) Downsample: Repair of non-empty XOR chunks during 1h downsampling. +- [#6125](https://github.com/thanos-io/thanos/pull/6125) Query Frontend: Fix vertically shardable instant queries not producing sorted results for `sort`, `sort_desc`, `topk` and `bottomk` functions. +- [#6203](https://github.com/thanos-io/thanos/pull/6203) Receive: Fix panic in head compaction under high query load. + +### Changed + +- [#6010](https://github.com/thanos-io/thanos/pull/6010) *: Upgrade Prometheus to v0.42.0. +- [#5999](https://github.com/thanos-io/thanos/pull/5999) *: Upgrade Alertmanager dependency to v0.25.0. +- [#5887](https://github.com/thanos-io/thanos/pull/5887) Tracing: Make sure rate limiting sampler is the default, as was the case in version pre-0.29.0. +- [#5997](https://github.com/thanos-io/thanos/pull/5997) Rule: switch to miekgdns DNS resolver as the default one. +- [#6126](https://github.com/thanos-io/thanos/pull/6126) Build with Go 1.20. +- [#6035](https://github.com/thanos-io/thanos/pull/6035) Tools (replicate): Support all types of matchers to match blocks for replication. Change matcher parameter from string slice to a single string. +- [#6131](https://github.com/thanos-io/thanos/pull/6131) Store: *breaking :warning:* Use Histograms instead of Summaries for bucket metrics. + +## [v0.30.2](https://github.com/thanos-io/thanos/tree/release-0.30) - 28.01.2023 + +### Fixed + +- [#6066](https://github.com/thanos-io/thanos/pull/6066) Tracing: fixed panic because of nil sampler. +- [#6086](https://github.com/thanos-io/thanos/pull/6086) Store Gateway: Fix store-gateway deadlock due to not closing BlockSeriesClient. + +## [v0.30.1](https://github.com/thanos-io/thanos/tree/release-0.30) - 4.01.2023 + +### Fixed + +- [#6009](https://github.com/thanos-io/thanos/pull/6009) Query Frontend/Store: fix duplicate metrics registration in Redis client. + +## [v0.30.0](https://github.com/thanos-io/thanos/tree/release-0.30) - 2.01.2023 + +NOTE: Querier's `query.promql-engine` flag enabling the new PromQL engine is now unhidden. We encourage users to use the new experimental PromQL engine for efficiency reasons.
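Both fields that #6153 adds to the slow-query log line come straight off the incoming HTTP request. A hypothetical sketch, not the actual query-frontend code, using the go-kit logger the project already depends on:

```go
// Sketch of pulling the fields #6153 adds to the slow-query log line out of
// an *http.Request; names are illustrative, not the query-frontend's code.
package logging

import (
	"net/http"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func logSlowQuery(logger log.Logger, r *http.Request, query string) {
	remoteUser := ""
	if user, _, ok := r.BasicAuth(); ok { // parsed from the Authorization header
		remoteUser = user
	}
	level.Info(logger).Log(
		"msg", "slow query detected",
		"remote_user", remoteUser,
		"remote_addr", r.RemoteAddr,
		"query", query,
	)
}
```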
+ +### Fixed + +- [#5716](https://github.com/thanos-io/thanos/pull/5716) DNS: Fix miekgdns resolver LookupSRV to work with CNAME records. - [#5844](https://github.com/thanos-io/thanos/pull/5844) Query Frontend: Fixes @ modifier time range when splitting queries by interval. -- [#5854](https://github.com/thanos-io/thanos/pull/5854) Query Frontend: Handles `lookback_delta` param in query frontend. +- [#5854](https://github.com/thanos-io/thanos/pull/5854) Query Frontend: `lookback_delta` param is now handled in query frontend. +- [#5860](https://github.com/thanos-io/thanos/pull/5860) Query: Fixed bug of not showing query warnings in Thanos UI. +- [#5856](https://github.com/thanos-io/thanos/pull/5856) Store: Fixed handling of debug logging flag. - [#5230](https://github.com/thanos-io/thanos/pull/5230) Rule: Stateless ruler support restoring `for` state from query API servers. The query API servers should be able to access the remote write storage. - [#5880](https://github.com/thanos-io/thanos/pull/5880) Query Frontend: Fixes some edge cases of query sharding analysis. -- [#5893](https://github.com/thanos-io/thanos/pull/5893) Cache: fixed redis client not respecting `SetMultiBatchSize` config value. +- [#5893](https://github.com/thanos-io/thanos/pull/5893) Cache: Fixed redis client not respecting `SetMultiBatchSize` config value. +- [#5966](https://github.com/thanos-io/thanos/pull/5966) Query: Fixed mint and maxt when selecting series for the `api/v1/series` HTTP endpoint. +- [#5948](https://github.com/thanos-io/thanos/pull/5948) Store: Fixed wrong calculation of `chunks_fetched_duration`. +- [#5910](https://github.com/thanos-io/thanos/pull/5910) Receive: Fixed ketama quorum bug that could cause a success response for failed replication. This also heavily optimizes receiver CPU use. ### Added -- [#5814](https://github.com/thanos-io/thanos/pull/5814) Store: Add metric `thanos_bucket_store_postings_size_bytes` that shows the distribution of how many postings (in bytes) were needed for each Series() call in Thanos Store. Useful for determining limits. -- [#5801](https://github.com/thanos-io/thanos/pull/5801) Store: add a new limiter `--store.grpc.downloaded-bytes-limit` that limits the number of bytes downloaded in each Series/LabelNames/LabelValues call. Use `thanos_bucket_store_postings_size_bytes` for determining the limits. -- [#5839](https://github.com/thanos-io/thanos/pull/5839) Receive: Add parameter `--tsdb.out-of-order.time-window` to set time window for experimental out-of-order samples ingestion. Disabled by default (set to 0s). Please note if you enable this option and you use compactor, make sure you set the `--enable-vertical-compaction` flag, otherwise you might risk compactor halt. -- [#5836](https://github.com/thanos-io/thanos/pull/5836) Receive: Add hidden flag `tsdb.memory-snapshot-on-shutdown` to enable experimental TSDB feature to snapshot on shutdown. This is intended to speed up receiver restart. +- [#5814](https://github.com/thanos-io/thanos/pull/5814) Store: Added metric `thanos_bucket_store_postings_size_bytes` that shows the distribution of how many postings (in bytes) were needed for each Series() call in Thanos Store. Useful for determining limits. +- [#5703](https://github.com/thanos-io/thanos/pull/5703) StoreAPI: Added `hash` field to series' chunks. Store gateway and receive implement that field, and the proxy leverages it for quicker deduplication.
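The `thanos_bucket_store_postings_size_bytes` metric from #5814 above is a plain Prometheus histogram. Registering one looks roughly like this; the bucket layout here is an illustrative guess, not the values Thanos actually uses:

```go
// Rough shape of the #5814 metric registration; bucket boundaries are
// illustrative guesses, not the values from the Thanos codebase.
package store

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func newPostingsSizeHistogram(reg prometheus.Registerer) prometheus.Histogram {
	return promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
		Name:    "thanos_bucket_store_postings_size_bytes",
		Help:    "Size in bytes of the postings needed for a single Series() call.",
		Buckets: prometheus.ExponentialBuckets(32, 4, 10), // 32B .. ~8MiB
	})
}
```

Note that `promauto` is used here because the Makefile's faillint rules (visible further down in this diff) steer the codebase away from raw `prometheus.NewHistogram` constructors.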
+- [#5801](https://github.com/thanos-io/thanos/pull/5801) Store: Added a new flag `--store.grpc.downloaded-bytes-limit` that limits the number of bytes downloaded in each Series/LabelNames/LabelValues call. Use `thanos_bucket_store_postings_size_bytes` for determining the limits. +- [#5836](https://github.com/thanos-io/thanos/pull/5836) Receive: Added hidden flag `tsdb.memory-snapshot-on-shutdown` to enable experimental TSDB feature to snapshot on shutdown. This is intended to speed up receiver restart. +- [#5839](https://github.com/thanos-io/thanos/pull/5839) Receive: Added parameter `--tsdb.out-of-order.time-window` to set time window for experimental out-of-order samples ingestion. Disabled by default (set to 0s). Please note if you enable this option and you use compactor, make sure you set the `--enable-vertical-compaction` flag, otherwise you might risk compactor halt. +- [#5889](https://github.com/thanos-io/thanos/pull/5889) Query Frontend: Added support for vertical sharding of `label_replace` and `label_join` functions. - [#5865](https://github.com/thanos-io/thanos/pull/5865) Compact: Retry on sync metas error. -- [#5889](https://github.com/thanos-io/thanos/pull/5889) Query Frontend: Support sharding vertical sharding `label_replace` and `label_join` functions. -- [#5819](https://github.com/thanos-io/thanos/pull/5819) Store: Add a few objectives for Store's data touched/fetched amount and sizes. They are: 50, 95, and 99 quantiles. +- [#5819](https://github.com/thanos-io/thanos/pull/5819) Store: Added a few objectives for Store's data summaries (touched/fetched amount and sizes). They are: 50, 95, and 99 quantiles. +- [#5837](https://github.com/thanos-io/thanos/pull/5837) Store: Added streaming retrieval of series from object storage. +- [#5940](https://github.com/thanos-io/thanos/pull/5940) Objstore: Support for authenticating to Swift using application credentials. +- [#5945](https://github.com/thanos-io/thanos/pull/5945) Tools: Added new `no-downsample` marker to skip blocks when downsampling via `thanos tools bucket mark --marker=no-downsample-mark.json`. This will skip downsampling for blocks with the new marker. +- [#5977](https://github.com/thanos-io/thanos/pull/5977) Tools: Added remove flag on bucket mark command to remove deletion, no-downsample or no-compact markers on the block. ### Changed -- [#5716](https://github.com/thanos-io/thanos/pull/5716) DNS: Fix miekgdns resolver LookupSRV to work with CNAME records. +- [#5785](https://github.com/thanos-io/thanos/pull/5785) Query: `thanos_store_nodes_grpc_connections` now trims `external_labels` label names longer than 1000 characters. It also allows customizing which labels to preserve using the `query.conn-metric.label` flag. +- [#5542](https://github.com/thanos-io/thanos/pull/5542) Mixin: Added query concurrency panel to Querier dashboard. - [#5846](https://github.com/thanos-io/thanos/pull/5846) Query Frontend: vertical query sharding supports subqueries. +- [#5593](https://github.com/thanos-io/thanos/pull/5593) Cache: switch Redis client to [Rueidis](https://github.com/rueian/rueidis). Rueidis is [faster](https://github.com/rueian/rueidis#benchmark-comparison-with-go-redis-v9) and provides [client-side caching](https://redis.io/docs/manual/client-side-caching/). It is highly recommended to use it so that repeated requests for the same key are served from the local cache instead of hitting Redis again.
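The Rueidis client-side caching mentioned in #5593 above is opt-in per command. Basic usage looks roughly like the following; the address and key are placeholders, and the exact API shape should be checked against the rueidis version pinned in go.mod:

```go
// Minimal Rueidis client-side caching sketch; address and key are
// placeholders, and this is not the Thanos cache client code.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/rueian/rueidis"
)

func main() {
	client, err := rueidis.NewClient(rueidis.ClientOption{
		InitAddress: []string{"127.0.0.1:6379"},
	})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx := context.Background()
	// DoCache serves repeated GETs from a local cache (with server-assisted
	// invalidation) instead of going back to Redis on every call.
	resp := client.DoCache(ctx, client.B().Get().Key("index:postings:abc").Cache(), time.Minute)
	val, err := resp.ToString()
	fmt.Println(val, err)
}
```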
+- [#5896](https://github.com/thanos-io/thanos/pull/5896) *: Upgrade Prometheus to v0.40.7 without implementing native histogram support. *Querying native histograms will fail with `Error executing query: invalid chunk encoding ""` and native histograms in write requests are ignored.* +- [#5909](https://github.com/thanos-io/thanos/pull/5909) Receive: Compact tenant head after no appends have happened for 1.5 `tsdb.max-block-size`. +- [#5838](https://github.com/thanos-io/thanos/pull/5838) Mixin: Added data touched type to Store dashboard. +- [#5922](https://github.com/thanos-io/thanos/pull/5922) Compact: Retry on clean, partially marked errors when possible. ### Removed @@ -183,7 +299,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#5145](https://github.com/thanos-io/thanos/pull/5145) UI: Remove old Prometheus UI. -## [v0.25.2](https://github.com/thanos-io/thanos/tree/release-0.25) - 2022.03.24 +## [v0.25.2](https://github.com/thanos-io/thanos/releases/tag/v0.25.2) - 2022.03.24 ### Fixed diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f78ed3b81e..560b1cabcb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,10 +66,14 @@ The following section explains various suggestions and procedures to note during ### Prerequisites -* It is strongly recommended that you use Linux distributions systems or OSX for development. -* Go 1.19.x or higher. +* It is strongly recommended that you use a Linux distribution or macOS for development. +* Running [WSL 2 (on Windows)](https://learn.microsoft.com/en-us/windows/wsl/) is also possible. Note that if during development you run a local Kubernetes cluster and have a Service with `service.spec.sessionAffinity: ClientIP`, it will break things until it's removed[^windows_xt_recent]. +* Go 1.20.x or higher. +* Docker (to run e2e tests). * For React UI, you will need a working NodeJS environment and the npm package manager to compile the Web UI assets. +[^windows_xt_recent]: A WSL 2 kernel recompilation is required to enable the `xt_recent` kernel module, used by `iptables` in `kube-proxy` to implement ClientIP session affinity. See [issue in WSL](https://github.com/microsoft/WSL/issues/7124). + ### First Steps It's key to get familiarized with the style guide and mechanics of Thanos, especially if your contribution touches more than one component of the Thanos distributed system. We recommend: @@ -77,6 +81,9 @@ It's key to get familiarized with the style guide and mechanics of Thanos, espec * Reading the [getting started docs](docs/getting-started.md) and working through them, or alternatively working through the [Thanos tutorial](https://killercoda.com/thanos). * Familiarizing yourself with our [coding style guidelines.](docs/contributing/coding-style-guide.md). * Familiarizing yourself with the [Makefile](Makefile) commands, for example `format`, `build`, `proto`, `docker` and `test`. `make help` will print most of available commands with relevant details. +* To get started, create a codespace for this repository by clicking this 👉 [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=109162639) + * A codespace will open in a web-based version of Visual Studio Code. The [dev container](.devcontainer/devcontainer.json) is fully configured with the software needed for this project.
+ * **Note**: Dev containers is an open spec which is supported by [GitHub Codespaces](https://github.com/codespaces) and [other tools](https://containers.dev/supporting). * Spin up a prebuilt dev environment using Gitpod.io [![Gitpod Ready-to-Code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/thanos-io/thanos) * In case you want to develop the project locally, install **Golang** in your machine. Here is a nice [gist](https://gist.github.com/nikhita/432436d570b89cab172dcf2894465753) for this purpose. * You can run an interactive example, which populates some data as well, by following the steps mentioned [here](https://github.com/thanos-io/thanos/blob/main/tutorials/interactive-example/README.md). diff --git a/Dockerfile b/Dockerfile index 5c67815a70..74dc0f2c1a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,4 +5,11 @@ LABEL maintainer="The Thanos Authors" COPY /thanos_tmp_for_docker /bin/thanos +RUN adduser \ + -D `#Dont assign a password` \ + -H `#Dont create home directory` \ + -u 1001 `#User id`\ + thanos && \ + chown thanos /bin/thanos +USER 1001 ENTRYPOINT [ "/bin/thanos" ] diff --git a/Dockerfile.multi-arch b/Dockerfile.multi-arch index 756b3e83c3..2660ccf6e7 100644 --- a/Dockerfile.multi-arch +++ b/Dockerfile.multi-arch @@ -9,4 +9,11 @@ ARG OS="linux" COPY .build/${OS}-${ARCH}/thanos /bin/thanos +RUN adduser \ + -D `#Dont assign a password` \ + -H `#Dont create home directory` \ + -u 1001 `#User id`\ + thanos && \ + chown thanos /bin/thanos +USER 1001 ENTRYPOINT [ "/bin/thanos" ] diff --git a/Dockerfile.multi-stage b/Dockerfile.multi-stage index 17df03017a..1573e3114b 100644 --- a/Dockerfile.multi-stage +++ b/Dockerfile.multi-stage @@ -1,6 +1,6 @@ # By default we pin to amd64 sha. Use make docker to automatically adjust for arm64 versions. 
ARG BASE_DOCKER_SHA="14d68ca3d69fceaa6224250c83d81d935c053fb13594c811038c461194599973" -FROM golang:1.19-alpine3.16 as builder +FROM golang:1.20-alpine3.16 as builder WORKDIR $GOPATH/src/github.com/thanos-io/thanos # Change in the docker context invalidates the cache so to leverage docker @@ -21,4 +21,11 @@ LABEL maintainer="The Thanos Authors" COPY --from=builder /go/bin/thanos /bin/thanos +RUN adduser \ + -D `#Dont assign a password` \ + -H `#Dont create home directory` \ + -u 1001 `#User id`\ + thanos && \ + chown thanos /bin/thanos +USER 1001 ENTRYPOINT [ "/bin/thanos" ] diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 1361748a25..8422048e09 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,19 +1,21 @@ # Core Maintainers of this repository -| Name | Email | Slack | GitHub | Company | -|-----------------------|------------------------|--------------------------|------------------------------------------------|---------------------| -| Bartłomiej Płotka | bwplotka@gmail.com | `@bwplotka` | [@bwplotka](https://github.com/bwplotka) | Red Hat | -| Frederic Branczyk | fbranczyk@gmail.com | `@brancz` | [@brancz](https://github.com/brancz) | Polar Signals | -| Giedrius Statkevičius | giedriuswork@gmail.com | `@Giedrius Statkevičius` | [@GiedriusS](https://github.com/GiedriusS) | Vinted | -| Kemal Akkoyun | kakkoyun@gmail.com | `@kakkoyun` | [@kakkoyun](https://github.com/kakkoyun) | Polar Signals | -| Lucas Servén Marín | lserven@gmail.com | `@squat` | [@squat](https://github.com/squat) | Red Hat | -| Prem Saraswat | prmsrswt@gmail.com | `@Prem Saraswat` | [@onprem](https://github.com/onprem) | Red Hat | -| Matthias Loibl | mail@matthiasloibl.com | `@metalmatze` | [@metalmatze](https://github.com/metalmatze) | Polar Signals | -| Ben Ye | yb532204897@gmail.com | `@yeya24` | [@yeya24](https://github.com/yeya24) | Amazon Web Services | -| Wiard van Rij | wiard@outlook.com | `@wiard van Rij` | [@wiardvanrij](https://github.com/wiardvanrij) | Roku | -| Matej Gera | matejgera@gmail.com | `@Matej Gera` | [@matej-g](https://github.com/matej-g) | Red Hat | - -We are bunch of people from different companies with various interests and skills. We are from different parts of the world: Germany, Italy, Lithuania, Poland, UK, India and China. We have something in common though: We all share the love for OpenSource, Go, Prometheus, :coffee: and Observability topics. 
+| Name | Email | Slack | GitHub | Company | +|-----------------------|---------------------------|--------------------------|--------------------------------------------------|---------------------| +| Bartłomiej Płotka | bwplotka@gmail.com | `@bwplotka` | [@bwplotka](https://github.com/bwplotka) | Red Hat | +| Frederic Branczyk | fbranczyk@gmail.com | `@brancz` | [@brancz](https://github.com/brancz) | Polar Signals | +| Giedrius Statkevičius | giedriuswork@gmail.com | `@Giedrius Statkevičius` | [@GiedriusS](https://github.com/GiedriusS) | Vinted | +| Kemal Akkoyun | kakkoyun@gmail.com | `@kakkoyun` | [@kakkoyun](https://github.com/kakkoyun) | Polar Signals | +| Lucas Servén Marín | lserven@gmail.com | `@squat` | [@squat](https://github.com/squat) | Red Hat | +| Prem Saraswat | prmsrswt@gmail.com | `@Prem Saraswat` | [@onprem](https://github.com/onprem) | Red Hat | +| Matthias Loibl | mail@matthiasloibl.com | `@metalmatze` | [@metalmatze](https://github.com/metalmatze) | Polar Signals | +| Ben Ye | yb532204897@gmail.com | `@yeya24` | [@yeya24](https://github.com/yeya24) | Amazon Web Services | +| Wiard van Rij | wiard@outlook.com | `@wiard van Rij` | [@wiardvanrij](https://github.com/wiardvanrij) | Roku | +| Matej Gera | matejgera@gmail.com | `@Matej Gera` | [@matej-g](https://github.com/matej-g) | Red Hat | +| Filip Petkovski | filip.petkovsky@gmail.com | `@Filip Petkovski` | [@fpetkovski](https://github.com/fpetkovski) | Shopify | +| Saswata Mukherjee | saswata.mukhe@gmail.com | `@saswatamcode` | [@saswatamcode](https://github.com/saswatamcode) | Red Hat | + +We are a bunch of people from different companies with various interests and skills. We are from different parts of the world: Germany, Holland, Lithuania, US, UK and India. We have something in common though: We all share the love for OpenSource, Go, Prometheus, :coffee: and Observability topics. As either Software Developers or SRE (or both!) we've chosen to maintain (mostly in our free time) Thanos, the de facto way to scale awesome [Prometheus](https://prometheus.io) project. @@ -29,15 +31,14 @@ We also have some nice souls that help triaging issues and PRs.
See [here](https Full list of triage persons is displayed below: -| Name | Slack | GitHub | Company | -|-----------------|--------------------|----------------------------------------------|---------| -| Adrien Fillon | `@Adrien F` | [@adrien-f](https://github.com/adrien-f) | | -| Ian Billett | `@billett` | [@bill3tt](https://github.com/bill3tt) | Red Hat | -| Martin Chodur | `@FUSAKLA` | [@fusakla](https://github.com/fusakla) | | -| Michael Dai | `@jojohappy` | [@jojohappy](https://github.com/jojohappy) | | -| Xiang Dai | `@daixiang0` | [@daixiang0](https://github.com/daixiang0) | | -| Jimmie Han | `@hanjm` | [@hanjm](https://github.com/hanjm) | Tencent | -| Filip Petkovski | `@Filip Petkovski` | [@fpetkovski](https://github.com/fpetkovski) | Shopify | +| Name | Slack | GitHub | Company | +|---------------|--------------|--------------------------------------------|---------| +| Adrien Fillon | `@Adrien F` | [@adrien-f](https://github.com/adrien-f) | | +| Ian Billett | `@billett` | [@bill3tt](https://github.com/bill3tt) | Red Hat | +| Martin Chodur | `@FUSAKLA` | [@fusakla](https://github.com/fusakla) | | +| Michael Dai | `@jojohappy` | [@jojohappy](https://github.com/jojohappy) | | +| Xiang Dai | `@daixiang0` | [@daixiang0](https://github.com/daixiang0) | | +| Jimmie Han | `@hanjm` | [@hanjm](https://github.com/hanjm) | Tencent | Please reach any of the maintainer on slack or email if you want to help as well. diff --git a/Makefile b/Makefile index 4f7293778f..986503f1e0 100644 --- a/Makefile +++ b/Makefile @@ -308,7 +308,7 @@ test: export THANOS_TEST_ALERTMANAGER_PATH= $(ALERTMANAGER) test: check-git install-tool-deps @echo ">> install thanos GOOPTS=${GOOPTS}" @echo ">> running unit tests (without /test/e2e). Do export THANOS_TEST_OBJSTORE_SKIP=GCS,S3,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI if you want to skip e2e tests against all real store buckets. Current value: ${THANOS_TEST_OBJSTORE_SKIP}" - @go test $(shell go list ./... | grep -v /vendor/ | grep -v /test/e2e); + @go test -timeout 15m $(shell go list ./... | grep -v /vendor/ | grep -v /test/e2e); .PHONY: test-local test-local: ## Runs test excluding tests for ALL object storage integrations. @@ -395,6 +395,7 @@ github.com/prometheus/client_golang/prometheus.{DefaultGatherer,DefBuckets,NewUn github.com/prometheus/client_golang/prometheus.{NewCounter,NewCounterVec,NewCounterVec,NewGauge,NewGaugeVec,NewGaugeFunc,\ NewHistorgram,NewHistogramVec,NewSummary,NewSummaryVec}=github.com/prometheus/client_golang/prometheus/promauto.{NewCounter,\ NewCounterVec,NewCounterVec,NewGauge,NewGaugeVec,NewGaugeFunc,NewHistorgram,NewHistogramVec,NewSummary,NewSummaryVec},\ +github.com/NYTimes/gziphandler.{GzipHandler}=github.com/klauspost/compress/gzhttp.{GzipHandler},\ sync/atomic=go.uber.org/atomic,github.com/cortexproject/cortex=github.com/thanos-io/thanos/internal/cortex,\ io/ioutil.{Discard,NopCloser,ReadAll,ReadDir,ReadFile,TempDir,TempFile,Writefile}" $(shell go list ./... | grep -v "internal/cortex") @$(FAILLINT) -paths "fmt.{Print,Println,Sprint}" -ignore-tests ./... 
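The new faillint rule in the Makefile hunk above steers code away from `github.com/NYTimes/gziphandler` toward `github.com/klauspost/compress/gzhttp`, which is close to a drop-in replacement. A small usage sketch; the listen address and handler are illustrative:

```go
// Drop-in style usage of klauspost/compress's gzhttp, which the new faillint
// rule in the Makefile hunk above mandates over NYTimes/gziphandler.
package main

import (
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})

	// gzhttp.GzipHandler wraps any http.Handler and negotiates gzip via
	// the Accept-Encoding header, same call shape as gziphandler.GzipHandler.
	_ = http.ListenAndServe(":8080", gzhttp.GzipHandler(mux))
}
```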
diff --git a/README.md b/README.md
index ca2f452eca..4236e6b7e7 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
[![Latest Release](https://img.shields.io/github/release/thanos-io/thanos.svg?style=flat-square)](https://github.com/thanos-io/thanos/releases/latest) [![Go Report Card](https://goreportcard.com/badge/github.com/thanos-io/thanos)](https://goreportcard.com/report/github.com/thanos-io/thanos) [![Go Code reference](https://img.shields.io/badge/code%20reference-go.dev-darkblue.svg)](https://pkg.go.dev/github.com/thanos-io/thanos?tab=subdirectories) [![Slack](https://img.shields.io/badge/join%20slack-%23thanos-brightgreen.svg)](https://slack.cncf.io/) [![Netlify Status](https://api.netlify.com/api/v1/badges/664a5091-934c-4b0e-a7b6-bc12f822a590/deploy-status)](https://app.netlify.com/sites/thanos-io/deploys) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3048/badge)](https://bestpractices.coreinfrastructure.org/projects/3048)
-[![CI](https://github.com/thanos-io/thanos/workflows/CI/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3ACI) [![CI](https://circleci.com/gh/thanos-io/thanos.svg?style=svg)](https://circleci.com/gh/thanos-io/thanos) [![go](https://github.com/thanos-io/thanos/workflows/go/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3Ago) [![react](https://github.com/thanos-io/thanos/workflows/react/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3Areact) [![docs](https://github.com/thanos-io/thanos/workflows/docs/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3Adocs) [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/thanos-io/thanos)
+[![CI](https://github.com/thanos-io/thanos/workflows/CI/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3ACI) [![CI](https://circleci.com/gh/thanos-io/thanos.svg?style=svg)](https://circleci.com/gh/thanos-io/thanos) [![go](https://github.com/thanos-io/thanos/workflows/go/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3Ago) [![react](https://github.com/thanos-io/thanos/workflows/react/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3Areact) [![docs](https://github.com/thanos-io/thanos/workflows/docs/badge.svg)](https://github.com/thanos-io/thanos/actions?query=workflow%3Adocs) [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/thanos-io/thanos) [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=109162639)

## Overview

@@ -41,13 +41,21 @@ Concretely the aims of the project are:

## Architecture Overview

-Deployment with Sidecar:
+Deployment with Sidecar for Kubernetes:

-![Sidecar](https://docs.google.com/drawings/d/e/2PACX-1vTBFKKgf8YDInJyRakPE8eZZg9phTlOsBB2ogNkFvhNGbZ8YDvz_cGMbxWZBG1G6hpsQfSX145FpYcv/pub?w=960&h=720)
+

-Deployment with Receive:
+![Sidecar](https://docs.google.com/drawings/d/e/2PACX-1vSJd32gPh8-MC5Ko0-P-v1KQ0Xnxa0qmsVXowtkwVGlczGfVW-Vd415Y6F129zvh3y0vHLBZcJeZEoz/pub?w=960&h=720)

-![Receive](https://docs.google.com/drawings/d/e/2PACX-1vTfko27YB_3ab7ZL8ODNG5uCcrpqKxhmqaz3lW-yhGN3_oNxkTrqXmwwlcZjaWf3cGgAJIM4CMwwkEV/pub?w=960&h=720)
+Deployment with Receive in order to scale out or to integrate with other remote-write-compatible sources:
+
+
+![Receive](https://docs.google.com/drawings/d/e/2PACX-1vRdYP__uDuygGR5ym1dxBzU6LEx5v7Rs1cAUKPsl5BZrRGVl5YIj5lsD_FOljeIVOGWatdAI9pazbCP/pub?w=960&h=720) ## Thanos Philosophy diff --git a/VERSION b/VERSION index 8b0a86f6bb..5301df0d6d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.30.0-dev +0.32.0-dev \ No newline at end of file diff --git a/cmd/thanos/compact.go b/cmd/thanos/compact.go index 6cb4eae3e9..1368b56989 100644 --- a/cmd/thanos/compact.go +++ b/cmd/thanos/compact.go @@ -406,7 +406,6 @@ func runCompact( defer cleanMtx.Unlock() if err := sy.SyncMetas(ctx); err != nil { - cancel() return errors.Wrap(err, "syncing metas") } @@ -782,5 +781,5 @@ func (cc *compactConfig) registerFlag(cmd extkingpin.FlagClause) { cc.webConf.registerFlag(cmd) - cmd.Flag("bucket-web-label", "Prometheus label to use as timeline title in the bucket web UI").StringVar(&cc.label) + cmd.Flag("bucket-web-label", "External block label to use as group title in the bucket web UI").StringVar(&cc.label) } diff --git a/cmd/thanos/config.go b/cmd/thanos/config.go index 48d459cec9..4986de9af8 100644 --- a/cmd/thanos/config.go +++ b/cmd/thanos/config.go @@ -13,24 +13,23 @@ import ( extflag "github.com/efficientgo/tools/extkingpin" "github.com/prometheus/common/model" + "github.com/thanos-io/thanos/pkg/extkingpin" ) type grpcConfig struct { - bindAddress string - gracePeriod model.Duration - tlsSrvCert string - tlsSrvKey string - tlsSrvClientCA string + bindAddress string + tlsSrvCert string + tlsSrvKey string + tlsSrvClientCA string + gracePeriod time.Duration + maxConnectionAge time.Duration } func (gc *grpcConfig) registerFlag(cmd extkingpin.FlagClause) *grpcConfig { cmd.Flag("grpc-address", "Listen ip:port address for gRPC endpoints (StoreAPI). Make sure this address is routable from other components."). Default("0.0.0.0:10901").StringVar(&gc.bindAddress) - cmd.Flag("grpc-grace-period", - "Time to wait after an interrupt received for GRPC Server."). - Default("2m").SetValue(&gc.gracePeriod) cmd.Flag("grpc-server-tls-cert", "TLS Certificate for gRPC server, leave blank to disable TLS"). Default("").StringVar(&gc.tlsSrvCert) @@ -40,6 +39,12 @@ func (gc *grpcConfig) registerFlag(cmd extkingpin.FlagClause) *grpcConfig { cmd.Flag("grpc-server-tls-client-ca", "TLS CA to verify clients against. If no client CA is specified, there is no client verification on server side. (tls.NoClientCert)"). Default("").StringVar(&gc.tlsSrvClientCA) + cmd.Flag("grpc-server-max-connection-age", "The grpc server max connection age. This controls how often to re-establish connections and redo TLS handshakes."). + Default("60m").DurationVar(&gc.maxConnectionAge) + cmd.Flag("grpc-grace-period", + "Time to wait after an interrupt received for GRPC Server."). + Default("2m").DurationVar(&gc.gracePeriod) + return gc } @@ -198,7 +203,7 @@ func (qc *queryConfig) registerFlag(cmd extkingpin.FlagClause) *queryConfig { cmd.Flag("query.http-method", "HTTP method to use when sending queries. Possible options: [GET, POST]"). Default("POST").EnumVar(&qc.httpMethod, "GET", "POST") cmd.Flag("query.sd-dns-resolver", "Resolver to use. Possible options: [golang, miekgdns]"). - Default("golang").Hidden().StringVar(&qc.dnsSDResolver) + Default("miekgdns").Hidden().StringVar(&qc.dnsSDResolver) cmd.Flag("query.default-step", "Default range query step to use. This is only used in stateless Ruler and alert state restoration."). 
Default("1s").DurationVar(&qc.step) return qc diff --git a/cmd/thanos/downsample.go b/cmd/thanos/downsample.go index 693823afc2..5b2dbfeca7 100644 --- a/cmd/thanos/downsample.go +++ b/cmd/thanos/downsample.go @@ -85,8 +85,10 @@ func RunDownsample( return err } + // While fetching blocks, filter out blocks that were marked for no downsample. metaFetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, bkt, "", extprom.WrapRegistererWithPrefix("thanos_", reg), []block.MetadataFilter{ block.NewDeduplicateFilter(block.FetcherConcurrency), + downsample.NewGatherNoDownsampleMarkFilter(logger, bkt), }) if err != nil { return errors.Wrap(err, "create meta fetcher") diff --git a/cmd/thanos/main_test.go b/cmd/thanos/main_test.go index 74daa223d8..deeb085640 100644 --- a/cmd/thanos/main_test.go +++ b/cmd/thanos/main_test.go @@ -20,10 +20,10 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/thanos-io/objstore" + "github.com/efficientgo/core/testutil" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" - "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 8aa6d28bf6..48c3d318cc 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -11,6 +11,8 @@ import ( "strings" "time" + "google.golang.org/grpc" + "github.com/go-kit/log" "github.com/go-kit/log/level" grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" @@ -25,12 +27,10 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" - "google.golang.org/grpc" - - v1 "github.com/prometheus/prometheus/web/api/v1" - "github.com/thanos-community/promql-engine/engine" + "github.com/thanos-community/promql-engine/api" apiv1 "github.com/thanos-io/thanos/pkg/api/query" + "github.com/thanos-io/thanos/pkg/api/query/querypb" "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/discovery/cache" @@ -65,11 +65,11 @@ const ( queryPushdown = "query-pushdown" ) -type promqlEngineType string +type queryMode string const ( - promqlEnginePrometheus promqlEngineType = "prometheus" - promqlEngineThanos promqlEngineType = "thanos" + queryModeLocal queryMode = "local" + queryModeDistributed queryMode = "distributed" ) // registerQuery registers a query command. @@ -78,7 +78,9 @@ func registerQuery(app *extkingpin.App) { cmd := app.Command(comp.String(), "Query node exposing PromQL enabled Query API with data retrieved from multiple store nodes.") httpBindAddr, httpGracePeriod, httpTLSConfig := extkingpin.RegisterHTTPFlags(cmd) - grpcBindAddr, grpcGracePeriod, grpcCert, grpcKey, grpcClientCA, grpcMaxConnAge := extkingpin.RegisterGRPCFlags(cmd) + + var grpcServerConfig grpcConfig + grpcServerConfig.registerFlag(cmd) secure := cmd.Flag("grpc-client-tls-secure", "Use TLS when talking to the gRPC server").Default("false").Bool() skipVerify := cmd.Flag("grpc-client-tls-skip-verify", "Disable TLS certificate verification i.e self signed, signed by fake CA").Default("false").Bool() @@ -99,8 +101,13 @@ func registerQuery(app *extkingpin.App) { queryTimeout := extkingpin.ModelDuration(cmd.Flag("query.timeout", "Maximum time to process query by query node."). 
Default("2m")) - promqlEngine := cmd.Flag("query.promql-engine", "PromQL engine to use.").Default(string(promqlEnginePrometheus)). - Enum(string(promqlEnginePrometheus), string(promqlEngineThanos)) + defaultEngine := cmd.Flag("query.promql-engine", "Default PromQL engine to use.").Default(string(apiv1.PromqlEnginePrometheus)). + Enum(string(apiv1.PromqlEnginePrometheus), string(apiv1.PromqlEngineThanos)) + + promqlQueryMode := cmd.Flag("query.mode", "PromQL query mode. One of: local, distributed."). + Hidden(). + Default(string(queryModeLocal)). + Enum(string(queryModeLocal), string(queryModeDistributed)) maxConcurrentQueries := cmd.Flag("query.max-concurrent", "Maximum number of queries processed concurrently by query node."). Default("20").Int() @@ -128,6 +135,9 @@ func registerQuery(app *extkingpin.App) { endpoints := extkingpin.Addrs(cmd.Flag("endpoint", "Addresses of statically configured Thanos API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect Thanos API servers through respective DNS lookups."). PlaceHolder("")) + endpointGroups := extkingpin.Addrs(cmd.Flag("endpoint-group", "Experimental: DNS name of statically configured Thanos API server groups (repeatable). Targets resolved from the DNS name will be queried in a round-robin, instead of a fanout manner. This flag should be used when connecting a Thanos Query to HA groups of Thanos components."). + PlaceHolder("")) + stores := extkingpin.Addrs(cmd.Flag("store", "Deprecation Warning - This flag is deprecated and replaced with `endpoint`. Addresses of statically configured store API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect store API servers through respective DNS lookups."). PlaceHolder("")) @@ -151,6 +161,9 @@ func registerQuery(app *extkingpin.App) { strictEndpoints := cmd.Flag("endpoint-strict", "Addresses of only statically configured Thanos API servers that are always used, even if the health check fails. Useful if you have a caching layer on top."). PlaceHolder("").Strings() + strictEndpointGroups := extkingpin.Addrs(cmd.Flag("endpoint-group-strict", "Experimental: DNS name of statically configured Thanos API server groups (repeatable) that are always used, even if the health check fails."). + PlaceHolder("")) + fileSDFiles := cmd.Flag("store.sd-files", "Path to files that contain addresses of store API servers. The path can be a glob pattern (repeatable)."). PlaceHolder("").Strings() @@ -202,10 +215,13 @@ func registerQuery(app *extkingpin.App) { grpcProxyStrategy := cmd.Flag("grpc.proxy-strategy", "Strategy to use when proxying Series requests to leaf nodes. 
Hidden and only used for testing, will be removed after lazy becomes the default.").Default(string(store.EagerRetrieval)).Hidden().Enum(string(store.EagerRetrieval), string(store.LazyRetrieval)) queryTelemetryDurationQuantiles := cmd.Flag("query.telemetry.request-duration-seconds-quantiles", "The quantiles for exporting metrics about the request duration quantiles.").Default("0.1", "0.25", "0.75", "1.25", "1.75", "2.5", "3", "5", "10").Float64List() - queryTelemetrySamplesQuantiles := cmd.Flag("query.telemetry.request-samples-quantiles", "The quantiles for exporting metrics about the samples count quantiles.").Default("100", "1000", "10000", "100000", "1000000").Int64List() - queryTelemetrySeriesQuantiles := cmd.Flag("query.telemetry.request-series-seconds-quantiles", "The quantiles for exporting metrics about the series count quantiles.").Default("10", "100", "1000", "10000", "100000").Int64List() + queryTelemetrySamplesQuantiles := cmd.Flag("query.telemetry.request-samples-quantiles", "The quantiles for exporting metrics about the samples count quantiles.").Default("100", "1000", "10000", "100000", "1000000").Float64List() + queryTelemetrySeriesQuantiles := cmd.Flag("query.telemetry.request-series-seconds-quantiles", "The quantiles for exporting metrics about the series count quantiles.").Default("10", "100", "1000", "10000", "100000").Float64List() + + var storeRateLimits store.SeriesSelectLimits + storeRateLimits.RegisterFlags(cmd) - cmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ <-chan struct{}, _ bool) error { + cmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ <-chan struct{}, debugLogging bool) error { selectorLset, err := parseFlagLabels(*selectorLabels) if err != nil { return errors.Wrap(err, "parse federation labels") @@ -254,17 +270,13 @@ func registerQuery(app *extkingpin.App) { return runQuery( g, logger, + debugLogging, reg, tracer, httpLogOpts, grpcLogOpts, tagOpts, - *grpcBindAddr, - time.Duration(*grpcGracePeriod), - *grpcCert, - *grpcKey, - *grpcClientCA, - *grpcMaxConnAge, + grpcServerConfig, *grpcCompression, *secure, *skipVerify, @@ -291,6 +303,7 @@ func registerQuery(app *extkingpin.App) { selectorLset, getFlagsMap(cmd.Flags()), *endpoints, + *endpointGroups, *stores, *ruleEndpoints, *targetEndpoints, @@ -312,6 +325,7 @@ func registerQuery(app *extkingpin.App) { *defaultMetadataTimeRange, *strictStores, *strictEndpoints, + *strictEndpointGroups, *webDisableCORS, enableQueryPushdown, *alertQueryURL, @@ -320,7 +334,9 @@ func registerQuery(app *extkingpin.App) { *queryTelemetryDurationQuantiles, *queryTelemetrySamplesQuantiles, *queryTelemetrySeriesQuantiles, - promqlEngineType(*promqlEngine), + *defaultEngine, + storeRateLimits, + queryMode(*promqlQueryMode), ) }) } @@ -330,17 +346,13 @@ func registerQuery(app *extkingpin.App) { func runQuery( g *run.Group, logger log.Logger, + debugLogging bool, reg *prometheus.Registry, tracer opentracing.Tracer, httpLogOpts []logging.Option, grpcLogOpts []grpc_logging.Option, tagOpts []tags.Option, - grpcBindAddr string, - grpcGracePeriod time.Duration, - grpcCert string, - grpcKey string, - grpcClientCA string, - grpcMaxConnAge time.Duration, + grpcServerConfig grpcConfig, grpcCompression string, secure bool, skipVerify bool, @@ -367,6 +379,7 @@ func runQuery( selectorLset labels.Labels, flagsMap map[string]string, endpointAddrs []string, + endpointGroupAddrs []string, storeAddrs []string, ruleAddrs []string, targetAddrs 
[]string, @@ -388,15 +401,18 @@ func runQuery( defaultMetadataTimeRange time.Duration, strictStores []string, strictEndpoints []string, + strictEndpointGroups []string, disableCORS bool, enableQueryPushdown bool, alertQueryURL string, grpcProxyStrategy string, comp component.Component, queryTelemetryDurationQuantiles []float64, - queryTelemetrySamplesQuantiles []int64, - queryTelemetrySeriesQuantiles []int64, - promqlEngine promqlEngineType, + queryTelemetrySamplesQuantiles []float64, + queryTelemetrySeriesQuantiles []float64, + defaultEngine string, + storeRateLimits store.SeriesSelectLimits, + queryMode queryMode, ) error { if alertQueryURL == "" { lastColon := strings.LastIndex(httpBindAddr, ":") @@ -468,6 +484,11 @@ func runQuery( dns.ResolverType(dnsSDResolver), ) + options := []store.ProxyStoreOption{} + if debugLogging { + options = append(options, store.WithProxyStoreDebugLogging()) + } + var ( endpoints = query.NewEndpointSet( time.Now, @@ -500,6 +521,18 @@ func runQuery( specs = append(specs, tmpSpecs...) } + for _, eg := range endpointGroupAddrs { + addr := fmt.Sprintf("dns:///%s", eg) + spec := query.NewGRPCEndpointSpec(addr, false, extgrpc.EndpointGroupGRPCOpts()...) + specs = append(specs, spec) + } + + for _, eg := range strictEndpointGroups { + addr := fmt.Sprintf("dns:///%s", eg) + spec := query.NewGRPCEndpointSpec(addr, true, extgrpc.EndpointGroupGRPCOpts()...) + specs = append(specs, spec) + } + return specs }, dialOpts, @@ -507,7 +540,7 @@ func runQuery( endpointInfoTimeout, queryConnMetricLabels..., ) - proxy = store.NewProxyStore(logger, reg, endpoints.GetStoreClients, component.Query, selectorLset, storeResponseTimeout, store.RetrievalStrategy(grpcProxyStrategy)) + proxy = store.NewProxyStore(logger, reg, endpoints.GetStoreClients, component.Query, selectorLset, storeResponseTimeout, store.RetrievalStrategy(grpcProxyStrategy), options...) rulesProxy = rules.NewProxy(logger, endpoints.GetRulesClients) targetsProxy = targets.NewProxy(logger, endpoints.GetTargetsClients) metadataProxy = metadata.NewProxy(logger, endpoints.GetMetricMetadataClients) @@ -635,16 +668,21 @@ func runQuery( engineOpts.ActiveQueryTracker = promql.NewActiveQueryTracker(activeQueryDir, maxConcurrentQueries, logger) } - var queryEngine v1.QueryEngine - switch promqlEngine { - case promqlEnginePrometheus: - queryEngine = promql.NewEngine(engineOpts) - case promqlEngineThanos: - queryEngine = engine.New(engine.Opts{EngineOpts: engineOpts}) - default: - return errors.Errorf("unknown query.promql-engine type %v", promqlEngine) + var remoteEngineEndpoints api.RemoteEndpoints + if queryMode != queryModeLocal { + remoteEngineEndpoints = query.NewRemoteEndpoints(logger, endpoints.GetQueryAPIClients, query.Opts{ + AutoDownsample: enableAutodownsampling, + ReplicaLabels: queryReplicaLabels, + Timeout: queryTimeout, + EnablePartialResponse: enableQueryPartialResponse, + }) } + engineFactory := apiv1.NewQueryEngineFactory( + engineOpts, + remoteEngineEndpoints, + ) + lookbackDeltaCreator := LookbackDeltaFactory(engineOpts, dynamicLookbackDelta) // Start query API + UI HTTP server. @@ -675,7 +713,8 @@ func runQuery( api := apiv1.NewQueryAPI( logger, endpoints.GetEndpointStatus, - queryEngine, + engineFactory, + apiv1.PromqlEngineType(defaultEngine), lookbackDeltaCreator, queryableCreator, // NOTE: Will share the same replica label as the query for now. 
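The new `--endpoint-group` and `--endpoint-group-strict` flags above turn each DNS name into a `dns:///` gRPC target, delegating load balancing to the gRPC client instead of fanning out. The exact dial options live in `extgrpc.EndpointGroupGRPCOpts()`, which is not part of this diff; a rough standalone sketch of the behaviour it implies (the round-robin service config and the group address are assumptions for illustration):

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// With a dns:/// target and a round_robin balancer, grpc-go re-resolves
	// the DNS name and rotates requests across all resolved backends --
	// matching the flag's "round-robin, instead of a fanout" description.
	conn, err := grpc.Dial(
		"dns:///store-api.thanos.svc.cluster.local:10901", // hypothetical group address
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close() // a StoreAPI client would be built on top of this connection
}
```

This is why the flag help recommends endpoint groups for HA groups of Thanos components: all replicas behind one DNS name share a single logical connection instead of each being queried separately.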
@@ -699,6 +738,7 @@ func runQuery( gate.New( extprom.WrapRegistererWithPrefix("thanos_query_concurrent_", reg), maxConcurrentQueries, + gate.Queries, ), store.NewSeriesStatsAggregator( reg, @@ -731,7 +771,7 @@ func runQuery( } // Start query (proxy) gRPC StoreAPI. { - tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), grpcCert, grpcKey, grpcClientCA) + tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), grpcServerConfig.tlsSrvCert, grpcServerConfig.tlsSrvKey, grpcServerConfig.tlsSrvClientCA) if err != nil { return errors.Wrap(err, "setup gRPC server") } @@ -743,10 +783,11 @@ func runQuery( if httpProbe.IsReady() { mint, maxt := proxy.TimeRange() return &infopb.StoreInfo{ - MinTime: mint, - MaxTime: maxt, - SupportsSharding: true, - SendsSortedSeries: true, + MinTime: mint, + MaxTime: maxt, + SupportsSharding: true, + SupportsWithoutReplicaLabels: true, + TsdbInfos: proxy.TSDBInfos(), } } return nil @@ -758,19 +799,21 @@ func runQuery( info.WithQueryAPIInfoFunc(), ) - grpcAPI := apiv1.NewGRPCAPI(time.Now, queryReplicaLabels, queryableCreator, queryEngine, lookbackDeltaCreator, instantDefaultMaxSourceResolution) + defaultEngineType := querypb.EngineType(querypb.EngineType_value[defaultEngine]) + grpcAPI := apiv1.NewGRPCAPI(time.Now, queryReplicaLabels, queryableCreator, engineFactory, defaultEngineType, lookbackDeltaCreator, instantDefaultMaxSourceResolution) + storeServer := store.NewLimitedStoreServer(store.NewInstrumentedStoreServer(reg, proxy), reg, storeRateLimits) s := grpcserver.New(logger, reg, tracer, grpcLogOpts, tagOpts, comp, grpcProbe, grpcserver.WithServer(apiv1.RegisterQueryServer(grpcAPI)), - grpcserver.WithServer(store.RegisterStoreServer(proxy)), + grpcserver.WithServer(store.RegisterStoreServer(storeServer, logger)), grpcserver.WithServer(rules.RegisterRulesServer(rulesProxy)), grpcserver.WithServer(targets.RegisterTargetsServer(targetsProxy)), grpcserver.WithServer(metadata.RegisterMetadataServer(metadataProxy)), grpcserver.WithServer(exemplars.RegisterExemplarsServer(exemplarsProxy)), grpcserver.WithServer(info.RegisterInfoServer(infoSrv)), - grpcserver.WithListen(grpcBindAddr), - grpcserver.WithGracePeriod(grpcGracePeriod), + grpcserver.WithListen(grpcServerConfig.bindAddress), + grpcserver.WithGracePeriod(grpcServerConfig.gracePeriod), + grpcserver.WithMaxConnAge(grpcServerConfig.maxConnectionAge), grpcserver.WithTLSConfig(tlsCfg), - grpcserver.WithMaxConnAge(grpcMaxConnAge), ) g.Add(func() error { diff --git a/cmd/thanos/query_frontend.go b/cmd/thanos/query_frontend.go index df7f2707bc..f3d2b1a9f7 100644 --- a/cmd/thanos/query_frontend.go +++ b/cmd/thanos/query_frontend.go @@ -8,10 +8,10 @@ import ( "net/http" "time" - "github.com/NYTimes/gziphandler" extflag "github.com/efficientgo/tools/extkingpin" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/klauspost/compress/gzhttp" "github.com/oklog/run" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" @@ -277,7 +277,7 @@ func runQueryFrontend( // Create the query frontend transport. 
handler := transport.NewHandler(*cfg.CortexHandlerConfig, roundTripper, logger, nil) if cfg.CompressResponses { - handler = gziphandler.GzipHandler(handler) + handler = gzhttp.GzipHandler(handler) } httpProbe := prober.NewHTTP() @@ -311,7 +311,7 @@ func runQueryFrontend( logger, ins.NewHandler( name, - gziphandler.GzipHandler( + gzhttp.GzipHandler( middleware.RequestID( logMiddleware.HTTPMiddleware(name, f), ), diff --git a/cmd/thanos/query_frontend_test.go b/cmd/thanos/query_frontend_test.go index d83acbd4a7..4473eae38a 100644 --- a/cmd/thanos/query_frontend_test.go +++ b/cmd/thanos/query_frontend_test.go @@ -7,7 +7,7 @@ import ( "net/http" "testing" - "github.com/thanos-io/thanos/pkg/testutil" + "github.com/efficientgo/core/testutil" ) func Test_extractOrgId(t *testing.T) { diff --git a/cmd/thanos/query_test.go b/cmd/thanos/query_test.go index e4643c267c..803f241e4d 100644 --- a/cmd/thanos/query_test.go +++ b/cmd/thanos/query_test.go @@ -9,7 +9,7 @@ import ( "github.com/prometheus/prometheus/promql" - "github.com/thanos-io/thanos/pkg/testutil" + "github.com/efficientgo/core/testutil" ) func TestLookbackDeltaFactory(t *testing.T) { diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index b312ec4b5f..763b5cb4a3 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -58,7 +58,7 @@ func registerReceive(app *extkingpin.App) { conf := &receiveConfig{} conf.registerFlag(cmd) - cmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ <-chan struct{}, _ bool) error { + cmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ <-chan struct{}, debugLogging bool) error { lset, err := parseFlagLabels(conf.labelStrs) if err != nil { return errors.Wrap(err, "parse labels") @@ -85,9 +85,10 @@ func registerReceive(app *extkingpin.App) { NoLockfile: conf.noLockFile, WALCompression: conf.walCompression, MaxExemplars: conf.tsdbMaxExemplars, - EnableExemplarStorage: true, + EnableExemplarStorage: conf.tsdbMaxExemplars > 0, HeadChunksWriteQueueSize: int(conf.tsdbWriteQueueSize), EnableMemorySnapshotOnShutdown: conf.tsdbMemorySnapshotOnShutdown, + EnableNativeHistograms: conf.tsdbEnableNativeHistograms, } // Are we running in IngestorOnly, RouterOnly or RouterIngestor mode? 
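The `gziphandler` to `gzhttp` swap in query-frontend above (enforced repo-wide by the new faillint rule in the Makefile) is a drop-in replacement. A minimal sketch of the wrapped-handler pattern outside of Thanos (address and handler are hypothetical):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// gzhttp.GzipHandler wraps any http.Handler and transparently compresses
	// responses for clients that send Accept-Encoding: gzip, just like the
	// NYTimes gziphandler it replaces, but with a faster gzip implementation.
	http.Handle("/", gzhttp.GzipHandler(hello))
	_ = http.ListenAndServe(":8080", nil) // hypothetical address
}
```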
@@ -96,6 +97,7 @@ func registerReceive(app *extkingpin.App) { return runReceive( g, logger, + debugLogging, reg, tracer, grpcLogOpts, tagOpts, @@ -112,6 +114,7 @@ func registerReceive(app *extkingpin.App) { func runReceive( g *run.Group, logger log.Logger, + debugLogging bool, reg *prometheus.Registry, tracer opentracing.Tracer, grpcLogOpts []grpc_logging.Option, @@ -136,8 +139,8 @@ func runReceive( logger, reg, tracer, - *conf.grpcCert != "", - *conf.grpcClientCA == "", + conf.grpcConfig.tlsSrvCert != "", + conf.grpcConfig.tlsSrvClientCA == "", conf.rwClientCert, conf.rwClientKey, conf.rwClientServerCA, @@ -206,11 +209,14 @@ func runReceive( conf.allowOutOfOrderUpload, hashFunc, ) - writer := receive.NewWriter(log.With(logger, "component", "receive-writer"), dbs) + writer := receive.NewWriter(log.With(logger, "component", "receive-writer"), dbs, &receive.WriterOptions{ + Intern: conf.writerInterning, + TooFarInFutureTimeWindow: int64(time.Duration(*conf.tsdbTooFarInFutureTimeWindow)), + }) var limitsConfig *receive.RootLimitsConfig - if conf.limitsConfig != nil { - limitsContentYaml, err := conf.limitsConfig.Content() + if conf.writeLimitsConfig != nil { + limitsContentYaml, err := conf.writeLimitsConfig.Content() if err != nil { return errors.Wrap(err, "get content of limit configuration") } @@ -219,7 +225,7 @@ func runReceive( return errors.Wrap(err, "parse limit configuration") } } - limiter, err := receive.NewLimiter(conf.limitsConfig, reg, receiveMode, log.With(logger, "component", "receive-limiter")) + limiter, err := receive.NewLimiter(conf.writeLimitsConfig, reg, receiveMode, log.With(logger, "component", "receive-limiter")) if err != nil { return errors.Wrap(err, "creating limiter") } @@ -240,6 +246,7 @@ func runReceive( TLSConfig: rwTLSConfig, DialOpts: dialOpts, ForwardTimeout: time.Duration(*conf.forwardTimeout), + MaxBackoff: time.Duration(*conf.maxBackoff), TSDBStats: dbs, Limiter: limiter, }) @@ -299,12 +306,17 @@ func runReceive( level.Debug(logger).Log("msg", "setting up gRPC server") { - tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), *conf.grpcCert, *conf.grpcKey, *conf.grpcClientCA) + tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), conf.grpcConfig.tlsSrvCert, conf.grpcConfig.tlsSrvKey, conf.grpcConfig.tlsSrvClientCA) if err != nil { return errors.Wrap(err, "setup gRPC server") } - mts := store.NewProxyStore( + options := []store.ProxyStoreOption{} + if debugLogging { + options = append(options, store.WithProxyStoreDebugLogging()) + } + + proxy := store.NewProxyStore( logger, reg, dbs.TSDBLocalClients, @@ -312,7 +324,9 @@ func runReceive( labels.Labels{}, 0, store.LazyRetrieval, + options..., ) + mts := store.NewLimitedStoreServer(store.NewInstrumentedStoreServer(reg, proxy), reg, conf.storeRateLimits) rw := store.ReadWriteTSDBStore{ StoreServer: mts, WriteableStoreServer: webHandler, @@ -320,15 +334,16 @@ func runReceive( infoSrv := info.NewInfoServer( component.Receive.String(), - info.WithLabelSetFunc(func() []labelpb.ZLabelSet { return mts.LabelSet() }), + info.WithLabelSetFunc(func() []labelpb.ZLabelSet { return proxy.LabelSet() }), info.WithStoreInfoFunc(func() *infopb.StoreInfo { if httpProbe.IsReady() { - minTime, maxTime := mts.TimeRange() + minTime, maxTime := proxy.TimeRange() return &infopb.StoreInfo{ - MinTime: minTime, - MaxTime: maxTime, - SupportsSharding: true, - SendsSortedSeries: true, + MinTime: minTime, + MaxTime: maxTime, + SupportsSharding: true, + SupportsWithoutReplicaLabels: true, + TsdbInfos: 
proxy.TSDBInfos(), } } return nil @@ -337,19 +352,19 @@ func runReceive( ) srv := grpcserver.New(logger, receive.NewUnRegisterer(reg), tracer, grpcLogOpts, tagOpts, comp, grpcProbe, - grpcserver.WithServer(store.RegisterStoreServer(rw)), + grpcserver.WithServer(store.RegisterStoreServer(rw, logger)), grpcserver.WithServer(store.RegisterWritableStoreServer(rw)), grpcserver.WithServer(exemplars.RegisterExemplarsServer(exemplars.NewMultiTSDB(dbs.TSDBExemplars))), grpcserver.WithServer(info.RegisterInfoServer(infoSrv)), - grpcserver.WithListen(*conf.grpcBindAddr), - grpcserver.WithGracePeriod(time.Duration(*conf.grpcGracePeriod)), + grpcserver.WithListen(conf.grpcConfig.bindAddress), + grpcserver.WithGracePeriod(conf.grpcConfig.gracePeriod), + grpcserver.WithMaxConnAge(conf.grpcConfig.maxConnectionAge), grpcserver.WithTLSConfig(tlsCfg), - grpcserver.WithMaxConnAge(*conf.grpcMaxConnAge), ) g.Add( func() error { - level.Info(logger).Log("msg", "listening for StoreAPI and WritableStoreAPI gRPC", "address", *conf.grpcBindAddr) + level.Info(logger).Log("msg", "listening for StoreAPI and WritableStoreAPI gRPC", "address", conf.grpcConfig.bindAddress) statusProber.Healthy() return srv.ListenAndServe() }, @@ -738,12 +753,7 @@ type receiveConfig struct { httpGracePeriod *model.Duration httpTLSConfig *string - grpcBindAddr *string - grpcGracePeriod *model.Duration - grpcCert *string - grpcKey *string - grpcClientCA *string - grpcMaxConnAge *time.Duration + grpcConfig grpcConfig rwAddress string rwServerCert string @@ -773,19 +783,23 @@ type receiveConfig struct { replicaHeader string replicationFactor uint64 forwardTimeout *model.Duration + maxBackoff *model.Duration compression string tsdbMinBlockDuration *model.Duration tsdbMaxBlockDuration *model.Duration + tsdbTooFarInFutureTimeWindow *model.Duration tsdbOutOfOrderTimeWindow *model.Duration tsdbOutOfOrderCapMax int64 tsdbAllowOverlappingBlocks bool tsdbMaxExemplars int64 tsdbWriteQueueSize int64 tsdbMemorySnapshotOnShutdown bool + tsdbEnableNativeHistograms bool - walCompression bool - noLockFile bool + walCompression bool + noLockFile bool + writerInterning bool hashFunc string @@ -795,12 +809,14 @@ type receiveConfig struct { reqLogConfig *extflag.PathOrContent relabelConfigPath *extflag.PathOrContent - limitsConfig *extflag.PathOrContent + writeLimitsConfig *extflag.PathOrContent + storeRateLimits store.SeriesSelectLimits } func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) { rc.httpBindAddr, rc.httpGracePeriod, rc.httpTLSConfig = extkingpin.RegisterHTTPFlags(cmd) - rc.grpcBindAddr, rc.grpcGracePeriod, rc.grpcCert, rc.grpcKey, rc.grpcClientCA, rc.grpcMaxConnAge = extkingpin.RegisterGRPCFlags(cmd) + rc.grpcConfig.registerFlag(cmd) + rc.storeRateLimits.RegisterFlags(cmd) cmd.Flag("remote-write.address", "Address to listen on for remote write requests."). Default("0.0.0.0:19291").StringVar(&rc.rwAddress) @@ -826,14 +842,14 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) { rc.objStoreConfig = extkingpin.RegisterCommonObjStoreFlags(cmd, "", false) - rc.retention = extkingpin.ModelDuration(cmd.Flag("tsdb.retention", "How long to retain raw samples on local storage. 0d - disables this retention. 
For more details on how retention is enforced for individual tenants, please refer to the Tenant lifecycle management section in the Receive documentation: https://thanos.io/tip/components/receive.md/#tenant-lifecycle-management").Default("15d"))
+	rc.retention = extkingpin.ModelDuration(cmd.Flag("tsdb.retention", "How long to retain raw samples on local storage. 0d - disables the retention policy (i.e. infinite retention). For more details on how retention is enforced for individual tenants, please refer to the Tenant lifecycle management section in the Receive documentation: https://thanos.io/tip/components/receive.md/#tenant-lifecycle-management").Default("15d"))

	cmd.Flag("receive.hashrings-file", "Path to file that contains the hashring configuration. A watcher is initialized to watch changes and update the hashring dynamically.").PlaceHolder("").StringVar(&rc.hashringsFilePath)

	cmd.Flag("receive.hashrings", "Alternative to 'receive.hashrings-file' flag (lower priority). Content of file that contains the hashring configuration.").PlaceHolder("").StringVar(&rc.hashringsFileContent)

	hashringAlgorithmsHelptext := strings.Join([]string{string(receive.AlgorithmHashmod), string(receive.AlgorithmKetama)}, ", ")

-	cmd.Flag("receive.hashrings-algorithm", "The algorithm used when distributing series in the hashrings. Must be one of "+hashringAlgorithmsHelptext).
+	cmd.Flag("receive.hashrings-algorithm", "The algorithm used when distributing series in the hashrings. Must be one of "+hashringAlgorithmsHelptext+". Will be overwritten by the tenant-specific algorithm in the hashring config.").
		Default(string(receive.AlgorithmHashmod)).
		EnumVar(&rc.hashringsAlgorithm, string(receive.AlgorithmHashmod), string(receive.AlgorithmKetama))

@@ -859,12 +875,19 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) {

	rc.forwardTimeout = extkingpin.ModelDuration(cmd.Flag("receive-forward-timeout", "Timeout for each forward request.").Default("5s").Hidden())

+	rc.maxBackoff = extkingpin.ModelDuration(cmd.Flag("receive-forward-max-backoff", "Maximum backoff for each forward fan-out request.").Default("5s").Hidden())
+
	rc.relabelConfigPath = extflag.RegisterPathOrContent(cmd, "receive.relabel-config", "YAML file that contains relabeling configuration.", extflag.WithEnvSubstitution())

	rc.tsdbMinBlockDuration = extkingpin.ModelDuration(cmd.Flag("tsdb.min-block-duration", "Min duration for local TSDB blocks").Default("2h").Hidden())

	rc.tsdbMaxBlockDuration = extkingpin.ModelDuration(cmd.Flag("tsdb.max-block-duration", "Max duration for local TSDB blocks").Default("2h").Hidden())

+	rc.tsdbTooFarInFutureTimeWindow = extkingpin.ModelDuration(cmd.Flag("tsdb.too-far-in-future.time-window",
+		"[EXPERIMENTAL] Configures the allowed time window for ingesting samples too far in the future. Disabled (0s) by default. "+
+		"Please note that enabling this flag will reject samples with timestamps ahead of the receiver's local (NTP-synced) time plus the configured duration, which can occur due to clock skew in remote write clients.",
+	).Default("0s"))
+
	rc.tsdbOutOfOrderTimeWindow = extkingpin.ModelDuration(cmd.Flag("tsdb.out-of-order.time-window",
		"[EXPERIMENTAL] Configures the allowed time window for ingestion of out-of-order samples.
Disabled (0s) by default. "+
		"Please note that if you enable this option and you use the compactor, make sure you have the --enable-vertical-compaction flag enabled, otherwise you might risk compactor halt.",
@@ -895,6 +918,14 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) {
		"[EXPERIMENTAL] Enables feature to snapshot in-memory chunks on shutdown for faster restarts.").
		Default("false").Hidden().BoolVar(&rc.tsdbMemorySnapshotOnShutdown)

+	cmd.Flag("tsdb.enable-native-histograms",
+		"[EXPERIMENTAL] Enables the ingestion of native histograms.").
+		Default("false").Hidden().BoolVar(&rc.tsdbEnableNativeHistograms)
+
+	cmd.Flag("writer.intern",
+		"[EXPERIMENTAL] Enables string interning in receive writer, for more optimized memory usage.").
+		Default("false").Hidden().BoolVar(&rc.writerInterning)
+
	cmd.Flag("hash-func", "Specify which hash function to use when calculating the hashes of produced files. If no function has been specified, hashing does not happen. This permits avoiding downloading some files twice albeit at some performance cost. Possible values are: \"\", \"SHA256\".").
		Default("").EnumVar(&rc.hashFunc, "SHA256", "")

@@ -908,7 +939,7 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) {

	rc.reqLogConfig = extkingpin.RegisterRequestLoggingFlags(cmd)

-	rc.limitsConfig = extflag.RegisterPathOrContent(cmd, "receive.limits-config", "YAML file that contains limit configuration.", extflag.WithEnvSubstitution(), extflag.WithHidden())
+	rc.writeLimitsConfig = extflag.RegisterPathOrContent(cmd, "receive.limits-config", "YAML file that contains limit configuration.", extflag.WithEnvSubstitution(), extflag.WithHidden())
}

// determineMode returns the ReceiverMode that this receiver is configured to run in.
diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go
index 0bd29b905d..6a57a55296 100644
--- a/cmd/thanos/rule.go
+++ b/cmd/thanos/rule.go
@@ -94,6 +94,7 @@ type ruleConfig struct {
	dataDir           string
	lset              labels.Labels
	ignoredLabelNames []string
+	storeRateLimits   store.SeriesSelectLimits
}

func (rc *ruleConfig) registerFlag(cmd extkingpin.FlagClause) {
@@ -103,6 +104,7 @@ func (rc *ruleConfig) registerFlag(cmd extkingpin.FlagClause) {
	rc.shipper.registerFlag(cmd)
	rc.query.registerFlag(cmd)
	rc.alertmgr.registerFlag(cmd)
+	rc.storeRateLimits.RegisterFlags(cmd)
}

// registerRule registers a rule command.
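Among the receive flags registered above, the hidden `--writer.intern` flag enables string interning in the receive writer. A toy illustration of the underlying idea, not Thanos's actual implementation (which is reference-counted and concurrency-safe):

```go
package main

import "fmt"

// interner deduplicates identical strings so repeated label names and values
// share a single backing allocation -- the memory saving that the
// --writer.intern flag targets in the receive writer.
type interner map[string]string

func (in interner) intern(s string) string {
	if v, ok := in[s]; ok {
		return v
	}
	in[s] = s
	return s
}

func main() {
	in := interner{}
	for i := 0; i < 1000; i++ {
		_ = in.intern("__name__") // every call returns the same stored string
	}
	fmt.Println(len(in)) // 1: only one copy of "__name__" is retained
}
```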
@@ -610,7 +612,8 @@ func runRule(
	options := []grpcserver.Option{
		grpcserver.WithServer(thanosrules.RegisterRulesServer(ruleMgr)),
		grpcserver.WithListen(conf.grpc.bindAddress),
-		grpcserver.WithGracePeriod(time.Duration(conf.grpc.gracePeriod)),
+		grpcserver.WithGracePeriod(conf.grpc.gracePeriod),
+		grpcserver.WithMaxConnAge(conf.grpc.maxConnectionAge),
		grpcserver.WithTLSConfig(tlsCfg),
	}
	infoOptions := []info.ServerOptionFunc{info.WithRulesInfoFunc()}
@@ -625,16 +628,18 @@
			if httpProbe.IsReady() {
				mint, maxt := tsdbStore.TimeRange()
				return &infopb.StoreInfo{
-					MinTime:           mint,
-					MaxTime:           maxt,
-					SupportsSharding:  true,
-					SendsSortedSeries: true,
+					MinTime:                      mint,
+					MaxTime:                      maxt,
+					SupportsSharding:             true,
+					SupportsWithoutReplicaLabels: true,
+					TsdbInfos:                    tsdbStore.TSDBInfos(),
				}
			}
			return nil
		}),
	)
-	options = append(options, grpcserver.WithServer(store.RegisterStoreServer(tsdbStore)))
+	storeServer := store.NewLimitedStoreServer(store.NewInstrumentedStoreServer(reg, tsdbStore), reg, conf.storeRateLimits)
+	options = append(options, grpcserver.WithServer(store.RegisterStoreServer(storeServer, logger)))
}

options = append(options, grpcserver.WithServer(
diff --git a/cmd/thanos/rule_test.go b/cmd/thanos/rule_test.go
index 96a9bc66b1..5703c7b3cc 100644
--- a/cmd/thanos/rule_test.go
+++ b/cmd/thanos/rule_test.go
@@ -6,7 +6,7 @@ package main
import (
	"testing"

-	"github.com/thanos-io/thanos/pkg/testutil"
+	"github.com/efficientgo/core/testutil"
)

func Test_parseFlagLabels(t *testing.T) {
diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go
index 7d6da14d1a..ddd24104ed 100644
--- a/cmd/thanos/sidecar.go
+++ b/cmd/thanos/sidecar.go
@@ -268,10 +268,11 @@ func runSidecar(
		if httpProbe.IsReady() {
			mint, maxt := promStore.Timestamps()
			return &infopb.StoreInfo{
-				MinTime:           mint,
-				MaxTime:           maxt,
-				SupportsSharding:  true,
-				SendsSortedSeries: true,
+				MinTime:                      mint,
+				MaxTime:                      maxt,
+				SupportsSharding:             true,
+				SupportsWithoutReplicaLabels: true,
+				TsdbInfos:                    promStore.TSDBInfos(),
			}
		}
		return nil
@@ -282,15 +283,17 @@ func runSidecar(
		info.WithMetricMetadataInfoFunc(),
	)

+	storeServer := store.NewLimitedStoreServer(store.NewInstrumentedStoreServer(reg, promStore), reg, conf.storeRateLimits)
	s := grpcserver.New(logger, reg, tracer, grpcLogOpts, tagOpts, comp, grpcProbe,
-		grpcserver.WithServer(store.RegisterStoreServer(promStore)),
+		grpcserver.WithServer(store.RegisterStoreServer(storeServer, logger)),
		grpcserver.WithServer(rules.RegisterRulesServer(rules.NewPrometheus(conf.prometheus.url, c, m.Labels))),
		grpcserver.WithServer(targets.RegisterTargetsServer(targets.NewPrometheus(conf.prometheus.url, c, m.Labels))),
		grpcserver.WithServer(meta.RegisterMetadataServer(meta.NewPrometheus(conf.prometheus.url, c))),
		grpcserver.WithServer(exemplars.RegisterExemplarsServer(exemplarSrv)),
		grpcserver.WithServer(info.RegisterInfoServer(infoSrv)),
		grpcserver.WithListen(conf.grpc.bindAddress),
-		grpcserver.WithGracePeriod(time.Duration(conf.grpc.gracePeriod)),
+		grpcserver.WithGracePeriod(conf.grpc.gracePeriod),
+		grpcserver.WithMaxConnAge(conf.grpc.maxConnectionAge),
		grpcserver.WithTLSConfig(tlsCfg),
	)
	g.Add(func() error {
@@ -474,15 +477,16 @@ func (s *promMetadata) Version() string {
}

type sidecarConfig struct {
-	http         httpConfig
-	grpc         grpcConfig
-	prometheus   prometheusConfig
-	tsdb         tsdbConfig
-	reloader     reloaderConfig
-	reqLogConfig *extflag.PathOrContent
-	objStore     extflag.PathOrContent
-	shipper      shipperConfig
-	limitMinTime thanosmodel.TimeOrDurationValue
+	http httpConfig
+	grpc grpcConfig
+
prometheus prometheusConfig + tsdb tsdbConfig + reloader reloaderConfig + reqLogConfig *extflag.PathOrContent + objStore extflag.PathOrContent + shipper shipperConfig + limitMinTime thanosmodel.TimeOrDurationValue + storeRateLimits store.SeriesSelectLimits } func (sc *sidecarConfig) registerFlag(cmd extkingpin.FlagClause) { @@ -494,6 +498,7 @@ func (sc *sidecarConfig) registerFlag(cmd extkingpin.FlagClause) { sc.reqLogConfig = extkingpin.RegisterRequestLoggingFlags(cmd) sc.objStore = *extkingpin.RegisterCommonObjStoreFlags(cmd, "", false) sc.shipper.registerFlag(cmd) + sc.storeRateLimits.RegisterFlags(cmd) cmd.Flag("min-time", "Start of time range limit to serve. Thanos sidecar will serve only metrics, which happened later than this value. Option can be a constant time in RFC3339 format or time duration relative to current time, such as -1d or 2h45m. Valid duration units are ms, s, m, h, d, w, y."). Default("0000-01-01T00:00:00Z").SetValue(&sc.limitMinTime) } diff --git a/cmd/thanos/store.go b/cmd/thanos/store.go index 30df09ba5e..8661e95c25 100644 --- a/cmd/thanos/store.go +++ b/cmd/thanos/store.go @@ -49,17 +49,22 @@ import ( "github.com/thanos-io/thanos/pkg/ui" ) +const ( + retryTimeoutDuration = 30 + retryIntervalDuration = 10 +) + type storeConfig struct { indexCacheConfigs extflag.PathOrContent objStoreConfig extflag.PathOrContent dataDir string + cacheIndexHeader bool grpcConfig grpcConfig httpConfig httpConfig indexCacheSizeBytes units.Base2Bytes chunkPoolSize units.Base2Bytes seriesBatchSize int - maxSampleCount uint64 - maxTouchedSeriesCount uint64 + storeRateLimits store.SeriesSelectLimits maxDownloadedBytes units.Base2Bytes maxConcurrency int component component.StoreAPI @@ -74,6 +79,7 @@ type storeConfig struct { ignoreDeletionMarksDelay commonmodel.Duration disableWeb bool webConfig webConfig + label string postingOffsetsInMemSampling int cachingBucketConfig extflag.PathOrContent reqLogConfig *extflag.PathOrContent @@ -84,10 +90,14 @@ type storeConfig struct { func (sc *storeConfig) registerFlag(cmd extkingpin.FlagClause) { sc.httpConfig = *sc.httpConfig.registerFlag(cmd) sc.grpcConfig = *sc.grpcConfig.registerFlag(cmd) + sc.storeRateLimits.RegisterFlags(cmd) - cmd.Flag("data-dir", "Local data directory used for caching purposes (index-header, in-mem cache items and meta.jsons). If removed, no data will be lost, just store will have to rebuild the cache. NOTE: Putting raw blocks here will not cause the store to read them. For such use cases use Prometheus + sidecar."). + cmd.Flag("data-dir", "Local data directory used for caching purposes (index-header, in-mem cache items and meta.jsons). If removed, no data will be lost, just store will have to rebuild the cache. NOTE: Putting raw blocks here will not cause the store to read them. For such use cases use Prometheus + sidecar. Ignored if --no-cache-index-header option is specified."). Default("./data").StringVar(&sc.dataDir) + cmd.Flag("cache-index-header", "Cache TSDB index-headers on disk to reduce startup time. When set to true, Thanos Store will download index headers from remote object storage on startup and create a header file on disk. Use --data-dir to set the directory in which index headers will be downloaded."). + Default("true").BoolVar(&sc.cacheIndexHeader) + cmd.Flag("index-cache-size", "Maximum size of items held in the in-memory index cache. Ignored if --index-cache.config or --index-cache.config-file option is specified."). 
Default("250MB").BytesVar(&sc.indexCacheSizeBytes) @@ -104,13 +114,8 @@ func (sc *storeConfig) registerFlag(cmd extkingpin.FlagClause) { cmd.Flag("chunk-pool-size", "Maximum size of concurrently allocatable bytes reserved strictly to reuse for chunks in memory."). Default("2GB").BytesVar(&sc.chunkPoolSize) - cmd.Flag("store.grpc.series-sample-limit", - "Maximum amount of samples returned via a single Series call. The Series call fails if this limit is exceeded. 0 means no limit. NOTE: For efficiency the limit is internally implemented as 'chunks limit' considering each chunk contains 120 samples (it's the max number of samples each chunk can contain), so the actual number of samples might be lower, even though the maximum could be hit."). - Default("0").Uint64Var(&sc.maxSampleCount) - - cmd.Flag("store.grpc.touched-series-limit", - "Maximum amount of touched series returned via a single Series call. The Series call fails if this limit is exceeded. 0 means no limit."). - Default("0").Uint64Var(&sc.maxTouchedSeriesCount) + cmd.Flag("store.grpc.touched-series-limit", "DEPRECATED: use store.limits.request-series.").Default("0").Uint64Var(&sc.storeRateLimits.SeriesPerRequest) + cmd.Flag("store.grpc.series-sample-limit", "DEPRECATED: use store.limits.request-samples.").Default("0").Uint64Var(&sc.storeRateLimits.SamplesPerRequest) cmd.Flag("store.grpc.downloaded-bytes-limit", "Maximum amount of downloaded (either fetched or touched) bytes in a single Series/LabelNames/LabelValues call. The Series call fails if this limit is exceeded. 0 means no limit."). @@ -179,6 +184,8 @@ func (sc *storeConfig) registerFlag(cmd extkingpin.FlagClause) { cmd.Flag("web.disable-cors", "Whether to disable CORS headers to be set by Thanos. By default Thanos sets CORS headers to be allowed by all."). 
Default("false").BoolVar(&sc.webConfig.disableCORS) + cmd.Flag("bucket-web-label", "External block label to use as group title in the bucket web UI").StringVar(&sc.label) + sc.reqLogConfig = extkingpin.RegisterRequestLoggingFlags(cmd) } @@ -232,6 +239,11 @@ func runStore( conf storeConfig, flagsMap map[string]string, ) error { + dataDir := conf.dataDir + if !conf.cacheIndexHeader { + dataDir = "" + } + grpcProbe := prober.NewGRPC() httpProbe := prober.NewHTTP() statusProber := prober.Combine( @@ -313,7 +325,7 @@ func runStore( } ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(logger, bkt, time.Duration(conf.ignoreDeletionMarksDelay), conf.blockMetaFetchConcurrency) - metaFetcher, err := block.NewMetaFetcher(logger, conf.blockMetaFetchConcurrency, bkt, conf.dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg), + metaFetcher, err := block.NewMetaFetcher(logger, conf.blockMetaFetchConcurrency, bkt, dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg), []block.MetadataFilter{ block.NewTimePartitionMetaFilter(conf.filterConf.MinTime, conf.filterConf.MaxTime), block.NewLabelShardedMetaFilter(relabelConfig), @@ -330,7 +342,7 @@ func runStore( return errors.Errorf("max concurrency value cannot be lower than 0 (got %v)", conf.maxConcurrency) } - queriesGate := gate.New(extprom.WrapRegistererWithPrefix("thanos_bucket_store_series_", reg), int(conf.maxConcurrency)) + queriesGate := gate.New(extprom.WrapRegistererWithPrefix("thanos_bucket_store_series_", reg), int(conf.maxConcurrency), gate.Queries) chunkPool, err := store.NewDefaultChunkBytesPool(uint64(conf.chunkPoolSize)) if err != nil { @@ -355,9 +367,9 @@ func runStore( bs, err := store.NewBucketStore( bkt, metaFetcher, - conf.dataDir, - store.NewChunksLimiterFactory(conf.maxSampleCount/store.MaxSamplesPerChunk), // The samples limit is an approximation based on the max number of samples per chunk. - store.NewSeriesLimiterFactory(conf.maxTouchedSeriesCount), + dataDir, + store.NewChunksLimiterFactory(conf.storeRateLimits.SamplesPerRequest/store.MaxSamplesPerChunk), // The samples limit is an approximation based on the max number of samples per chunk. + store.NewSeriesLimiterFactory(conf.storeRateLimits.SeriesPerRequest), store.NewBytesLimiterFactory(conf.maxDownloadedBytes), store.NewGapBasedPartitioner(store.PartitionerMaxGapSize), conf.blockSyncConcurrency, @@ -381,14 +393,25 @@ func runStore( level.Info(logger).Log("msg", "initializing bucket store") begin := time.Now() - if err := bs.InitialSync(ctx); err != nil { + + // This will stop retrying after set timeout duration. + initialSyncCtx, cancel := context.WithTimeout(ctx, retryTimeoutDuration*time.Second) + defer cancel() + + // Retry in case of error. 
+ err := runutil.Retry(retryIntervalDuration*time.Second, initialSyncCtx.Done(), func() error { + return bs.InitialSync(ctx) + }) + + if err != nil { close(bucketStoreReady) return errors.Wrap(err, "bucket store initial sync") } + level.Info(logger).Log("msg", "bucket store ready", "init_duration", time.Since(begin).String()) close(bucketStoreReady) - err := runutil.Repeat(conf.syncInterval, ctx.Done(), func() error { + err = runutil.Repeat(conf.syncInterval, ctx.Done(), func() error { if err := bs.SyncBlocks(ctx); err != nil { level.Warn(logger).Log("msg", "syncing blocks failed", "err", err) } @@ -411,10 +434,11 @@ func runStore( if httpProbe.IsReady() { mint, maxt := bs.TimeRange() return &infopb.StoreInfo{ - MinTime: mint, - MaxTime: maxt, - SupportsSharding: true, - SendsSortedSeries: true, + MinTime: mint, + MaxTime: maxt, + SupportsSharding: true, + SupportsWithoutReplicaLabels: true, + TsdbInfos: bs.TSDBInfos(), } } return nil @@ -428,11 +452,13 @@ func runStore( return errors.Wrap(err, "setup gRPC server") } + storeServer := store.NewInstrumentedStoreServer(reg, bs) s := grpcserver.New(logger, reg, tracer, grpcLogOpts, tagOpts, conf.component, grpcProbe, - grpcserver.WithServer(store.RegisterStoreServer(bs)), + grpcserver.WithServer(store.RegisterStoreServer(storeServer, logger)), grpcserver.WithServer(info.RegisterInfoServer(infoSrv)), grpcserver.WithListen(conf.grpcConfig.bindAddress), - grpcserver.WithGracePeriod(time.Duration(conf.grpcConfig.gracePeriod)), + grpcserver.WithGracePeriod(conf.grpcConfig.gracePeriod), + grpcserver.WithMaxConnAge(conf.grpcConfig.maxConnectionAge), grpcserver.WithTLSConfig(tlsCfg), ) @@ -455,7 +481,7 @@ func runStore( // Configure Request Logging for HTTP calls. logMiddleware := logging.NewHTTPServerMiddleware(logger, httpLogOpts...) - api := blocksAPI.NewBlocksAPI(logger, conf.webConfig.disableCORS, "", flagsMap, bkt) + api := blocksAPI.NewBlocksAPI(logger, conf.webConfig.disableCORS, conf.label, flagsMap, bkt) api.Register(r.WithPrefix("/api/v1"), tracer, logger, ins, logMiddleware) metaFetcher.UpdateOnChange(func(blocks []metadata.Meta, err error) { diff --git a/cmd/thanos/tools_bucket.go b/cmd/thanos/tools_bucket.go index f3e0e44fec..b9682406df 100644 --- a/cmd/thanos/tools_bucket.go +++ b/cmd/thanos/tools_bucket.go @@ -123,7 +123,7 @@ type bucketWebConfig struct { type bucketReplicateConfig struct { resolutions []time.Duration compactions []int - matcherStrs []string + matcherStrs string singleRun bool } @@ -147,9 +147,10 @@ type bucketRetentionConfig struct { } type bucketMarkBlockConfig struct { - details string - marker string - blockIDs []string + details string + marker string + blockIDs []string + removeMarker bool } func (tbc *bucketVerifyConfig) registerBucketVerifyFlag(cmd extkingpin.FlagClause) *bucketVerifyConfig { @@ -197,7 +198,7 @@ func (tbc *bucketWebConfig) registerBucketWebFlag(cmd extkingpin.FlagClause) *bu cmd.Flag("timeout", "Timeout to download metadata from remote storage").Default("5m").DurationVar(&tbc.timeout) - cmd.Flag("label", "Prometheus label to use as timeline title").StringVar(&tbc.label) + cmd.Flag("label", "External block label to use as group title").StringVar(&tbc.label) return tbc } @@ -206,7 +207,7 @@ func (tbc *bucketReplicateConfig) registerBucketReplicateFlag(cmd extkingpin.Fla cmd.Flag("compaction", "Only blocks with these compaction levels will be replicated. 
Repeated flag.").Default("1", "2", "3", "4").IntsVar(&tbc.compactions)

-	cmd.Flag("matcher", "Only blocks whose external labels exactly match this matcher will be replicated.").PlaceHolder("key=\"value\"").StringsVar(&tbc.matcherStrs)
+	cmd.Flag("matcher", "Blocks whose external labels match this matcher will be replicated. All Prometheus matchers are supported, including =, !=, =~ and !~.").StringVar(&tbc.matcherStrs)

	cmd.Flag("single-run", "Run replication only one time, then exit.").Default("false").BoolVar(&tbc.singleRun)

@@ -238,9 +239,9 @@ func (tbc *bucketDownsampleConfig) registerBucketDownsampleFlag(cmd extkingpin.F
func (tbc *bucketMarkBlockConfig) registerBucketMarkBlockFlag(cmd extkingpin.FlagClause) *bucketMarkBlockConfig {
	cmd.Flag("id", "ID (ULID) of the blocks to be marked for deletion (repeated flag)").Required().StringsVar(&tbc.blockIDs)

-	cmd.Flag("marker", "Marker to be put.").Required().EnumVar(&tbc.marker, metadata.DeletionMarkFilename, metadata.NoCompactMarkFilename)
-	cmd.Flag("details", "Human readable details to be put into marker.").Required().StringVar(&tbc.details)
-
+	cmd.Flag("marker", "Marker to be put.").Required().EnumVar(&tbc.marker, metadata.DeletionMarkFilename, metadata.NoCompactMarkFilename, metadata.NoDownsampleMarkFilename)
+	cmd.Flag("details", "Human readable details to be put into marker.").StringVar(&tbc.details)
+	cmd.Flag("remove", "Remove the marker.").Default("false").BoolVar(&tbc.removeMarker)
	return tbc
}

@@ -1047,9 +1048,20 @@ func registerBucketMarkBlock(app extkingpin.AppClause, objStoreConfig *extflag.P
		ids = append(ids, u)
	}

+	if !tbc.removeMarker && tbc.details == "" {
+		return errors.Errorf("required flag --details not provided")
+	}
+
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	g.Add(func() error {
		for _, id := range ids {
+			if tbc.removeMarker {
+				err := block.RemoveMark(ctx, logger, bkt, id, promauto.With(nil).NewCounter(prometheus.CounterOpts{}), tbc.marker)
+				if err != nil {
+					return errors.Wrapf(err, "remove mark %v for %v", id, tbc.marker)
+				}
+				continue
+			}
			switch tbc.marker {
			case metadata.DeletionMarkFilename:
				if err := block.MarkForDeletion(ctx, logger, bkt, id, tbc.details, promauto.With(nil).NewCounter(prometheus.CounterOpts{})); err != nil {
@@ -1059,6 +1071,10 @@ func registerBucketMarkBlock(app extkingpin.AppClause, objStoreConfig *extflag.P
				if err := block.MarkForNoCompact(ctx, logger, bkt, id, metadata.ManualNoCompactReason, tbc.details, promauto.With(nil).NewCounter(prometheus.CounterOpts{})); err != nil {
					return errors.Wrapf(err, "mark %v for %v", id, tbc.marker)
				}
+			case metadata.NoDownsampleMarkFilename:
+				if err := block.MarkForNoDownsample(ctx, logger, bkt, id, metadata.ManualNoDownsampleReason, tbc.details, promauto.With(nil).NewCounter(prometheus.CounterOpts{})); err != nil {
+					return errors.Wrapf(err, "mark %v for %v", id, tbc.marker)
+				}
			default:
				return errors.Errorf("not supported marker %v", tbc.marker)
			}
diff --git a/cmd/thanos/tools_test.go b/cmd/thanos/tools_test.go
index 09949e016b..81b616111f 100644
--- a/cmd/thanos/tools_test.go
+++ b/cmd/thanos/tools_test.go
@@ -8,7 +8,7 @@ import (

	"github.com/go-kit/log"

-	"github.com/thanos-io/thanos/pkg/testutil"
+	"github.com/efficientgo/core/testutil"
)

func Test_CheckRules(t *testing.T) {
diff --git a/docs/components/compact.md b/docs/components/compact.md
index bf81a7f70c..958ea07b21 100644
--- a/docs/components/compact.md
+++ b/docs/components/compact.md
@@ -295,8 +295,8 @@ Flags:
                              local and remote view for /global Block Viewer
                              UI.
  --bucket-web-label=BUCKET-WEB-LABEL
-                             Prometheus label to use as timeline title in the
-                             bucket web UI
+                             External block label to use as group title in
+                             the bucket web UI
  --compact.blocks-fetch-concurrency=1
                              Number of goroutines to use when downloading blocks during compaction.
diff --git a/docs/components/query-frontend.md b/docs/components/query-frontend.md
index b79bbcc5ff..8ae6aee8e7 100644
--- a/docs/components/query-frontend.md
+++ b/docs/components/query-frontend.md
@@ -132,6 +132,8 @@ config:
    key_file: ""
    server_name: ""
    insecure_skip_verify: false
+    cache_size: 0
+    master_name: ""
  expiration: 24h0m0s
```
diff --git a/docs/components/query.md b/docs/components/query.md
index 9de81aab5a..364d62e54a 100644
--- a/docs/components/query.md
+++ b/docs/components/query.md
@@ -11,8 +11,8 @@ Example command to run Querier:

```bash
thanos query \
    --http-address "0.0.0.0:9090" \
-    --store ":" \
-    --store ":"
+    --endpoint ":" \
+    --endpoint ":"
```

## Querier use cases, why do I need this component?

@@ -71,8 +71,8 @@ If we configure Querier like this:

thanos query \
    --http-address "0.0.0.0:9090" \
    --query.replica-label "replica" \
-    --store ":" \
-    --store ":" \
+    --endpoint ":" \
+    --endpoint ":" \
```

And we query for metric `up{job="prometheus",env="2"}` with this option we will get 2 results:

@@ -97,8 +97,8 @@ thanos query \
    --http-address "0.0.0.0:9090" \
    --query.replica-label "replica" \
    --query.replica-label "replicaX" \
-    --store ":" \
-    --store ":" \
+    --endpoint ":" \
+    --endpoint ":" \
```

This logic can also be controlled via parameter on QueryAPI. More details below.

@@ -280,6 +280,17 @@ Flags:
                              prefixed with 'dns+' or 'dnssrv+' to detect
                              Thanos API servers through respective DNS
                              lookups.
+  --endpoint-group= ...
+                             Experimental: DNS name of statically configured
+                             Thanos API server groups (repeatable). Targets
+                             resolved from the DNS name will be queried in
+                             a round-robin, instead of a fanout manner.
+                             This flag should be used when connecting a
+                             Thanos Query to HA groups of Thanos components.
+  --endpoint-group-strict= ...
+                             Experimental: DNS name of statically configured
+                             Thanos API server groups (repeatable) that are
+                             always used, even if the health check fails.
  --endpoint-strict= ...
                              Addresses of only statically configured Thanos
                              API servers that are always used, even if
@@ -307,9 +318,9 @@ Flags:
  --grpc-grace-period=2m      Time to wait after an interrupt received for
                              GRPC Server.
  --grpc-server-max-connection-age=60m
-                             The grpc server max connection age.
-                             This controls how often to re-read the tls
-                             certificates and redo the TLS handshake
+                             The grpc server max connection age. This
+                             controls how often to re-establish connections
+                             and redo TLS handshakes.
  --grpc-server-tls-cert=""   TLS Certificate for gRPC server, leave blank to
                              disable TLS
  --grpc-server-tls-client-ca=""
@@ -386,7 +397,7 @@ Flags:
                              no partial_response param is specified.
                              --no-query.partial-response for disabling.
  --query.promql-engine=prometheus
-                             PromQL engine to use.
+                             Default PromQL engine to use.
  --query.replica-label=QUERY.REPLICA-LABEL ...
                              Labels to treat as a replica indicator along
                              which data is deduplicated. Still you will
@@ -429,6 +440,17 @@ Flags:
                              that are always used, even if the health check
                              fails. Useful if you have a caching layer on
                              top.
+  --store.limits.request-samples=0
+                             The maximum samples allowed for a single
+                             Series request. The Series call fails if
+                             this limit is exceeded. 0 means no limit.
+ NOTE: For efficiency the limit is internally + implemented as 'chunks limit' considering each + chunk contains a maximum of 120 samples. + --store.limits.request-series=0 + The maximum series allowed for a single Series + request. The Series call fails if this limit is + exceeded. 0 means no limit. --store.response-timeout=0ms If a Store doesn't send any data in this specified duration then a Store will be ignored diff --git a/docs/components/receive.md b/docs/components/receive.md index fbf4f9a4db..50bf682fad 100644 --- a/docs/components/receive.md +++ b/docs/components/receive.md @@ -12,6 +12,24 @@ For more information please check out [initial design proposal](../proposals-don > NOTE: As the block producer it's important to set correct "external labels" that will identify data block across Thanos clusters. See [external labels](../storage.md#external-labels) docs for details. +## Series distribution algorithms + +The Receive component currently supports two algorithms for distributing timeseries across Receive nodes and can be set using the `receive.hashrings-algorithm` flag. + +### Ketama (recommended) + +The Ketama algorithm is a consistent hashing scheme which enables stable scaling of Receivers without the drawbacks of the `hashmod` algorithm. This is the recommended algorithm for all new installations. + +If you are using the `hashmod` algorithm and wish to migrate to `ketama`, the simplest and safest way would be to set up a new pool of receivers with `ketama` hashrings and start remote-writing to them. Provided you are on the latest Thanos version, old receivers will flush their TSDBs after the configured retention period and will upload blocks to object storage. Once you have verified that is done, decommission the old receivers. + +### Hashmod (discouraged) + +This algorithm uses a `hashmod` function over all labels to decide which receiver is responsible for a given timeseries. This is the default algorithm due to historical reasons. However, its usage for new Receive installations is discouraged since adding new Receiver nodes leads to series churn and memory usage spikes. + +### Hashring management and autoscaling in Kubernetes + +The [Thanos Receive Controller](https://github.com/observatorium/thanos-receive-controller) project aims to automate hashring management when running Thanos in Kubernetes. In combination with the Ketama hashring algorithm, this controller can also be used to keep hashrings up to date when Receivers are scaled automatically using an HPA or [Keda](https://keda.sh/). + ## TSDB stats Thanos Receive supports getting TSDB stats using the `/api/v1/status/tsdb` endpoint. Use the `THANOS-TENANT` HTTP header to get stats for individual Tenants. The output format of the endpoint is compatible with [Prometheus API](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats). @@ -204,9 +222,9 @@ Flags: --grpc-grace-period=2m Time to wait after an interrupt received for GRPC Server. --grpc-server-max-connection-age=60m - The grpc server max connection age. - This controls how often to re-read the tls - certificates and redo the TLS handshake + The grpc server max connection age. This + controls how often to re-establish connections + and redo TLS handshakes. --grpc-server-tls-cert="" TLS Certificate for gRPC server, leave blank to disable TLS --grpc-server-tls-client-ca="" @@ -259,7 +277,9 @@ Flags: the hashring configuration. --receive.hashrings-algorithm=hashmod The algorithm used when distributing series in - the hashrings.
Must be one of hashmod, ketama + the hashrings. Must be one of hashmod, ketama. + Will be overwritten by the tenant-specific + algorithm in the hashring config. --receive.hashrings-file= Path to file that contains the hashring configuration. A watcher is initialized @@ -332,6 +352,17 @@ Flags: Path to YAML file with request logging configuration. See format details: https://thanos.io/tip/thanos/logging.md/#configuration + --store.limits.request-samples=0 + The maximum samples allowed for a single + Series request. The Series call fails if + this limit is exceeded. 0 means no limit. + NOTE: For efficiency the limit is internally + implemented as 'chunks limit' considering each + chunk contains a maximum of 120 samples. + --store.limits.request-series=0 + The maximum series allowed for a single Series + request. The Series call fails if this limit is + exceeded. 0 means no limit. --tracing.config= Alternative to 'tracing.config-file' flag (mutually exclusive). Content of YAML file @@ -358,12 +389,21 @@ Flags: next startup. --tsdb.path="./data" Data directory of TSDB. --tsdb.retention=15d How long to retain raw samples on local - storage. 0d - disables this retention. + storage. 0d - disables the retention + policy (i.e. infinite retention). For more details on how retention is enforced for individual tenants, please refer to the Tenant lifecycle management section in the Receive documentation: https://thanos.io/tip/components/receive.md/#tenant-lifecycle-management + --tsdb.too-far-in-future.time-window=0s + [EXPERIMENTAL] Configures the allowed time + window for ingesting samples too far in the + future. Disabled (0s) by default. Please note + that enabling this flag will reject samples + further ahead than the Receive's local (NTP) + time plus the configured duration, which can + happen due to clock skew in remote write + clients. --tsdb.wal-compression Compress the tsdb WAL. --version Show application version. diff --git a/docs/components/rule.md b/docs/components/rule.md index bef4f56b11..93e5e91cfe 100644 --- a/docs/components/rule.md +++ b/docs/components/rule.md @@ -158,7 +158,7 @@ The most important metrics to alert on are: * `prometheus_rule_evaluation_failures_total`. If greater than 0, it means that the rule failed to be evaluated, which results in either a gap in the rule's data or a potentially ignored alert. This metric might indicate problems on the queryAPI endpoint you use. Alert heavily on this if this happens for longer than your alert thresholds. The `strategy` label will tell you if failures come from rules that tolerate [partial response](#partial-response) or not. -* `prometheus_rule_group_last_duration_seconds < prometheus_rule_group_interval_seconds` If the difference is large, it means that rule evaluation took more time than the scheduled interval. It can indicate that your query backend (e.g Querier) takes too much time to evaluate the query, i.e. that it is not fast enough to fill the rule. This might indicate other problems like slow StoreAPis or too complex query expression in rule. +* `prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds` If the difference is positive, it means that rule evaluation took more time than the scheduled interval, and data for some intervals could be missing. It can indicate that your query backend (e.g. Querier) takes too much time to evaluate the query, i.e. that it is not fast enough to fill the rule. This might indicate other problems like slow StoreAPIs or a too complex query expression in the rule. * `thanos_rule_evaluation_with_warnings_total`.
If you choose to use Rules and Alerts with [partial response strategy's](#partial-response) value as "warn", this metric will tell you how many evaluations ended up with some kind of warning. To see the actual warnings see WARN log level. This might suggest that those evaluations return partial response and might not be accurate. @@ -323,6 +323,10 @@ Flags: from other components. --grpc-grace-period=2m Time to wait after an interrupt received for GRPC Server. + --grpc-server-max-connection-age=60m + The grpc server max connection age. This + controls how often to re-establish connections + and redo TLS handshakes. --grpc-server-tls-cert="" TLS Certificate for gRPC server, leave blank to disable TLS --grpc-server-tls-client-ca="" @@ -453,6 +457,17 @@ Flags: Works only if compaction is disabled on Prometheus. Do it once and then disable the flag when done. + --store.limits.request-samples=0 + The maximum samples allowed for a single + Series request. The Series call fails if + this limit is exceeded. 0 means no limit. + NOTE: For efficiency the limit is internally + implemented as 'chunks limit' considering each + chunk contains a maximum of 120 samples. + --store.limits.request-series=0 + The maximum series allowed for a single Series + request. The Series call fails if this limit is + exceeded. 0 means no limit. --tracing.config= Alternative to 'tracing.config-file' flag (mutually exclusive). Content of YAML file diff --git a/docs/components/sidecar.md b/docs/components/sidecar.md index d5aed39634..9bc309734a 100644 --- a/docs/components/sidecar.md +++ b/docs/components/sidecar.md @@ -82,6 +82,10 @@ Flags: from other components. --grpc-grace-period=2m Time to wait after an interrupt received for GRPC Server. + --grpc-server-max-connection-age=60m + The grpc server max connection age. This + controls how often to re-establish connections + and redo TLS handshakes. --grpc-server-tls-cert="" TLS Certificate for gRPC server, leave blank to disable TLS --grpc-server-tls-client-ca="" @@ -174,6 +178,17 @@ Flags: Works only if compaction is disabled on Prometheus. Do it once and then disable the flag when done. + --store.limits.request-samples=0 + The maximum samples allowed for a single + Series request. The Series call fails if + this limit is exceeded. 0 means no limit. + NOTE: For efficiency the limit is internally + implemented as 'chunks limit' considering each + chunk contains a maximum of 120 samples. + --store.limits.request-series=0 + The maximum series allowed for a single Series + request. The Series call fails if this limit is + exceeded. 0 means no limit. --tracing.config= Alternative to 'tracing.config-file' flag (mutually exclusive). Content of YAML file diff --git a/docs/components/store.md b/docs/components/store.md index fdfec870ab..a8b03a224a 100644 --- a/docs/components/store.md +++ b/docs/components/store.md @@ -36,6 +36,15 @@ Flags: Number of goroutines to use when constructing index-cache.json blocks from object storage. Must be equal or greater than 1. + --bucket-web-label=BUCKET-WEB-LABEL + External block label to use as group title in + the bucket web UI + --cache-index-header Cache TSDB index-headers on disk to reduce + startup time. When set to true, Thanos Store + will download index headers from remote object + storage on startup and create a header file on + disk. Use --data-dir to set the directory in + which index headers will be downloaded. --chunk-pool-size=2GB Maximum size of concurrently allocatable bytes reserved strictly to reuse for chunks in memory.
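To make the NOTE above concrete, here is a hedged Go sketch of the samples-to-chunks conversion it describes (`toChunksLimit` is a hypothetical helper for illustration; the exact rounding used inside Thanos may differ): the request-samples limit is enforced internally as a chunk count, assuming at most 120 samples per chunk.

```go
package main

import "fmt"

// maxSamplesPerChunk is the assumption stated in the flag help:
// each TSDB chunk contains a maximum of 120 samples.
const maxSamplesPerChunk = 120

// toChunksLimit converts a --store.limits.request-samples value into the
// internal chunk-based limit, keeping the "0 means no limit" semantics.
func toChunksLimit(requestSamples uint64) uint64 {
	if requestSamples == 0 {
		return 0
	}
	// Round up so a limit smaller than one full chunk still allows one chunk.
	return (requestSamples + maxSamplesPerChunk - 1) / maxSamplesPerChunk
}

func main() {
	fmt.Println(toChunksLimit(0))     // 0: no limit
	fmt.Println(toChunksLimit(100))   // 1 chunk
	fmt.Println(toChunksLimit(10000)) // 84 chunks (84 * 120 >= 10000)
}
```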
@@ -47,15 +56,20 @@ Flags: purposes (index-header, in-mem cache items and meta.jsons). If removed, no data will be lost, just store will have to rebuild the cache. - NOTE: Putting raw blocks here will not cause - the store to read them. For such use cases use - Prometheus + sidecar. + NOTE: Putting raw blocks here will not + cause the store to read them. For such use + cases use Prometheus + sidecar. Ignored if + --no-cache-index-header option is specified. --grpc-address="0.0.0.0:10901" Listen ip:port address for gRPC endpoints (StoreAPI). Make sure this address is routable from other components. --grpc-grace-period=2m Time to wait after an interrupt received for GRPC Server. + --grpc-server-max-connection-age=60m + The grpc server max connection age. This + controls how often to re-establish connections + and redo TLS handshakes. --grpc-server-tls-cert="" TLS Certificate for gRPC server, leave blank to disable TLS --grpc-server-tls-client-ca="" @@ -171,19 +185,20 @@ Flags: --store.grpc.series-max-concurrency=20 Maximum number of concurrent Series calls. --store.grpc.series-sample-limit=0 - Maximum amount of samples returned via a - single Series call. The Series call fails - if this limit is exceeded. 0 means no limit. - NOTE: For efficiency the limit is internally - implemented as 'chunks limit' considering - each chunk contains 120 samples (it's the max - number of samples each chunk can contain), - so the actual number of samples might be lower, - even though the maximum could be hit. + DEPRECATED: use store.limits.request-samples. --store.grpc.touched-series-limit=0 - Maximum amount of touched series returned via - a single Series call. The Series call fails if + DEPRECATED: use store.limits.request-series. + --store.limits.request-samples=0 + The maximum samples allowed for a single + Series request. The Series call fails if this limit is exceeded. 0 means no limit. + NOTE: For efficiency the limit is internally + implemented as 'chunks limit' considering each + chunk contains a maximum of 120 samples. + --store.limits.request-series=0 + The maximum series allowed for a single Series + request. The Series call fails if this limit is + exceeded. 0 means no limit. --sync-block-duration=3m Repeat interval for syncing the blocks between local and remote view. --tracing.config= @@ -342,6 +357,8 @@ config: key_file: "" server_name: "" insecure_skip_verify: false + cache_size: 0 + master_name: "" ``` The **required** settings are: @@ -356,6 +373,12 @@ While the remaining settings are **optional**: - `dial_timeout`: the redis dial timeout. - `read_timeout`: the redis read timeout. - `write_timeout`: the redis write timeout. +- `cache_size`: size of the in-memory cache used for client-side caching. Client-side caching is enabled when this value is not zero. See [official documentation](https://redis.io/docs/manual/client-side-caching/) for more. It is highly recommended to enable this so that Thanos Store does not need to continuously retrieve data from Redis for repeated requests of the same key(s). + +Here is an example of what effect client-side caching could have: + +![Example of client-side in action - reduced network usage by a lot](../img/rueidis-client-side.png) + - `pool_size`: maximum number of socket connections. - `min_idle_conns`: specifies the minimum number of idle connections which is useful when establishing new connection is slow. - `idle_timeout`: amount of time after which client closes idle connections. Should be less than server's timeout.
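To see why a non-zero `cache_size` helps, here is a generic, hedged Go sketch of the read-through pattern that client-side caching enables (an illustration only; `remoteGet` stands in for a Redis round trip, and this is not Thanos or rueidis code): repeated requests for the same hot key are served from process-local memory instead of the network.

```go
package main

import "fmt"

// remoteGets counts simulated round trips to the remote cache (e.g. Redis).
var remoteGets int

func remoteGet(key string) string {
	remoteGets++
	return "value-for-" + key
}

// clientCache is a read-through, process-local cache: the first read of a
// key goes to the remote store, every repeated read is served locally.
type clientCache struct{ local map[string]string }

func (c *clientCache) get(key string) string {
	if v, ok := c.local[key]; ok {
		return v
	}
	v := remoteGet(key)
	c.local[key] = v
	return v
}

func main() {
	c := &clientCache{local: map[string]string{}}
	for i := 0; i < 1000; i++ {
		c.get("index-header-key") // the same hot key requested repeatedly
	}
	fmt.Println("remote round trips:", remoteGets) // 1 instead of 1000
}
```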
@@ -435,7 +458,7 @@ Here is how it looks like: Example of a groupcache group showing that each Thanos Store instance communicates with all others in the group -Note that with groupcache enabled, new routes are registed on the HTTP server with the prefix `/_groupcache`. Using those routes, anyone can access any kind of data in the configured remote object storage. So, if you are exposing your Thanos Store to the Internet then it is highly recommended to use a reverse proxy in front and disable access to `/_groupcache/...`. +Note that with groupcache enabled, new routes are registered on the HTTP server with the prefix `/_groupcache`. Using those routes, anyone can access any kind of data in the configured remote object storage. So, if you are exposing your Thanos Store to the Internet then it is highly recommended to use a reverse proxy in front and disable access to `/_groupcache/...`. Currently TLS *is* supported, but the client side does no verification of the received certificate. This will be added in the future. HTTP2 over cleartext is also enabled to improve the performance for users that don't use TLS. diff --git a/docs/components/tools.md b/docs/components/tools.md index b137f1e40a..430ff77ae4 100644 --- a/docs/components/tools.md +++ b/docs/components/tools.md @@ -52,7 +52,7 @@ Subcommands: tools bucket cleanup [] Cleans up all blocks marked for deletion. - tools bucket mark --id=ID --marker=MARKER --details=DETAILS + tools bucket mark --id=ID --marker=MARKER [] Mark block for deletion or no-compact in a safe way. NOTE: If the compactor is currently running compacting same block, this operation would be potentially a noop. @@ -161,7 +161,7 @@ Subcommands: tools bucket cleanup [] Cleans up all blocks marked for deletion. - tools bucket mark --id=ID --marker=MARKER --details=DETAILS + tools bucket mark --id=ID --marker=MARKER [] Mark block for deletion or no-compact in a safe way. NOTE: If the compactor is currently running compacting same block, this operation would be potentially a noop. @@ -219,7 +219,7 @@ Flags: --http.config="" [EXPERIMENTAL] Path to the configuration file that can enable TLS or authentication for all HTTP endpoints. - --label=LABEL Prometheus label to use as timeline title + --label=LABEL External block label to use as group title --log.format=logfmt Log format to use. Possible options: logfmt or json. --log.level=info Log filtering level. @@ -507,81 +507,79 @@ Replicate data from one object storage to another. NOTE: Currently it works only with Thanos blocks (meta.json has to have Thanos metadata). Flags: - --compaction=1... ... Only blocks with these compaction levels will - be replicated. Repeated flag. - -h, --help Show context-sensitive help (also try - --help-long and --help-man). + --compaction=1... ... Only blocks with these compaction levels will be + replicated. Repeated flag. + -h, --help Show context-sensitive help (also try --help-long + and --help-man). --http-address="0.0.0.0:10902" - Listen host:port for HTTP endpoints. - --http-grace-period=2m Time to wait after an interrupt received for - HTTP Server. - --http.config="" [EXPERIMENTAL] Path to the configuration file - that can enable TLS or authentication for all - HTTP endpoints. - --id=ID ... Block to be replicated to the destination - bucket. IDs will be used to match blocks and - other matchers will be ignored. When specified, - this command will be run only once after - successful replication. Repeated field + Listen host:port for HTTP endpoints.
+ --http-grace-period=2m Time to wait after an interrupt received for HTTP + Server. + --http.config="" [EXPERIMENTAL] Path to the configuration file + that can enable TLS or authentication for all HTTP + endpoints. + --id=ID ... Block to be replicated to the destination bucket. + IDs will be used to match blocks and other + matchers will be ignored. When specified, this + command will be run only once after successful + replication. Repeated field --ignore-marked-for-deletion - Do not replicate blocks that have deletion - mark. - --log.format=logfmt Log format to use. Possible options: logfmt or - json. - --log.level=info Log filtering level. - --matcher=key="value" ... Only blocks whose external labels exactly match - this matcher will be replicated. + Do not replicate blocks that have deletion mark. + --log.format=logfmt Log format to use. Possible options: logfmt or + json. + --log.level=info Log filtering level. + --matcher=MATCHER Blocks whose external labels match this matcher + will be replicated. All Prometheus matchers are + supported, including =, !=, =~ and !~. --max-time=9999-12-31T23:59:59Z - End of time range limit to replicate. - Thanos Replicate will replicate only metrics, - which happened earlier than this value. - Option can be a constant time in RFC3339 format - or time duration relative to current time, such - as -1d or 2h45m. Valid duration units are ms, - s, m, h, d, w, y. + End of time range limit to replicate. Thanos + Replicate will replicate only metrics, which + happened earlier than this value. Option can be a + constant time in RFC3339 format or time duration + relative to current time, such as -1d or 2h45m. + Valid duration units are ms, s, m, h, d, w, y. --min-time=0000-01-01T00:00:00Z - Start of time range limit to replicate. - Thanos Replicate will replicate only metrics, - which happened later than this value. Option - can be a constant time in RFC3339 format or - time duration relative to current time, such as - -1d or 2h45m. Valid duration units are ms, s, - m, h, d, w, y. + Start of time range limit to replicate. Thanos + Replicate will replicate only metrics, which + happened later than this value. Option can be a + constant time in RFC3339 format or time duration + relative to current time, such as -1d or 2h45m. + Valid duration units are ms, s, m, h, d, w, y. --objstore-to.config= - Alternative to 'objstore-to.config-file' - flag (mutually exclusive). Content of - YAML file that contains object store-to - configuration. See format details: - https://thanos.io/tip/thanos/storage.md/#configuration - The object storage which replicate data to. + Alternative to 'objstore-to.config-file' + flag (mutually exclusive). Content of + YAML file that contains object store-to + configuration. See format details: + https://thanos.io/tip/thanos/storage.md/#configuration + The object storage to replicate data to. --objstore-to.config-file= - Path to YAML file that contains object - store-to configuration. See format details: - https://thanos.io/tip/thanos/storage.md/#configuration - The object storage which replicate data to. + Path to YAML file that contains object + store-to configuration. See format details: + https://thanos.io/tip/thanos/storage.md/#configuration + The object storage to replicate data to. --objstore.config= - Alternative to 'objstore.config-file' - flag (mutually exclusive). Content of - YAML file that contains object store - configuration.
See format details: - https://thanos.io/tip/thanos/storage.md/#configuration + Alternative to 'objstore.config-file' + flag (mutually exclusive). Content of + YAML file that contains object store + configuration. See format details: + https://thanos.io/tip/thanos/storage.md/#configuration --objstore.config-file= - Path to YAML file that contains object - store configuration. See format details: - https://thanos.io/tip/thanos/storage.md/#configuration - --resolution=0s... ... Only blocks with these resolutions will be - replicated. Repeated flag. - --single-run Run replication only one time, then exit. + Path to YAML file that contains object + store configuration. See format details: + https://thanos.io/tip/thanos/storage.md/#configuration + --resolution=0s... ... Only blocks with these resolutions will be + replicated. Repeated flag. + --single-run Run replication only one time, then exit. --tracing.config= - Alternative to 'tracing.config-file' flag - (mutually exclusive). Content of YAML file - with tracing configuration. See format details: - https://thanos.io/tip/thanos/tracing.md/#configuration + Alternative to 'tracing.config-file' flag + (mutually exclusive). Content of YAML file + with tracing configuration. See format details: + https://thanos.io/tip/thanos/tracing.md/#configuration --tracing.config-file= - Path to YAML file with tracing - configuration. See format details: - https://thanos.io/tip/thanos/tracing.md/#configuration - --version Show application version. + Path to YAML file with tracing + configuration. See format details: + https://thanos.io/tip/thanos/tracing.md/#configuration + --version Show application version. ``` @@ -681,7 +679,7 @@ prefix: "" ``` ```$ mdox-exec="thanos tools bucket mark --help" -usage: thanos tools bucket mark --id=ID --marker=MARKER --details=DETAILS +usage: thanos tools bucket mark --id=ID --marker=MARKER [] Mark block for deletion or no-compact in a safe way. NOTE: If the compactor is currently running compacting same block, this operation would be potentially a @@ -705,6 +703,7 @@ Flags: Path to YAML file that contains object store configuration. See format details: https://thanos.io/tip/thanos/storage.md/#configuration + --remove Remove the marker. --tracing.config= Alternative to 'tracing.config-file' flag (mutually exclusive). Content of YAML file diff --git a/docs/contributing/coding-style-guide.md b/docs/contributing/coding-style-guide.md index 7de71381ec..b50983d62b 100644 --- a/docs/contributing/coding-style-guide.md +++ b/docs/contributing/coding-style-guide.md @@ -283,16 +283,16 @@ NOTE: Why you cannot just allocate slice and release and in new iteration alloca ```go var messages []string for _, msg := range recv { - messages = append(messages, msg) - - if len(messages) > maxMessageLen { - marshalAndSend(messages) - // This creates new array. Previous array - // will be garbage collected only after - // some time (seconds), which - // can create enormous memory pressure. - messages = []string - } + messages = append(messages, msg) + + if len(messages) > maxMessageLen { + marshalAndSend(messages) + // This creates new array. Previous array + // will be garbage collected only after + // some time (seconds), which + // can create enormous memory pressure. 
+ messages = []string{} + } } ``` @@ -523,26 +523,26 @@ func OpenSomeFileAndDoSomeStuff() (*os.File, error) { ```go func OpenSomeFileAndDoSomeStuff() (f *os.File, err error) { - f, err = os.OpenFile("file.txt", os.O_RDONLY, 0) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - runutil.CloseWithErrCapture(&err, f, "close file") - } - } + f, err = os.OpenFile("file.txt", os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + runutil.CloseWithErrCapture(&err, f, "close file") + } + }() - if err := doStuff1(); err != nil { - return nil, err - } - if err := doStuff2(); err != nil { - return nil, err - } - if err := doStuff232241(); err != nil { - return nil, err - } - return f, nil + if err := doStuff1(); err != nil { + return nil, err + } + if err := doStuff2(); err != nil { + return nil, err + } + if err := doStuff232241(); err != nil { + return nil, err + } + return f, nil } ``` diff --git a/docs/contributing/community.md b/docs/contributing/community.md index bd27c14dcf..a2205ea688 100644 --- a/docs/contributing/community.md +++ b/docs/contributing/community.md @@ -14,7 +14,7 @@ Thanos Community Office Hour (previously Contributor Office Hour) is a public we > NOTE: If no agenda items will be added 1h before the meeting, the meeting will be cancelled. -**Meeting Time:** Every-second Thursday 15:00 CET +**Meeting Time:** Every-second Thursday 14:00 GMT **Calendar Event:** https://www.cncf.io/calendar/ @@ -78,12 +78,10 @@ Schedule: | Month | Shepherd (GitHub handle) | |---------|:-------------------------| -| 2022.13 | TBD | -| 2022.12 | TBD | -| 2022.11 | TBD | -| 2022.10 | TBD | -| 2022.09 | `@bwplotka` | -| 2022.08 | `@bwplotka` | +| 2023.03 | TBD | +| 2023.02 | TBD | +| 2023.01 | TBD | +| 2022.12 | `@bwplotka` | Each month we rotate the role of Community Office Shepherds among Team Members. This role is focused on leading the safe and friendly discussions during our [Community Office Hours](#thanos-community-office-hours). @@ -95,7 +93,7 @@ Each month we rotate the role of Community Office Shepherds among Team Members. The Same Day Morning Before Meeting: -* Add entry for today on the [agenda document](https://docs.google.com/document/d/137XnxfOT2p1NcNUq6NWZjwmtlSdA6Wyti86Pd6cyQhs. +* Add entry for today on the [agenda document](https://docs.google.com/document/d/137XnxfOT2p1NcNUq6NWZjwmtlSdA6Wyti86Pd6cyQhs). * Announce meeting and call for agenda the same day morning on #thanos-dev and Twitter (`@ThanosMetrics` or your own account retweeted by ThanosMetrics).
30 minutes Before: diff --git a/docs/contributing/mentorship.md b/docs/contributing/mentorship.md index 05ebd41de0..5fb0f501ed 100644 --- a/docs/contributing/mentorship.md +++ b/docs/contributing/mentorship.md @@ -6,8 +6,8 @@ Both Thanos and Prometheus projects participate in various, periodic mentoring p Programs we participated / are participating: -- [LFX Mentorship (previously Community Bridge)](https://github.com/cncf/mentoring/tree/master/lfx-mentorship) -- [Google Summer of Code](https://github.com/cncf/mentoring/tree/master/summerofcode) +- [LFX Mentorship (previously Community Bridge)](https://github.com/cncf/mentoring/tree/main/programs/lfx-mentorship) +- [Google Summer of Code](https://github.com/cncf/mentoring/tree/main/programs/summerofcode) - [Red Hat Beyond](https://research.redhat.com/blog/2020/05/24/open-source-development-course-and-devops-methodology/) ## For Mentees diff --git a/docs/getting-started.md b/docs/getting-started.md index 38dda21708..6445f29240 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -79,7 +79,7 @@ If you want to add yourself to this list, let us know! ## Deploying Thanos -* [WIP] Detailed, free, in-browser interactive tutorial [as Killercoda Thanos Course](https://killercoda.com/thanos/) +* Detailed, free, in-browser interactive tutorial [as Killercoda Thanos Course](https://killercoda.com/thanos/) * [Quick Tutorial](quick-tutorial.md) on Thanos website. ## Operating @@ -88,34 +88,52 @@ See up to date [jsonnet mixins](https://github.com/thanos-io/thanos/tree/main/mi ## Talks -* 10.2021: [Adopting Thanos gradually across all of LastPass infrastructures](https://www.youtube.com/watch?v=Ddq8m04594A) -* 12.2020: [Absorbing Thanos Infinite Powers for Multi-Cluster Telemetry](https://www.youtube.com/watch?v=6Nx2BFyr7qQ) -* 12.2020: [Turn It Up to a Million: Ingesting Millions of Metrics with Thanos Receive](https://www.youtube.com/watch?v=5MJqdJq41Ms) -* 02.2018: [Very first Prometheus Meetup Slides](https://www.slideshare.net/BartomiejPotka/thanos-global-durable-prometheus-monitoring) -* 02.2019: [FOSDEM + demo](https://fosdem.org/2019/schedule/event/thanos_transforming_prometheus_to_a_global_scale_in_a_seven_simple_steps/) -* 03.2019: [Alibaba Cloud user story](https://www.youtube.com/watch?v=ZS6zMksfipc) -* 09.2019: [CloudNative Warsaw Slides](https://docs.google.com/presentation/d/1cKpbJY3jIAtr03M-zcNujwBA38_LDj7NqE4LjNfvglE/edit?usp=sharing) -* 11.2019: [CloudNative Deep Dive](https://www.youtube.com/watch?v=qQN0N14HXPM) -* 11.2019: [CloudNative Intro](https://www.youtube.com/watch?v=m0JgWlTc60Q) -* 2019: [Prometheus in Practice: HA with Thanos](https://www.slideshare.net/ThomasRiley45/prometheus-in-practice-high-availability-with-thanos-devopsdays-edinburgh-2019) +* 2022 + * [Story of Correlation: Integrating Thanos Metrics with Observability Signals](https://www.youtube.com/watch?v=rWFb01GW0mQ) + * [Running the Observability As a Service For Your Teams With Thanos](https://www.youtube.com/watch?v=I4Mfyfd_4M8) + * [Monitoring multiple Kubernetes Clusters with Thanos](https://www.youtube.com/watch?v=V4v-c0VeqLw) + * [Thanos: Scaling Prometheus 101](https://www.youtube.com/watch?v=iN6DR28gAyQ) + * [MaaS for the Masses: Build Your Monitoring-as-a-Service Solution With Prometheus](https://www.youtube.com/watch?v=EFPPic9dBS4) + +* 2021 + * [Adopting Thanos gradually across all of LastPass infrastructures](https://www.youtube.com/watch?v=Ddq8m04594A) + * [Using Thanos to gain a unified way to query over multiple 
clusters](https://www.youtube.com/watch?v=yefffBLuVh0) + * [Thanos: Easier Than Ever to Scale Prometheus and Make It Highly Available](https://www.youtube.com/watch?v=mtwwUqeIHAw) + +* 2020 + * [Absorbing Thanos Infinite Powers for Multi-Cluster Telemetry](https://www.youtube.com/watch?v=6Nx2BFyr7qQ) + * [Turn It Up to a Million: Ingesting Millions of Metrics with Thanos Receive](https://www.youtube.com/watch?v=5MJqdJq41Ms) + * [Thanos: Cheap, Simple and Scalable Prometheus](https://www.youtube.com/watch?v=Wroo1n5GWwg) + * [Thanos: Prometheus at Scale!](https://www.youtube.com/watch?v=q9j8vpgFkoY) + * [Introduction to Thanos](https://www.youtube.com/watch?v=j4TAGO019HU) + * [Using Thanos as a long term storage for your Prometheus metrics](https://www.youtube.com/watch?v=cedzqLgRgaM) + +* 2019 + * [FOSDEM + demo](https://fosdem.org/2019/schedule/event/thanos_transforming_prometheus_to_a_global_scale_in_a_seven_simple_steps/) + * [Alibaba Cloud user story](https://www.youtube.com/watch?v=ZS6zMksfipc) + * [CloudNative Warsaw Slides](https://docs.google.com/presentation/d/1cKpbJY3jIAtr03M-zcNujwBA38_LDj7NqE4LjNfvglE/edit?usp=sharing) + * [CloudNative Deep Dive](https://www.youtube.com/watch?v=qQN0N14HXPM) + * [CloudNative Intro](https://www.youtube.com/watch?v=m0JgWlTc60Q) + * [Prometheus in Practice: HA with Thanos](https://www.slideshare.net/ThomasRiley45/prometheus-in-practice-high-availability-with-thanos-devopsdays-edinburgh-2019) + +* 2018 + * [Very first Prometheus Meetup Slides](https://www.slideshare.net/BartomiejPotka/thanos-global-durable-prometheus-monitoring) ## Blog posts * 2022: - + * [Thanos at Medallia: A Hybrid Architecture Scaled to Support 1 Billion+ Series Across 40+ Data Centers](https://thanos.io/blog/2022-09-08-thanos-at-medallia/) + * [Deploy Thanos Receive with native OCI Object Storage on Oracle Kubernetes Engine](https://medium.com/@lmukadam/deploy-thanos-receive-with-native-oci-object-storage-on-kubernetes-829326ea0bc6) * [Leveraging Consul for Thanos Query Discovery](https://nicolastakashi.medium.com/leveraging-consul-for-thanos-query-discovery-34212d496c88) * 2021: - * [Adopting Thanos at LastPass](https://krisztianfekete.org/adopting-thanos-at-lastpass/) * 2020: - * [Banzai Cloud user story](https://banzaicloud.com/blog/multi-cluster-monitoring/) * [Monitoring the Beat microservices: A tale of evolution](https://build.thebeat.co/monitoring-the-beat-microservices-a-tale-of-evolution-4e246882606e) * 2019: - * [Metric monitoring architecture](https://improbable.io/blog/thanos-architecture-at-improbable) * [Red Hat user story](https://blog.openshift.com/federated-prometheus-with-thanos-receive/) * [HelloFresh blog posts part 1](https://engineering.hellofresh.com/monitoring-at-hellofresh-part-1-architecture-677b4bd6b728) @@ -125,7 +143,6 @@ See up to date [jsonnet mixins](https://github.com/thanos-io/thanos/tree/main/mi * [Thanos via Prometheus Operator](https://kkc.github.io/2019/02/10/prometheus-operator-with-thanos/) * 2018: - * [Introduction blog post](https://improbable.io/blog/thanos-prometheus-at-scale) * [Monzo user story](https://monzo.com/blog/2018/07/27/how-we-monitor-monzo) * [Banzai Cloud hand's on](https://banzaicloud.com/blog/hands-on-thanos/) diff --git a/docs/img/bottleneck-globalsort.png b/docs/img/bottleneck-globalsort.png new file mode 100644 index 0000000000..bc1c59a28f Binary files /dev/null and b/docs/img/bottleneck-globalsort.png differ diff --git a/docs/img/distributed-execution-proposal-1.png b/docs/img/distributed-execution-proposal-1.png 
new file mode 100644 index 0000000000..f4dbb7c5b8 Binary files /dev/null and b/docs/img/distributed-execution-proposal-1.png differ diff --git a/docs/img/distributed-execution-proposal-2.png b/docs/img/distributed-execution-proposal-2.png new file mode 100644 index 0000000000..3e6e3c0b74 Binary files /dev/null and b/docs/img/distributed-execution-proposal-2.png differ diff --git a/docs/img/distributed-execution-proposal-3.png b/docs/img/distributed-execution-proposal-3.png new file mode 100644 index 0000000000..1e9e520b37 Binary files /dev/null and b/docs/img/distributed-execution-proposal-3.png differ diff --git a/docs/img/distributed-execution-proposal-4.png b/docs/img/distributed-execution-proposal-4.png new file mode 100644 index 0000000000..df613b375d Binary files /dev/null and b/docs/img/distributed-execution-proposal-4.png differ diff --git a/docs/img/distributed-execution-proposal-5.png b/docs/img/distributed-execution-proposal-5.png new file mode 100644 index 0000000000..ce333fb6df Binary files /dev/null and b/docs/img/distributed-execution-proposal-5.png differ diff --git a/docs/img/distributed-execution-proposal-6.png b/docs/img/distributed-execution-proposal-6.png new file mode 100644 index 0000000000..1624d300f1 Binary files /dev/null and b/docs/img/distributed-execution-proposal-6.png differ diff --git a/docs/img/globalsort-nonoptimized.png b/docs/img/globalsort-nonoptimized.png new file mode 100644 index 0000000000..0c81e2e45b Binary files /dev/null and b/docs/img/globalsort-nonoptimized.png differ diff --git a/docs/img/globalsort-optimized.png b/docs/img/globalsort-optimized.png new file mode 100644 index 0000000000..7085f9c229 Binary files /dev/null and b/docs/img/globalsort-optimized.png differ diff --git a/docs/img/rueidis-client-side.png b/docs/img/rueidis-client-side.png new file mode 100644 index 0000000000..f28b54d221 Binary files /dev/null and b/docs/img/rueidis-client-side.png differ diff --git a/docs/operating/cross-cluster-tls-communication.md b/docs/operating/cross-cluster-tls-communication.md index 81f22cf752..77d370f7f5 100644 --- a/docs/operating/cross-cluster-tls-communication.md +++ b/docs/operating/cross-cluster-tls-communication.md @@ -72,15 +72,15 @@ metadata: - '--http-address=0.0.0.0:10902' - '--query.replica-label=replica' - >- - --store=dnssrv+_grpc._tcp.thanos-global-test-storegateway.thanos-global.svc.cluster.local + --endpoint=dnssrv+_grpc._tcp.thanos-global-test-storegateway.thanos-global.svc.cluster.local - >- - --store=dnssrv+_grpc._tcp.thanos-global-test-sidecar.thanos-global.svc.cluster.local + --endpoint=dnssrv+_grpc._tcp.thanos-global-test-sidecar.thanos-global.svc.cluster.local - >- - --store=dnssrv+_grpc._tcp.thanos-global-test-ruler.thanos-global.svc.cluster.local + --endpoint=dnssrv+_grpc._tcp.thanos-global-test-ruler.thanos-global.svc.cluster.local - >- - --store=dnssrv+_[port_name]._tcp.[service-name].[namespace].svc.cluster.local + --endpoint=dnssrv+_[port_name]._tcp.[service-name].[namespace].svc.cluster.local - >- - --store=dnssrv+_[port_name_2]._tcp.[service-name].[namespace].svc.cluster.local + --endpoint=dnssrv+_[port_name_2]._tcp.[service-name].[namespace].svc.cluster.local ports: - name: http containerPort: 10902 diff --git a/docs/proposals-accepted/202012-receive-split.md b/docs/proposals-accepted/202012-receive-split.md index 82d6687247..abcffd685f 100644 --- a/docs/proposals-accepted/202012-receive-split.md +++ b/docs/proposals-accepted/202012-receive-split.md @@ -52,7 +52,7 @@ This allows us to (optionally) model 
deployment in a way that avoid expensive re In comparison to previous proposal (as mentioned in [alternatives](#previous-proposal-separate-receive-route-command) we have big advantages: -1. We can *reduce number of components* in Thanos system, we can reuse similar component flags and documentation. Users has to learn about one less command and in result Thanos design is much more approachable. Less components mean less maintainance, code and other implicit duties: Separate changelogs, issue confusions, boilerplates, etc. +1. We can *reduce the number of components* in the Thanos system, and we can reuse similar component flags and documentation. Users have to learn about one less command and as a result the Thanos design is much more approachable. Fewer components mean less maintenance, code and other implicit duties: separate changelogs, issue confusion, boilerplate, etc. 2. Allow consistent pattern with Query. We don't have separate StoreAPI component for proxying, we have that baked into Querier. This has been proven to be flexible and understandable, so I would like to propose similar pattern in Receiver. 3. This is more future proof for potential advanced cases like *chain of routers -> receivers -> routers -> receivers* for federated writes, so ***trees with depth n***. diff --git a/docs/proposals-accepted/202106-automated-per-endpoint-mTLS.md b/docs/proposals-accepted/202106-automated-per-endpoint-mTLS.md index 9c2e9397a5..c3f9b3cf73 100644 --- a/docs/proposals-accepted/202106-automated-per-endpoint-mTLS.md +++ b/docs/proposals-accepted/202106-automated-per-endpoint-mTLS.md @@ -81,4 +81,4 @@ While reading the *cert_file* on each handshake we can improve the performance b * Implement `--endpoint.config`. * Implement gRPC certificate rotation as designed. * Add e2e tests for above changes. -* We would remove the support for seperate CLI options `--secure`, `--cert`, `--key`, `--caCert`, `--serverName` after few releases. As they are already covered in `--endpoint-config`. +* We would remove the support for separate CLI options `--secure`, `--cert`, `--key`, `--caCert`, `--serverName` after a few releases, as they are already covered by `--endpoint-config`. diff --git a/docs/proposals-accepted/202209-receive-tenant-external-labels.md b/docs/proposals-accepted/202209-receive-tenant-external-labels.md new file mode 100644 index 0000000000..b36c668151 --- /dev/null +++ b/docs/proposals-accepted/202209-receive-tenant-external-labels.md @@ -0,0 +1,86 @@ +--- +type: proposal +title: Allow statically specifying tenant-specific external labels in Receivers +status: accepted +owner: haanhvu +menu: proposals-accepted +--- + +## 1 Related links/tickets + +https://github.com/thanos-io/thanos/issues/5434 + +## 2 Why + +We would like to do cross-tenant activities like grouping tenants' blocks or querying tenants that share the same attributes. Tenants' external labels can help us do that. + +## 3 Pitfalls of the current solution + +Currently, we can only add external labels to the Receiver itself, not to each tenant in the Receiver. So we can't do cross-tenant activities like grouping tenants' blocks or querying tenants that share the same attributes.
+ +## 4 Goals + +* Allow users to statically add arbitrary tenants’ external labels in the easiest way possible +* Allow users to statically change arbitrary tenants’ external labels in the easiest way possible +* Changes in tenants’ external labels are handled correctly +* Backward compatibility (e.g., with Receiver’s external labels) is assured +* Tenants’ external labels are handled separately in RouterIngestor, RouterOnly, and IngestorOnly modes + +## 5 Non-goals + +* Logically split RouterOnly and IngestorOnly modes (Issue [#5643](https://github.com/thanos-io/thanos/issues/5643)): If RouterOnly and IngestorOnly modes are logically split, implementing tenants’ external labels in RouterOnly and IngestorOnly modes would be less challenging. However, fixing this issue will not be a goal of this proposal, because it's not directly related to tenants' external labels. Regardless, fixing this issue before implementing tenants’ external labels in RouterOnly and IngestorOnly modes would be the best-case scenario. +* Dynamically extract tenants' external labels from time series' data: This proposal only covers statically specifying tenants' external labels. Dynamically receiving and extracting tenants' external labels from time series' data will be added as a follow-up to this proposal. + +## 6 Audience + +Users who are admin personas and need to perform admin operations on Thanos for multiple tenants + +## 7 How + +In the hashring config, there will be a new field `external_labels`. Something like this: + +``` + [ + { + "hashring": "tenant-a-b", + "endpoints": ["127.0.0.1:10901"], + "tenants": ["tenant-a", "tenant-b"], + "external_labels": ["key1=value1", "key2=value2", "key3=value3"] + } + ] +``` + +In Receivers' MultiTSDB, external labels will be extended to each corresponding tenant's label set when the tenant's TSDB is started (a rough sketch of this merge follows at the end of this section). + +The next thing we have to do is handle changes to tenants' external labels. That is, whenever users change tenants' external labels, Receivers' MultiTSDB will apply those changes to each corresponding tenant's label set. + +We will handle the cases of hard tenancy first. Once tenants' external labels can be handled in those cases, we will move to soft tenancy cases. + +Tenants’ external labels will be first implemented in RouterIngestor, since this is the most commonly used mode. + +After that, we can implement tenants’ external labels in RouterOnly and IngestorOnly modes. As stated above, the best-case scenario would be logically splitting RouterOnly and IngestorOnly (Issue [#5643](https://github.com/thanos-io/thanos/issues/5643)) before implementing tenants’ external labels in each. + +For the tests, the foremost ones are: defining one or multiple tenants’ external labels correctly, handling changes in tenants’ external labels correctly, backward compatibility with Receiver’s external labels, and the shipper detecting and uploading tenants’ external labels correctly to block storage. We may add more tests in the future, but currently these are the most important ones.
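+Below is a minimal, hedged Go sketch of the merge rule described above (an illustration only: `mergeExternalLabels` and its map-based signature are assumptions of this sketch, not the actual MultiTSDB code): Receiver-wide external labels are extended with the tenant-specific ones from the hashring config, and on a name clash the tenant-specific value is assumed to win.
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+// mergeExternalLabels merges Receiver-wide external labels with the
+// tenant-specific ones from the hashring config. On a name clash the
+// tenant-specific value overrides the Receiver-wide one (an assumption
+// of this sketch).
+func mergeExternalLabels(receiverLabels, tenantLabels map[string]string) map[string]string {
+	merged := make(map[string]string, len(receiverLabels)+len(tenantLabels))
+	for name, value := range receiverLabels {
+		merged[name] = value
+	}
+	for name, value := range tenantLabels {
+		merged[name] = value // tenant-specific label wins
+	}
+	return merged
+}
+
+func main() {
+	merged := mergeExternalLabels(
+		map[string]string{"receive": "receive-0", "region": "eu"},
+		map[string]string{"key1": "value1", "region": "us"},
+	)
+	names := make([]string, 0, len(merged))
+	for name := range merged {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	for _, name := range names {
+		fmt.Printf("%s=%s\n", name, merged[name])
+	}
+}
+```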
+ +## 8 Implementation plan + +* Add a new `external_labels` field in the hashring config +* Allow MultiTSDB to extend external labels to each corresponding tenant's label set +* Allow MultiTSDB to update each tenant's label set whenever its external labels change +* Handle external labels in soft tenancy cases +* Implement tenants’ external labels in RouterOnly +* Implement tenants’ external labels in IngestorOnly + +## 9 Test plan + +* Defining one or multiple tenants’ external labels correctly +* Handling changes in tenants’ external labels correctly +* Backward compatibility with Receiver’s external labels +* Shipper detecting and uploading tenants’ external labels correctly to block storage + +## 10 Follow-up + +* Dynamically extract tenants' external labels from time series' data: Once statically specifying tenants' external labels has been implemented and tested successfully and completely, we can think of implementing dynamically receiving and extracting tenants' external labels from time series' data. +* Automatically making use of tenants' external labels: We can think of the most useful use cases with tenants' external labels and whether we should automate any of those use cases. One typical case is automatically grouping new blocks based on tenants' external labels. + +(Both of these are Ben's ideas expressed [here](https://github.com/thanos-io/thanos/pull/5720#pullrequestreview-1167923565).) diff --git a/docs/proposals-accepted/20221129-avoid-global-sort.md b/docs/proposals-accepted/20221129-avoid-global-sort.md new file mode 100644 index 0000000000..19de5d88b5 --- /dev/null +++ b/docs/proposals-accepted/20221129-avoid-global-sort.md @@ -0,0 +1,183 @@ +## Avoid Global Sort on Querier Select + +* **Owners:** + * @bwplotka, @fpetkovski + +* **Related Tickets:** + * https://github.com/thanos-io/thanos/issues/5719 + * https://github.com/thanos-io/thanos/commit/043c5bfcc2464d3ae7af82a1428f6e0d6510f020 + * https://github.com/thanos-io/thanos/pull/5796 also alternatives (https://github.com/thanos-io/thanos/pull/5692) + +> TL;DR: We propose a solution that saves query and query_range latency on common setups where deduplication is on and data is replicated. Initial benchmarks indicate ~20% latency improvement for data replicated 2 times. +> +> To make it work we propose adding a field to the Store API Series call, "WithoutReplicaLabels []string", guarded by a "SupportsWithoutReplicaLabels" field propagated via the Info API. It allows telling store implementations to remove the given labels (if they are replica labels) from results, preserving sorting by labels after the removal. +> +> NOTE: This change will break unlikely setups that deduplicate on a non-replica label (misconfiguration or wrong setup). + +## Glossary + +**replica**: We use the term "replica labels" for a subset of (or equal to) "external labels": labels that indicate the unique replication group for our data, usually taken from metadata about the origin/source. + +## Why + +Currently, we spend a lot of storage selection CPU time on re-sorting the resulting time series needed for deduplication (exactly in [`sortDedupLabels`](https://github.com/thanos-io/thanos/blob/main/pkg/query/querier.go#L400)). However, given the distributed effort and the current sorting guarantees of StoreAPI, there is potential to reduce the sorting effort and/or distribute it to leaves or multiple threads. + +### Pitfalls of the current solution + +Current flow can be represented as follows: + +![img.png](../img/bottleneck-globalsort.png) + +1. Querier PromQL Engine selects data.
At this point we know if users asked for deduplicated data or not and [what replica labels to use](https://thanos.io/tip/components/query.md/#deduplication-replica-labels). +2. Querier selection asks the internal, in-process Store API, which is represented by the Proxy code component. It asks the relevant store APIs for data, using StoreAPI.Series. +3. Responses are pulled and k-way merged by the time series. StoreAPI guarantees the responses are sorted by series and the external labels (including replica) are included in the time series. +* There was a [bug in receiver](https://github.com/thanos-io/thanos/commit/043c5bfcc2464d3ae7af82a1428f6e0d6510f020#diff-b3f73a54121d88de203946e84955da7027e3cfce7f0cd82580bf215ac57c02f4) that caused series to be not sorted when returned. Fixed in v0.29.0. +4. Querier selection waits until all responses are buffered and then it deduplicates the data, given the requested replica labels. Before that is done, it globally sorts the data, moving the replica labels to the end of each time series' label set in `sortDedupLabels`. +5. Data is deduplicated using the `dedup` package. + +The pitfall is that the global sort can in many cases be completely avoided, even when deduplication is enabled. Many StoreAPIs can drop certain replica labels without needing to re-sort, and others can k-way merge different data sets without certain replica labels at no extra cost. + +## Goals + +Goals and use cases for the solution as proposed in [How](#how): + +* Avoid the expensive global sort of all series before passing them to the PromQL engine in Querier. +* Allow a StoreAPI implementation to announce whether it supports this sorting feature. The rationale is that we want to make it possible to create simpler StoreAPI servers, if an operator wants to trade that off against latency. +* Clarify the behaviour in tricky cases where there is an overlap of replica labels between what's in the TSDB vs what's attached as external labels. +* Ensure this change can be rolled out in a compatible way. + +## Non-Goals + +* Allow consuming series in a streamed way in the PromQL engine. + * While this pitfall (global sort) blocks the above idea, it's currently still more beneficial to pull all series upfront (eager approach) as soon as possible. This is due to the current PromQL architecture, which requires this info upfront for query planning and execution. We don't plan to change it yet, thus there is no need to push explicitly for that. + +## How + +### Invariants + +To understand the proposal, let's go through some important, yet perhaps not trivial, facts: + +* For a StoreAPI, or generally data that belongs to one replica, excluding a certain replica label during sorting does not impact the sort order of the returned series. This means any feature that desires a different sort for replicated series is generally a noop for sidecars, rulers, single-tenant receivers or within a single block (or one stream of blocks). +* You can't sort unsorted data in a streaming way. Furthermore, it's not possible to detect that data is unsorted, unless we fetch and buffer all series. +* In v0.29 and below, you can deduplicate on any labels, including non-replica labels. This is assumed to be semantically wrong, yet someone might depend on it. +* Thanos never handled overlaps of chunks within one store API response. + +### Solution + +To avoid the global sort, we propose pushing the removal of the requested replica labels, and the corresponding sorting, down to the store API level.
+ +For the first step (which is required for compatibility purposes anyway), we propose logic in the proxy Store API implementation that, when deduplication is requested with given replica labels, will: + +* Fall back to eager retrieval. +* Remove the given labels from series (this can remove non-replica labels too, as is possible now). +* Re-sort all series (just on the local level). + +Thanks to that, the k-way merge will sort based on series without replica labels, which allows querier dedup to be done in a streaming way without a global sort and replica label removal. + +As the second step, we propose adding a `without_replica_labels` field to the `SeriesRequest` proto message of the Store API: + +```protobuf +message SeriesRequest { + // ... + + // without_replica_labels are replica labels which have to be excluded from series set results. + // The sorting requirement has to be preserved, so series should be sorted without those labels. + // If the requested label is NOT a replica label (labels that identify replication group) it should be not affected by + // this setting (label should be included in sorting and response). + // It is the server responsibility to detect and track what is replica label and what is not. + // This allows faster deduplication by clients. + // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let the client know + // the server supports it. + repeated string without_replica_labels = 14; +} +``` + +Since it's a new field, for compatibility we also propose adding `supports_without_replica_labels` in the InfoAPI to indicate explicitly that a server supports it. + +```protobuf +// StoreInfo holds the metadata related to Store API exposed by the component. +message StoreInfo { + reserved 4; // Deprecated send_sorted, replaced by supports_without_replica_labels now. + + int64 min_time = 1; + int64 max_time = 2; + bool supports_sharding = 3; + + // supports_without_replica_labels means this store supports without_replica_labels of StoreAPI.Series. + bool supports_without_replica_labels = 5; +} +``` + +Thanks to that, implementations can optionally support this feature. We can make all Thanos StoreAPIs support it, which will allow faster deduplication queries on all types of setups. + +In the initial tests we see 60% improvements on test data (8M series block, requests for ~200k series) with querier and store gateway. + +Without this change: + +![1](../img/globalsort-nonoptimized.png) + +After implementing this proposal: + +![2](../img/globalsort-optimized.png) + +## Alternatives + +1. Version the StoreAPI. + +As a best practice, gRPC services should be versioned. This would allow easier iteration for everybody implementing or using it. However, having multiple versions (vs an extra feature-enablement field) might make the client side more complex, so we propose to postpone it. + +2. Optimization: Add "replica group" as another message in `SeriesResponse` + +An extra slice in all Series might feel redundant, given all series are always grouped within the same replica. Let's do this once we see it being a bottleneck (it will require a change in the StoreAPI version). + +3. Instead of removing some replica labels, just sort without them and leave them at the end. + +For debugging purposes we could keep the replica labels we want to dedup on at the end of the label set. + +This might however be a less clean way of providing better debuggability, which is not yet required. + +Cons: +* Feels hacky. The proper way of preserving this information would be alternative 4.
+* Debuggability might not be needed here - YAGNI + +4. Replica label struct + +We could make the Store API response fully replica-aware. This means that the series response would now include an extra slice of the replica labels that the series belongs to: + +```protobuf +message Series { + repeated Label labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel"]; + repeated Label replica_labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel"]; // Added. + + repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; +} +``` + +Pros: +* Easy to tell what is a replica label and what is not on the Store API client level + +Cons: +* Extra code and protobuf complexity +* Semantics of replica labels are hard to maintain when partial deduplication is configured (we only dedup by part of the replica labels, not by all of them). This dynamic policy makes it hard to have a clean response with a separation of replica labels (i.e. should included replica labels be in "labels" or "replica_labels"?) + +This might not be needed for now. We can add more awareness of replication later on. + +## Action Plan + +The tasks to do in order to migrate to the new idea. + +* [X] Merging the PR with the proposal (also includes implementation) +* [X] Add support for `without_replica_label` to other store API servers. +* [ ] Move to deduplicating over chunks from series. See [TODO in querier.go:405](../../pkg/query/querier.go) + +```go +// TODO(bwplotka): Move to deduplication on chunk level inside promSeriesSet, similar to what we have in dedup.NewDedupChunkMerger(). +// This however requires a big refactor, caring about correct AggrChunk to iterator conversion, pushdown logic and counter reset apply. +// For now we apply simple logic that splits potentially overlapping chunks into separate replica series, so we can split the work. +set := &promSeriesSet{ + mint: q.mint, + maxt: q.maxt, + set: dedup.NewOverlapSplit(newStoreSeriesSet(resp.seriesSet)), + aggrs: aggrs, + warns: warns, +} +``` diff --git a/docs/proposals-accepted/202301-distributed-query-execution.md b/docs/proposals-accepted/202301-distributed-query-execution.md new file mode 100644 index 0000000000..c4d6984df7 --- /dev/null +++ b/docs/proposals-accepted/202301-distributed-query-execution.md @@ -0,0 +1,197 @@ +--- +type: proposal +title: Distributed Query Execution +status: accepted +owner: fpetkovski +menu: proposals-accepted +--- + +## 1 Related links/tickets + +* https://github.com/thanos-io/thanos/pull/5250 +* https://github.com/thanos-io/thanos/pull/4917 +* https://github.com/thanos-io/thanos/pull/5350 +* https://github.com/thanos-community/promql-engine/issues/25 + +## 2 Why + +Thanos Queriers currently need to pull all data from Stores into memory before they can start evaluating a query. This has a large impact on the memory used inside a single querier, and drastically increases query execution latency. + +Even when a Querier is connected to other Queriers, it will still pull raw series instead of delegating parts of the execution to its downstreams. This document proposes a mode in the Thanos Querier where it will dispatch parts of the execution plan to different, independent Queriers. + +## 3 Pitfalls of current solutions + +We have two mechanisms in Thanos to distribute queries among different components.
+
+Query pushdown is a mechanism enabled by query hints which allows a Thanos sidecar to execute certain queries against Prometheus as part of a `Series` call. Since data is usually replicated in at least two Prometheus instances, the subset of queries that can be pushed down is quite limited. In addition to that, this approach has introduced additional complexity in the deduplication iterator to allow the Querier to distinguish between storage series and PromQL series.
+
+Query Sharding is an execution method initiated by the Query Frontend which allows an aggregation query with grouping labels to be distributed to different Queriers. Even though the number of queries that can be sharded is larger than the ones that can be pushed down, query sharding still has limited applicability since a query has to contain grouping labels. We have also noticed in practice that the execution latency does not fall linearly with the number of vertical shards, and often plateaus at around 4 shards. This is especially pronounced when querying data from Store Gateways, likely due to the amplification of `Series` calls against Store components.
+
+## 4 Audience
+
+* Thanos users who have challenges with evaluating PromQL queries due to high cardinality.
+
+## 5 Goals
+
+* Enable decentralized query execution by delegating query plan fragments to independent Queriers.
+
+## 6 Proposal
+
+The key advantage of distributed execution is the fact that the number of series is drastically reduced when a query contains an aggregation operator (`sum`, `group`, `max`, etc.). Most (if not all) high-cardinality PromQL queries are in fact aggregations, since users will struggle to sensibly visualise more than a handful of series.
+
+We therefore propose an execution model that allows running a Thanos Querier in a mode where it transforms a query into subqueries, which are delegated to independent Queriers, and a central aggregation, which is executed locally on the result of all subqueries. A simple example of this transformation is a `sum(rate(metric[2m]))` expression, which can be transformed into:
+
+```
+sum(
+  coalesce(
+    sum(rate(metric[2m])),
+    sum(rate(metric[2m]))
+  )
+)
+```
+
+### How
+
+The proposed method of transforming the query is extending the Thanos Engine with a logical optimizer that has references to other query engines. An example API could look as follows:
+
+```
+type DistributedExecutionOptimizer struct {
+	Endpoints api.RemoteEndpoints
+}
+
+type RemoteEndpoints interface {
+	Engines() []RemoteEngine
+}
+
+type RemoteEngine interface {
+	NewInstantQuery(opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error)
+	NewRangeQuery(opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error)
+}
+```
+
+The implementation of the `RemoteEngine` will be provided by Thanos itself and will use the gRPC Query API added in [https://github.com/thanos-io/thanos/pull/5250](https://github.com/thanos-io/thanos/pull/5250).
+
+Keeping PromQL execution in Query components allows deduplication between Prometheus pairs to happen before series are aggregated.
+
+*Figure: Distributed query execution*
+
+The initial version of the solution can be found here: https://github.com/thanos-community/promql-engine/pull/139
+
+### Query rewrite algorithm
+
+As described in the section above, the query will be rewritten using a logical optimizer into a form that is suitable for distributed execution.
+
+The proposed algorithm is as follows:
+* Start AST traversal from the bottom up.
+* If both the current node and its parent can be distributed, move up to the parent.
+* If the current node can be distributed and its parent cannot, rewrite the current node into its distributed form.
+* If the current node cannot be distributed, stop traversal.
+
+With this algorithm we try to distribute as much of the PromQL query as possible. Furthermore, even queries without aggregations, like `rate(http_requests_total[2m])`, will be rewritten into
+
+```
+coalesce(
+  rate(http_requests_total[2m]),
+  rate(http_requests_total[2m])
+)
+```
+
+Since PromQL queries are limited in the number of steps they can evaluate, this algorithm also achieves a form of downsampling at query time: only a small number of samples will be sent from local Queriers to the central one.
+
+### Time-based overlap resolution
+
+Thanos stores usually have a small overlap with ingestion components (Prometheus/Receiver) due to eventual consistency from uploading and downloading TSDB blocks. As a result, the central aggregation needs a way to deduplicate samples between ingestion and storage components.
+
+The proposed way to do time-based deduplication is by removing identical samples in the `coalesce` operator in the Thanos Engine itself. In order for data from independent Queriers to not get deduplicated, aggregations happening in remote engines must always preserve the external labels from the TSDB blocks that are being queried.
+
+To illustrate this with an example, we can assume that we have two clusters `a` and `b`, each being monitored with a Prometheus pair and with each Prometheus instance having an external `cluster` label. The query `sum(rate(metric[2m]))` would then be rewritten by the optimizer into:
+
+```
+sum(
+  coalesce(
+    sum by (cluster) (rate(metric[2m])),
+    sum by (cluster) (rate(metric[2m]))
+  )
+)
+```
+
+Each subquery would preserve the external `cluster` label, which will allow the `coalesce` operator to deduplicate only those samples which are calculated from the same TSDB blocks. External labels can be propagated to the central engine by extending the `RemoteEngine` interface with a `Labels() []string` method. With this approach, local Queriers can be spread as widely as needed, with the extreme case of having one Querier per deduplicated TSDB block.
+
+*Figure: Distributed query execution*
+
+## Deployment models
+
+With this approach, a Thanos admin can arrange remote Queriers in an arbitrary way, as long as TSDB replicas are always queried by only one remote Querier. The following deployment models can be used as examples:
+
+### Monitoring different environments with Prometheus pairs
+
+In this deployment mode, remote Queriers are attached to pairs of Prometheus instances. The central Querier delegates subqueries to them and performs a central aggregation of results.
+
+*Figure: Distributed query execution*
+
+### Querying separate Store Gateways and Prometheus pairs
+
+Remote Queriers can be attached to Prometheus pairs and Store Gateways at the same time. The central Querier delegates subqueries and deduplicates overlapping results before performing a central aggregation.
+
+*Figure: Distributed query execution*
+
+### Running remote Queriers as Store Gateway sidecars
+
+Remote Queriers can be attached to disjoint groups of Store Gateways. They can even be attached to individual Store Gateways which have deduplicated TSDB blocks, or which hold all replicas of a TSDB block. This makes sure penalty-based deduplication happens in the remote Querier.
+
+Store groups can be created by either partitioning TSDBs by time (time-based partitioning), or by external labels. Both of these techniques are documented in the [Store Gateway documentation](https://thanos.io/tip/components/store.md/#time-based-partitioning).
+
+*Figure: Distributed query execution*
+
+### Distributed execution against Receive components
+
+We currently lack a mechanism to configure a Querier against a subset of TSDBs, unless that Querier is exclusively attached to Stores that have those TSDBs. In the case of Receivers, TSDBs are created and pruned dynamically, which makes it hard to apply the distributed query model against this component.
+
+To resolve this issue, this proposal suggests adding a `--selector.relabel-config` command-line flag to the Query component that will work the same way as the Store Gateway selector works. For each query, the Querier will apply the given relabel config against each Store's external label set and decide whether to keep or drop a TSDB from the query. After the relabeling is applied, the query will be rewritten to target only those TSDBs that match the selector.
+
+An example config that only targets TSDBs with the external label `tenant=a` would be:
+
+```
+- source_labels: [tenant]
+  action: keep
+  regex: a
+```
+
+With this mechanism, a user can run a pool of Queriers with a selector config as follows:
+
+```
+- source_labels: [ext_label_a, ext_label_b]
+  action: hashmod
+  target_label: query_shard
+  modulus: ${query_shard_replicas}
+- action: keep
+  source_labels: [query_shard]
+  regex: ${query_shard_instance}
+```
+
+This approach can also be used to create Querier shards against Store Gateways, or any other pool of Store components.
+
+## 7 Alternatives
+
+A viable alternative to the proposed method is to add support for Query Pushdown in the Thanos Querier. By extracting better query hints, as described in https://github.com/thanos-io/thanos/issues/5984, we can decide to execute a query in a local Querier, similar to how the sidecar does against Prometheus.
+
+Even though this approach might be faster to implement, it might not be the best long-term solution, for several reasons. To some extent, Query Pushdown misuses the `Series` API, and the Querier requesting series is not aware that the query was actually executed. This can be problematic for distributing something like `count(metric)`, since the distributed version should end up as:
+
+```
+sum(
+  coalesce(
+    count(metric),
+    count(metric)
+  )
+)
+```
+
+The root querier would need to know that downstream queriers have already executed the `count`, and that it should convert the aggregation into a `sum`.
+
+A similar problem can happen with a `sum(rate(metric[2m]))` expression where downstream queriers calculate the `sum` over the metric's `rate`. In order for the values to not get rated twice, either the downstream queriers need to invert the rate into a cumulative value, or the central querier needs to omit the rate and only calculate the sum.
+
+Managing this complexity in Thanos itself seems error-prone and hard to maintain over time. As a result, this proposal suggests localizing the complexity into a single logical optimizer, as suggested in the sections above.
+
+Depending on the success of the distributed execution model, we can also fully deprecate query pushdown and query sharding and replace them with a single mechanism that can evolve and improve over time.
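+As a closing illustration of the rewrite described in this proposal, here is a minimal Go sketch. It is not the actual optimizer from the promql-engine PR: `distribute` and `coalesceN` are hypothetical helpers, a two-engine setup is assumed, grouping- and external-label preservation is ignored, and `coalesce(...)` in the output is pseudo-PromQL standing in for the Thanos engine's internal merge operator. It only mimics the transformation at the query-string level using the Prometheus PromQL parser:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/prometheus/prometheus/promql/parser"
+)
+
+// coalesceN repeats a subquery once per remote engine inside a pseudo-PromQL
+// coalesce() call. coalesce is not standard PromQL; it denotes the engine's
+// internal merge/deduplication operator.
+func coalesceN(sub string, engines int) string {
+	subs := make([]string, engines)
+	for i := range subs {
+		subs[i] = sub
+	}
+	return "coalesce(" + strings.Join(subs, ", ") + ")"
+}
+
+// distribute pushes a top-level aggregation into each remote subquery and
+// re-applies the same aggregation centrally, as sketched in section 6.
+func distribute(query string, engines int) (string, error) {
+	expr, err := parser.ParseExpr(query)
+	if err != nil {
+		return "", err
+	}
+	agg, ok := expr.(*parser.AggregateExpr)
+	if !ok {
+		// No top-level aggregation: just coalesce the whole query.
+		return coalesceN(expr.String(), engines), nil
+	}
+	return fmt.Sprintf("%s(%s)", agg.Op, coalesceN(agg.String(), engines)), nil
+}
+
+func main() {
+	out, err := distribute("sum(rate(metric[2m]))", 2)
+	if err != nil {
+		panic(err)
+	}
+	// Prints: sum(coalesce(sum(rate(metric[2m])), sum(rate(metric[2m]))))
+	fmt.Println(out)
+}
+```
+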
diff --git a/docs/quick-tutorial.md b/docs/quick-tutorial.md
index e77caf0fc3..6c90a3d8fc 100644
--- a/docs/quick-tutorial.md
+++ b/docs/quick-tutorial.md
@@ -1,98 +1,108 @@
 # Quick Tutorial
 
-Feel free to check the free, in-browser interactive tutorial [as Killercoda Thanos Course](https://killercoda.com/thanos). We will be progressively updating our Katacoda Course with more scenarios.
+Check out the free, in-browser interactive tutorial [Killercoda Thanos course](https://killercoda.com/thanos). We will be progressively updating our Killercoda course with more scenarios.
 
-On top of this feel free to go through our tutorial presented here:
+On top of this, find our quick tutorial below.
 
 ## Prometheus
 
-Thanos is based on Prometheus. With Thanos you use more or less Prometheus features depending on the deployment model, however Prometheus always stays as integral foundation for *collecting metrics* and alerting using local data.
+Thanos is based on Prometheus. With Thanos, Prometheus always remains an integral foundation for collecting metrics and alerting using local data.
 
-Thanos bases itself on vanilla [Prometheus](https://prometheus.io/) (v2.2.1+). We plan to support *all* Prometheus version beyond this version.
+Thanos bases itself on vanilla [Prometheus](https://prometheus.io/). We plan to support *all* Prometheus versions beyond v2.2.1.
 
-NOTE: It is highly recommended to use Prometheus v2.13+ due to Prometheus remote read improvements.
+NOTE: It is highly recommended to use Prometheus v2.13.0+ due to its remote read improvements.
 
-Always make sure to run Prometheus as recommended by Prometheus team, so:
+Always make sure to run Prometheus as recommended by the Prometheus team:
 
-* Put Prometheus in the same failure domain. This means same network, same datacenter as monitoring services.
-* Use persistent disk to persist data across Prometheus restarts.
+* Put Prometheus in the same failure domain. This means in the same network and in the same geographic location as the monitored services.
+* Use a persistent disk to persist data across Prometheus restarts.
 * Use local compaction for longer retentions.
-* Do not change min TSDB block durations.
-* Do not scale out Prometheus unless necessary. Single Prometheus is highly efficient (:
+* Do not change the minimum TSDB block durations.
+* Do not scale out Prometheus unless necessary. A single Prometheus instance is already efficient. We recommend using Thanos when you need to scale out your Prometheus instance.
 
 ## Components
 
-Following the [KISS](https://en.wikipedia.org/wiki/KISS_principle) and Unix philosophies, Thanos is made of a set of components with each filling a specific role.
+Following the [KISS](https://en.wikipedia.org/wiki/KISS_principle) and Unix philosophies, Thanos is composed of a set of components where each fulfills a specific role.
 
 * Sidecar: connects to Prometheus, reads its data for query and/or uploads it to cloud storage.
 * Store Gateway: serves metrics inside of a cloud storage bucket.
-* Compactor: compacts, downsamples and applies retention on the data stored in cloud storage bucket.
-* Receiver: receives data from Prometheus's remote-write WAL, exposes it and/or upload it to cloud storage.
+* Compactor: compacts, downsamples and applies retention on the data stored in the cloud storage bucket.
+* Receiver: receives data from Prometheus's remote write write-ahead log, exposes it, and/or uploads it to cloud storage.
 * Ruler/Rule: evaluates recording and alerting rules against data in Thanos for exposition and/or upload.
 * Querier/Query: implements Prometheus's v1 API to aggregate data from the underlying components.
-* Query Frontend: implements Prometheus's v1 API proxies it to Query while caching the response and optional splitting by queries day.
+* Query Frontend: implements Prometheus's v1 API to proxy it to Querier while caching the response and optionally splitting queries by day.
 
-Deployment with Sidecar:
+Deployment with Thanos Sidecar for Kubernetes:
 
-![Sidecar](https://docs.google.com/drawings/d/e/2PACX-1vTBFKKgf8YDInJyRakPE8eZZg9phTlOsBB2ogNkFvhNGbZ8YDvz_cGMbxWZBG1G6hpsQfSX145FpYcv/pub?w=960&h=720)
+
 
-Deployment with Receive:
+![Sidecar](https://docs.google.com/drawings/d/e/2PACX-1vSJd32gPh8-MC5Ko0-P-v1KQ0Xnxa0qmsVXowtkwVGlczGfVW-Vd415Y6F129zvh3y0vHLBZcJeZEoz/pub?w=960&h=720)
 
-![Receive](https://docs.google.com/drawings/d/e/2PACX-1vTfko27YB_3ab7ZL8ODNG5uCcrpqKxhmqaz3lW-yhGN3_oNxkTrqXmwwlcZjaWf3cGgAJIM4CMwwkEV/pub?w=960&h=720)
+Deployment via Receive in order to scale out or integrate with other remote write-compatible sources:
+
+
+
+![Receive](https://docs.google.com/drawings/d/e/2PACX-1vRdYP__uDuygGR5ym1dxBzU6LEx5v7Rs1cAUKPsl5BZrRGVl5YIj5lsD_FOljeIVOGWatdAI9pazbCP/pub?w=960&h=720)
 
 ### Sidecar
 
-Thanos integrates with existing Prometheus servers through a [Sidecar process](https://docs.microsoft.com/en-us/azure/architecture/patterns/sidecar#solution), which runs on the same machine or in the same pod as the Prometheus server.
+Thanos integrates with existing Prometheus servers as a [sidecar process](https://docs.microsoft.com/en-us/azure/architecture/patterns/sidecar#solution), which runs on the same machine or in the same pod as the Prometheus server.
 
-The purpose of the Sidecar is to backup Prometheus data into an Object Storage bucket, and give other Thanos components access to the Prometheus metrics via a gRPC API.
+The purpose of Thanos Sidecar is to back up Prometheus's data into an object storage bucket, and give other Thanos components access to the Prometheus metrics via a gRPC API.
 
-The Sidecar makes use of the `reload` Prometheus endpoint. Make sure it's enabled with the flag `--web.enable-lifecycle`.
+Sidecar makes use of Prometheus's `reload` endpoint. Make sure it's enabled with the flag `--web.enable-lifecycle`.
 
-[Component sidecar documentation](components/sidecar.md)
+[Sidecar component documentation](components/sidecar.md)
 
-### External storage
+### External Storage
 
-The following configures the sidecar to write Prometheus's data into a configured object storage:
+The following configures Sidecar to write Prometheus's data into a configured object storage bucket:
 
 ```bash
 thanos sidecar \
     --tsdb.path            /var/prometheus \          # TSDB data directory of Prometheus
-    --prometheus.url       "http://localhost:9090" \  # Be sure that the sidecar can use this url!
+    --prometheus.url       "http://localhost:9090" \  # Be sure that Sidecar can use this URL!
    --objstore.config-file bucket_config.yaml \       # Storage configuration for uploading data
 ```
 
-The format of YAML file depends on the provider you choose. Examples of config and up-to-date list of storage types Thanos supports is available [here](storage.md).
+The exact format of the YAML file depends on the provider you choose. Configuration examples and an up-to-date list of the storage types that Thanos supports are available [here](storage.md).
 
-Rolling this out has little to zero impact on the running Prometheus instance. It is a good start to ensure you are backing up your data while figuring out the other pieces of Thanos.
+Rolling this out has little to no impact on the running Prometheus instance. This allows you to ensure you are backing up your data while figuring out the other pieces of Thanos.
 
 If you are not interested in backing up any data, the `--objstore.config-file` flag can simply be omitted.
 
 * *[Example Kubernetes manifests using Prometheus operator](https://github.com/coreos/prometheus-operator/tree/master/example/thanos)*
-* *[Example Deploying sidecar using official Prometheus Helm Chart](../tutorials/kubernetes-helm/README.md)*
+* *[Example Deploying Sidecar using official Prometheus Helm Chart](../tutorials/kubernetes-helm/README.md)*
 * *[Details & Config for other object stores](storage.md)*
 
 ### Store API
 
-The Sidecar component implements and exposes a gRPC *[Store API](https://github.com/thanos-io/thanos/blob/main/pkg/store/storepb/rpc.proto#L27)*. The sidecar implementation allows you to query the metric data stored in Prometheus.
+The Sidecar component implements and exposes a gRPC *[Store API](https://github.com/thanos-io/thanos/blob/main/pkg/store/storepb/rpc.proto#L27)*. This implementation allows you to query the metric data stored in Prometheus.
 
-Let's extend the Sidecar in the previous section to connect to a Prometheus server, and expose the Store API.
+Let's extend the Sidecar from the previous section to connect to a Prometheus server, and expose the Store API:
 
 ```bash
 thanos sidecar \
     --tsdb.path                 /var/prometheus \
     --objstore.config-file      bucket_config.yaml \       # Bucket config file to send data to
     --prometheus.url            http://localhost:9090 \    # Location of the Prometheus HTTP server
-    --http-address              0.0.0.0:19191 \            # HTTP endpoint for collecting metrics on the Sidecar
+    --http-address              0.0.0.0:19191 \            # HTTP endpoint for collecting metrics on Sidecar
     --grpc-address              0.0.0.0:19090              # GRPC endpoint for StoreAPI
 ```
 
 * *[Example Kubernetes manifests using Prometheus operator](https://github.com/coreos/prometheus-operator/tree/master/example/thanos)*
 
-### Uploading old metrics.
+### Uploading Old Metrics
+
+When Sidecar is run with the `--shipper.upload-compacted` flag, it will sync all older existing blocks from Prometheus local storage on startup.
 
-When sidecar is run with the `--shipper.upload-compacted` flag it will sync all older existing blocks from the Prometheus local storage on startup. NOTE: This assumes you never run sidecar with block uploading against this bucket. Otherwise manual steps are needed to remove overlapping blocks from the bucket. Those will be suggested by the sidecar verification process.
+NOTE: This assumes you never run the Sidecar with block uploading against this bucket. Otherwise, you must manually remove overlapping blocks from the bucket. Those mitigation steps will be suggested by the sidecar verification process.
 
 ### External Labels
 
@@ -110,31 +120,31 @@ global:
 
 ## Querier/Query
 
-Now that we have setup the Sidecar for one or more Prometheus instances, we want to use Thanos' global [Query Layer](components/query.md) to evaluate PromQL queries against all instances at once.
+Now that we have set up Sidecar for one or more Prometheus instances, we want to use Thanos's global [Query Layer](components/query.md) to evaluate PromQL queries against all instances at once.
 
-The Query component is stateless and horizontally scalable and can be deployed with any number of replicas. Once connected to the Sidecars, it automatically detects which Prometheus servers need to be contacted for a given PromQL query.
+The Querier component is stateless and horizontally scalable, and can be deployed with any number of replicas. Once connected to Thanos Sidecar, it automatically detects which Prometheus servers need to be contacted for a given PromQL query.
 
-Thanos Querier also implements Prometheus's official HTTP API and can thus be used with external tools such as Grafana. It also serves a derivative of Prometheus's UI for ad-hoc querying and stores status.
+Thanos Querier also implements Prometheus's official HTTP API and can thus be used with external tools such as Grafana. It also serves a derivative of Prometheus's UI for ad-hoc querying and checking the status of the Thanos stores.
 
-Below, we will set up a Thanos Querier to connect to our Sidecars, and expose its HTTP UI.
+Below, we will set up a Thanos Querier to connect to our Sidecars, and expose its HTTP UI:
 
 ```bash
 thanos query \
     --http-address 0.0.0.0:19192 \                                # HTTP Endpoint for Thanos Querier UI
-    --store        1.2.3.4:19090 \                                # Static gRPC Store API Address for the query node to query
-    --store        1.2.3.5:19090 \                                # Also repeatable
-    --store        dnssrv+_grpc._tcp.thanos-store.monitoring.svc  # Supports DNS A & SRV records
+    --endpoint     1.2.3.4:19090 \                                # Static gRPC Store API Address for the query node to query
+    --endpoint     1.2.3.5:19090 \                                # Also repeatable
+    --endpoint     dnssrv+_grpc._tcp.thanos-store.monitoring.svc  # Supports DNS A & SRV records
 ```
 
-Go to the configured HTTP address that should now show a UI similar to that of Prometheus. If the cluster formed correctly you can now query across all Prometheus instances within the cluster. You can also check the Stores page to check up on your stores.
+Go to the configured HTTP address, which should now show a UI similar to that of Prometheus. You can now query across all Prometheus instances within the cluster. You can also check out the Stores page, which shows all of your stores.
 
 [Query documentation](components/query.md)
 
-### Deduplicating Data from Prometheus HA pairs
+### Deduplicating Data from Prometheus HA Pairs
 
-The Query component is also capable of deduplicating data collected from Prometheus HA pairs. This requires configuring Prometheus's `global.external_labels` configuration block to identify the role of a given Prometheus instance.
+The Querier component is also capable of deduplicating data collected from Prometheus HA pairs. This requires configuring Prometheus's `global.external_labels` configuration block to identify the role of a given Prometheus instance.
 
-A typical choice is simply the label name "replica" while letting the value be whatever you wish. For example, you might set up the following in Prometheus's configuration file:
+A typical configuration uses the label name "replica" with whatever value you choose. For example, you might set up the following in Prometheus's configuration file:
 
 ```yaml
 global:
@@ -147,34 +157,34 @@ global:
 
 In a Kubernetes stateful deployment, the replica label can also be the pod name.
 
-Reload your Prometheus instances, and then, in Thanos Querier, we will define `replica` as the label we want to enable deduplication to occur on:
+Ensure your Prometheus instances have been reloaded with the configuration you defined above.
Then, in Thanos Querier, we will define `replica` as the label we want to enable deduplication on: ```bash thanos query \ --http-address 0.0.0.0:19192 \ - --store 1.2.3.4:19090 \ - --store 1.2.3.5:19090 \ - --query.replica-label replica # Replica label for de-duplication - --query.replica-label replicaX # Supports multiple replica labels for de-duplication + --endpoint 1.2.3.4:19090 \ + --endpoint 1.2.3.5:19090 \ + --query.replica-label replica # Replica label for deduplication + --query.replica-label replicaX # Supports multiple replica labels for deduplication ``` -Go to the configured HTTP address, and you should now be able to query across all Prometheus instances and receive de-duplicated data. +Go to the configured HTTP address, and you should now be able to query across all Prometheus instances and receive deduplicated data. * *[Example Kubernetes manifest](https://github.com/thanos-io/kube-thanos/blob/master/manifests/thanos-query-deployment.yaml)* ### Communication Between Components -The only required communication between nodes is for Thanos Querier to be able to reach gRPC storeAPIs you provide. Thanos Querier periodically calls Info endpoint to collect up-to-date metadata as well as checking the health of given StoreAPI. The metadata includes the information about time windows and external labels for each node. +The only required communication between nodes is for a Thanos Querier to be able to reach the gRPC Store APIs that you provide. Thanos Querier periodically calls the info endpoint to collect up-to-date metadata as well as check the health of a given Store API. That metadata includes the information about time windows and external labels for each node. -There are various ways to tell query component about the StoreAPIs it should query data from. The simplest way is to use a static list of well known addresses to query. These are repeatable so can add as many endpoint as needed. You can put DNS domain prefixed by `dns+` or `dnssrv+` to have Thanos Querier do an `A` or `SRV` lookup to get all required IPs to communicate with. +There are various ways to tell Thanos Querier about the Store APIs it should query data from. The simplest way is to use a static list of well known addresses to query. These are repeatable, so you can add as many endpoints as you need. You can also put a DNS domain prefixed by `dns+` or `dnssrv+` to have a Thanos Querier do an `A` or `SRV` lookup to get all the required IPs it should communicate with. ```bash thanos query \ --http-address 0.0.0.0:19192 \ # Endpoint for Thanos Querier UI --grpc-address 0.0.0.0:19092 \ # gRPC endpoint for Store API - --store 1.2.3.4:19090 \ # Static gRPC Store API Address for the query node to query - --store 1.2.3.5:19090 \ # Also repeatable - --store dns+rest.thanos.peers:19092 # Use DNS lookup for getting all registered IPs as separate StoreAPIs + --endpoint 1.2.3.4:19090 \ # Static gRPC Store API Address for the query node to query + --endpoint 1.2.3.5:19090 \ # Also repeatable + --endpoint dns+rest.thanos.peers:19092 # Use DNS lookup for getting all registered IPs as separate Store APIs ``` Read more details [here](service-discovery.md). @@ -183,7 +193,7 @@ Read more details [here](service-discovery.md). ## Store Gateway -As the sidecar backs up data into the object storage of your choice, you can decrease Prometheus retention and store less locally. However we need a way to query all that historical data again. 
The store gateway does just that by implementing the same gRPC data API as the sidecars but backing it with data it can find in your object storage bucket. Just like sidecars and query nodes, the store gateway exposes StoreAPI and needs to be discovered by Thanos Querier. +As Thanos Sidecar backs up data into the object storage bucket of your choice, you can decrease Prometheus's retention in order to store less data locally. However, we need a way to query all that historical data again. Store Gateway does just that, by implementing the same gRPC data API as Sidecar, but backing it with data it can find in your object storage bucket. Just like sidecars and query nodes, Store Gateway exposes a Store API and needs to be discovered by Thanos Querier. ```bash thanos store \ @@ -193,7 +203,7 @@ thanos store \ --grpc-address 0.0.0.0:19090 # GRPC endpoint for StoreAPI ``` -The store gateway occupies small amounts of disk space for caching basic information about data in the object storage. This will rarely exceed more than a few gigabytes and is used to improve restart times. It is useful but not required to preserve it across restarts. +Store Gateway uses a small amount of disk space for caching basic information about data in the object storage bucket. This will rarely exceed more than a few gigabytes and is used to improve restart times. It is useful but not required to preserve it across restarts. * *[Example Kubernetes manifest](https://github.com/thanos-io/kube-thanos/blob/master/manifests/thanos-store-statefulSet.yaml)* @@ -201,20 +211,20 @@ The store gateway occupies small amounts of disk space for caching basic informa ## Compactor -A local Prometheus installation periodically compacts older data to improve query efficiency. Since the sidecar backs up data as soon as possible, we need a way to apply the same process to data in the object storage. +A local Prometheus installation periodically compacts older data to improve query efficiency. Since Sidecar backs up data into an object storage bucket as soon as possible, we need a way to apply the same process to data in the bucket. -The compactor component simply scans the object storage and processes compaction where required. At the same time it is responsible for creating downsampled copies of data to speed up queries. +Thanos Compactor simply scans the object storage bucket and performs compaction where required. At the same time, it is responsible for creating downsampled copies of data in order to speed up queries. ```bash thanos compact \ --data-dir /var/thanos/compact \ # Temporary workspace for data processing - --objstore.config-file bucket_config.yaml \ # Bucket where to apply the compacting - --http-address 0.0.0.0:19191 # HTTP endpoint for collecting metrics on the Compactor + --objstore.config-file bucket_config.yaml \ # Bucket where compacting will be performed + --http-address 0.0.0.0:19191 # HTTP endpoint for collecting metrics on the compactor ``` -The compactor is not in the critical path of querying or data backup. It can either be run as a periodic batch job or be left running to always compact data as soon as possible. It is recommended to provide 100-300GB of local disk space for data processing. +Compactor is not in the critical path of querying or data backup. It can either be run as a periodic batch job or be left running to always compact data as soon as possible. It is recommended to provide 100-300GB of local disk space for data processing. 
-*NOTE: The compactor must be run as a **singleton** and must not run when manually modifying data in the bucket.* +*NOTE: Compactor must be run as a **singleton** and must not run when manually modifying data in the bucket.* * *[Example Kubernetes manifest](https://github.com/thanos-io/kube-thanos/blob/master/examples/all/manifests/thanos-compact-statefulSet.yaml)* @@ -222,6 +232,6 @@ The compactor is not in the critical path of querying or data backup. It can eit ## Ruler/Rule -In case of Prometheus with Thanos sidecar does not have enough retention, or if you want to have alerts or recording rules that requires global view, Thanos has just the component for that: the [Ruler](components/rule.md), which does rule and alert evaluation on top of a given Thanos Querier. +In case Prometheus running with Thanos Sidecar does not have enough retention, or if you want to have alerts or recording rules that require a global view, Thanos has just the component for that: the [Ruler](components/rule.md), which does rule and alert evaluation on top of a given Thanos Querier. [Rule documentation](components/rule.md) diff --git a/docs/release-process.md b/docs/release-process.md index 1d99961df5..bee9247cca 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -23,7 +23,9 @@ Release shepherd responsibilities: | Release | Time of first RC | Shepherd (GitHub handle) | |---------|----------------------|-------------------------------| -| v0.30.0 | (planned) 2022.11.21 | No one ATM | +| v0.32.0 | (planned) 2023.03.09 | No one ATM | +| v0.31.0 | (planned) 2023.01.26 | No one ATM | +| v0.30.0 | 2022.12.21 | `@bwplotka` | | v0.29.0 | 2022.10.21 | `@GiedriusS` | | v0.28.0 | 2022.08.22 | `@yeya24` | | v0.27.0 | 2022.06.21 | `@wiardvanrij` and `@matej-g` | diff --git a/docs/storage.md b/docs/storage.md index 301f538814..f283e5b5e0 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -71,6 +71,7 @@ config: insecure: false signature_version2: false secret_key: "" + session_token: "" put_user_metadata: {} http_config: idle_conn_timeout: 1m30s @@ -358,16 +359,15 @@ config: storage_account_key: "" container: "" endpoint: "" - max_retries: 0 - msi_resource: "" user_assigned_id: "" + max_retries: 0 + reader_config: + max_retry_requests: 0 pipeline_config: max_tries: 0 try_timeout: 0s retry_delay: 0s max_retry_delay: 0s - reader_config: - max_retry_requests: 0 http_config: idle_conn_timeout: 0s response_header_timeout: 0s @@ -384,6 +384,7 @@ config: server_name: "" insecure_skip_verify: false disable_compression: false + msi_resource: "" prefix: "" ``` @@ -413,6 +414,9 @@ config: password: "" domain_id: "" domain_name: "" + application_credential_id: "" + application_credential_name: "" + application_credential_secret: "" project_id: "" project_name: "" project_domain_id: "" @@ -513,11 +517,11 @@ config: prefix: "" ``` -### Oracle Cloud Infrastructure Object Storage +#### Oracle Cloud Infrastructure Object Storage To configure Oracle Cloud Infrastructure (OCI) Object Storage as Thanos Object Store, you need to provide appropriate authentication credentials to your OCI tenancy. The OCI object storage client implementation for Thanos supports either the default keypair or instance principal authentication. 
-#### API Signing Key
+##### API Signing Key
 
 The default API signing key authentication provider leverages same [configuration as the OCI CLI](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/cliconcepts.htm) which is usually stored in at `$HOME/.oci/config` or via variable names starting with the string `OCI_CLI`. If the same configuration is found in multiple places the provider will prefer the first one.
 
@@ -545,7 +549,7 @@ config:
   client_timeout: 90s // Optional time limit for requests made by the HTTP Client.
 ```
 
-#### Instance Principal Provider
+##### Instance Principal Provider
 
 For Example:
 
@@ -559,7 +563,7 @@ config:
 
 You can also include any of the optional configuration just like the example in `Default Provider`.
 
-#### Raw Provider
+##### Raw Provider
 
 For Example:
 
@@ -579,6 +583,24 @@ config:
 
 You can also include any of the optional configuration just like the example in `Default Provider`.
 
+##### OCI Policies
+
+Regardless of the method you use for authentication (raw, instance-principal), you need the following two policies for Thanos (Sidecar or Receive) to be able to write TSDB blocks to OCI Object Storage. The difference lies in whom you grant the permissions to.
+
+For using instance-principal and a dynamic group:
+
+```
+Allow dynamic-group thanos to read buckets in compartment id ocid1.compartment.oc1..a
+Allow dynamic-group thanos to manage objects in compartment id ocid1.compartment.oc1..a
+```
+
+For using the raw provider and an IAM group:
+
+```
+Allow group thanos to read buckets in compartment id ocid1.compartment.oc1..a
+Allow group thanos to manage objects in compartment id ocid1.compartment.oc1..a
+```
+
 ### How to add a new client to Thanos?
 
 objstore.go
@@ -623,7 +645,7 @@ total 2209344
 drwxr-xr-x 2 bwplotka bwplotka       4096 Dec 10  2019 chunks
 -rw-r--r-- 1 bwplotka bwplotka 1962383742 Dec 10  2019 index
 -rw-r--r-- 1 bwplotka bwplotka       6761 Dec 10  2019 meta.json
--rw-r--r-- 1 bwplotka bwplotka        111 Dec 10  2019 delete-mark.json # <-- Optional marker.
+-rw-r--r-- 1 bwplotka bwplotka        111 Dec 10  2019 deletion-mark.json # <-- Optional marker.
 -rw-r--r-- 1 bwplotka bwplotka        124 Dec 10  2019 no-compact-mark.json # <-- Optional marker.
 
 01DN3SK96XDAEKRB1AN30AAW6E/chunks:
diff --git a/docs/tracing.md b/docs/tracing.md
index 430570d72f..dd8cf998e4 100644
--- a/docs/tracing.md
+++ b/docs/tracing.md
@@ -78,6 +78,7 @@ Thanos supports exporting traces in the OpenTelemetry Protocol (OTLP). Both gRPC
 type: OTLP
 config:
   client_type: ""
+  service_name: ""
   reconnection_period: 0s
   compression: ""
   insecure: false
@@ -96,11 +97,13 @@ config:
   key_file: ""
   server_name: ""
   insecure_skip_verify: false
+  sampler_type: ""
+  sampler_param: ""
 ```
 
 ### Jaeger
 
-Client for https://github.com/jaegertracing/jaeger tracing. Options can be provided also via environment variables. For more details see the Jaeger [exporter specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#jaeger-exporter).
+Client for https://github.com/jaegertracing/jaeger tracing. Options can also be provided via environment variables. For more details see the Jaeger [exporter specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#jaeger-exporter).
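+
+For instance, assuming the standard OpenTelemetry SDK Jaeger exporter variables from that specification apply to this client (a sketch only; exact variable support may differ between Thanos versions), a collector endpoint with basic auth could be configured as:
+
+```bash
+# Hypothetical example: configure the Jaeger exporter through the
+# OpenTelemetry SDK environment variables instead of the YAML file.
+export OTEL_EXPORTER_JAEGER_ENDPOINT="http://jaeger-collector:14268/api/traces"
+export OTEL_EXPORTER_JAEGER_USER="thanos"
+export OTEL_EXPORTER_JAEGER_PASSWORD="secret"
+```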
*WARNING: Options `RPC Metrics`, `Gen128Bit` and `Disabled` are now deprecated and won't have any effect when set* diff --git a/examples/alerts/alerts.md b/examples/alerts/alerts.md index 474b55b700..1e59873cf5 100644 --- a/examples/alerts/alerts.md +++ b/examples/alerts/alerts.md @@ -220,10 +220,10 @@ rules: annotations: description: Thanos Store {{$labels.job}} is failing to handle {{$value | humanize}}% of requests. runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanosstoregrpcerrorrate - summary: Thanos Store is failing to handle qrpcd requests. + summary: Thanos Store is failing to handle gRPC requests. expr: | ( - sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*"}[5m])) + sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*"}[5m])) / sum by (job) (rate(grpc_server_started_total{job=~".*thanos-store.*"}[5m])) * 100 > 5 diff --git a/examples/alerts/alerts.yaml b/examples/alerts/alerts.yaml index b8ab03b65f..7d99fdba80 100644 --- a/examples/alerts/alerts.yaml +++ b/examples/alerts/alerts.yaml @@ -304,10 +304,10 @@ groups: annotations: description: Thanos Store {{$labels.job}} is failing to handle {{$value | humanize}}% of requests. runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanosstoregrpcerrorrate - summary: Thanos Store is failing to handle qrpcd requests. + summary: Thanos Store is failing to handle gRPC requests. expr: | ( - sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*"}[5m])) + sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*"}[5m])) / sum by (job) (rate(grpc_server_started_total{job=~".*thanos-store.*"}[5m])) * 100 > 5 diff --git a/examples/alerts/rules.yaml b/examples/alerts/rules.yaml index 51e08c69ab..f18cf5be39 100644 --- a/examples/alerts/rules.yaml +++ b/examples/alerts/rules.yaml @@ -91,14 +91,14 @@ groups: rules: - expr: | ( - sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*", grpc_type="unary"}[5m])) + sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*", grpc_type="unary"}[5m])) / sum by (job) (rate(grpc_server_started_total{job=~".*thanos-store.*", grpc_type="unary"}[5m])) ) record: :grpc_server_failures_per_unary:sum_rate - expr: | ( - sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*", grpc_type="server_stream"}[5m])) + sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*", grpc_type="server_stream"}[5m])) / sum by (job) (rate(grpc_server_started_total{job=~".*thanos-store.*", grpc_type="server_stream"}[5m])) ) diff --git a/examples/dashboards/bucket-replicate.json b/examples/dashboards/bucket-replicate.json index 67dad483cb..5a01656188 100644 --- a/examples/dashboards/bucket-replicate.json +++ b/examples/dashboards/bucket-replicate.json @@ -123,7 +123,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, result) 
(rate(thanos_replicate_replication_runs_total{result=\"error\", job=~\"$job\"}[$interval]))", + "expr": "sum by (job, result) (rate(thanos_replicate_replication_runs_total{result=\"error\", job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{result}}", @@ -329,7 +329,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(blocks_meta_synced{state=\"loaded\", job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(blocks_meta_synced{state=\"loaded\", job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "meta loads", @@ -337,7 +337,7 @@ "step": 10 }, { - "expr": "sum by (job) (rate(blocks_meta_synced{state=\"failed\", job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(blocks_meta_synced{state=\"failed\", job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "partial meta reads", @@ -345,7 +345,7 @@ "step": 10 }, { - "expr": "sum by (job) (rate(thanos_replicate_blocks_already_replicated_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_replicate_blocks_already_replicated_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "already replicated blocks", @@ -353,7 +353,7 @@ "step": 10 }, { - "expr": "sum by (job) (rate(thanos_replicate_blocks_replicated_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_replicate_blocks_replicated_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "replicated blocks", @@ -361,7 +361,7 @@ "step": 10 }, { - "expr": "sum by (job) (rate(thanos_replicate_objects_replicated_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_replicate_objects_replicated_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "replicated objects", diff --git a/examples/dashboards/compact.json b/examples/dashboards/compact.json index 889fcf47af..8fecc5a2c2 100644 --- a/examples/dashboards/compact.json +++ b/examples/dashboards/compact.json @@ -46,7 +46,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, group) (rate(thanos_compact_group_compactions_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, group) (rate(thanos_compact_group_compactions_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "compaction {{job}} {{group}}", @@ -213,7 +213,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, group) (rate(thanos_compact_downsample_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, group) (rate(thanos_compact_downsample_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "downsample {{job}} {{group}}", @@ -380,7 +380,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_compact_garbage_collection_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_compact_garbage_collection_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "garbage collection {{job}}", @@ -665,7 +665,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_compact_blocks_cleaned_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_compact_blocks_cleaned_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, 
"legendFormat": "Blocks cleanup {{job}}", @@ -742,7 +742,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_compact_block_cleanup_failures_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_compact_block_cleanup_failures_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Blocks cleanup failures {{job}}", @@ -819,7 +819,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_compact_blocks_marked_for_deletion_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_compact_blocks_marked_for_deletion_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Blocks marked {{job}}", @@ -908,7 +908,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_blocks_meta_syncs_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_blocks_meta_syncs_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "sync {{job}}", @@ -1193,7 +1193,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{operation}}", diff --git a/examples/dashboards/overview.json b/examples/dashboards/overview.json index a539947600..29bd665535 100644 --- a/examples/dashboards/overview.json +++ b/examples/dashboards/overview.json @@ -161,10 +161,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(http_requests_total{handler=\"query\",code=~\"5..\"}[$interval])) / sum by (job) (rate(http_requests_total{handler=\"query\"}[$interval]))", + "expr": "sum by (job, code) (rate(http_requests_total{handler=\"query\",code=~\"5..\"}[$interval])) / ignoring (code) group_left() sum by (job) (rate(http_requests_total{handler=\"query\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -466,10 +465,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(http_requests_total{handler=\"query_range\",code=~\"5..\"}[$interval])) / sum by (job) (rate(http_requests_total{handler=\"query_range\"}[$interval]))", + "expr": "sum by (job, code) (rate(http_requests_total{handler=\"query_range\",code=~\"5..\"}[$interval])) / ignoring (code) group_left() sum by (job) (rate(http_requests_total{handler=\"query_range\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -823,10 +821,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",grpc_type=\"unary\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{grpc_type=\"unary\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",grpc_type=\"unary\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{grpc_type=\"unary\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -1180,10 +1177,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) 
(rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",grpc_type=\"unary\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{grpc_type=\"unary\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",grpc_type=\"unary\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{grpc_type=\"unary\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -1485,10 +1481,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(http_requests_total{handler=\"receive\",code=~\"5..\"}[$interval])) / sum by (job) (rate(http_requests_total{handler=\"receive\"}[$interval]))", + "expr": "sum by (job, code) (rate(http_requests_total{handler=\"receive\",code=~\"5..\"}[$interval])) / ignoring (code) group_left() sum by (job) (rate(http_requests_total{handler=\"receive\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -1683,7 +1678,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, alertmanager) (rate(thanos_alert_sender_alerts_sent_total{}[$interval]))", + "expr": "sum by (job, alertmanager) (rate(thanos_alert_sender_alerts_sent_total{}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{alertmanager}}", @@ -1968,7 +1963,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_compact_group_compactions_total{}[$interval]))", + "expr": "sum by (job) (rate(thanos_compact_group_compactions_total{}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "compaction {{job}}", diff --git a/examples/dashboards/query-frontend.json b/examples/dashboards/query-frontend.json index 48421563bb..0502d378b9 100644 --- a/examples/dashboards/query-frontend.json +++ b/examples/dashboards/query-frontend.json @@ -242,10 +242,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query-frontend\",code=~\"5..\"}[$interval])) / sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query-frontend\"}[$interval]))", + "expr": "sum by (job, code) (rate(http_requests_total{job=~\"$job\", handler=\"query-frontend\",code=~\"5..\"}[$interval])) / ignoring (code) group_left() sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query-frontend\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -448,7 +447,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, tripperware) (rate(cortex_cache_request_duration_seconds_count{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, tripperware) (rate(cortex_cache_request_duration_seconds_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{tripperware}}", @@ -525,7 +524,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, tripperware) (rate(querier_cache_gets_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, tripperware) (rate(querier_cache_gets_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Cache gets - {{job}} {{tripperware}}", @@ -533,7 +532,7 @@ "step": 10 }, { - "expr": "sum by (job, tripperware) (rate(querier_cache_misses_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, 
tripperware) (rate(querier_cache_misses_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Cache misses - {{job}} {{tripperware}}", @@ -610,7 +609,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, tripperware) (rate(cortex_cache_fetched_keys_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, tripperware) (rate(cortex_cache_fetched_keys_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{tripperware}}", @@ -687,7 +686,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, tripperware) (rate(cortex_cache_hits_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, tripperware) (rate(cortex_cache_hits_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{tripperware}}", diff --git a/examples/dashboards/query.json b/examples/dashboards/query.json index f25474aed0..8a5b27de51 100644 --- a/examples/dashboards/query.json +++ b/examples/dashboards/query.json @@ -145,10 +145,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query\",code=~\"5..\"}[$interval])) / sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query\"}[$interval]))", + "expr": "sum by (job, code) (rate(http_requests_total{job=~\"$job\", handler=\"query\",code=~\"5..\"}[$interval])) / ignoring (code) group_left() sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -450,10 +449,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query_range\",code=~\"5..\"}[$interval])) / sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query_range\"}[$interval]))", + "expr": "sum by (job, code) (rate(http_requests_total{job=~\"$job\", handler=\"query_range\",code=~\"5..\"}[$interval])) / ignoring (code) group_left() sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"query_range\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -807,10 +805,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_client_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / sum by (job) (rate(grpc_client_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_client_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_client_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -1164,10 +1161,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_client_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / sum by (job) (rate(grpc_client_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_client_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", 
grpc_type=\"server_stream\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_client_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], diff --git a/examples/dashboards/receive.json b/examples/dashboards/receive.json index 01352c42f9..39246cb054 100644 --- a/examples/dashboards/receive.json +++ b/examples/dashboards/receive.json @@ -145,10 +145,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"receive\",code=~\"5..\"}[$interval])) / sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"receive\"}[$interval]))", + "expr": "sum by (job, code) (rate(http_requests_total{job=~\"$job\", handler=\"receive\",code=~\"5..\"}[$interval])) / ignoring (code) group_left() sum by (job) (rate(http_requests_total{job=~\"$job\", handler=\"receive\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -350,7 +349,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (tenant, code) (rate(http_requests_total{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\"}[$interval]))", + "expr": "sum by (tenant, code) (rate(http_requests_total{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{code}} - {{tenant}}", @@ -426,7 +425,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (tenant, code) (rate(http_requests_total{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code!~\"2..\"}[$interval]))", + "expr": "sum by (tenant, code) (rate(http_requests_total{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code!~\"2..\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{code}} - {{tenant}}", @@ -502,7 +501,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, tenant) (rate(http_request_duration_seconds_sum{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\"}[$interval])) / sum by (job, tenant) (http_request_duration_seconds_count{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\"})", + "expr": "sum by (job, tenant) (rate(http_request_duration_seconds_sum{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\"}[$__rate_interval])) / sum by (job, tenant) (http_request_duration_seconds_count{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{tenant}}", @@ -590,7 +589,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, tenant) (rate(http_request_size_bytes_sum{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code=~\"2..\"}[$interval])) / sum by (job, tenant) (rate(http_request_size_bytes_count{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code=~\"2..\"}[$interval]))", + "expr": "sum by (job, tenant) (rate(http_request_size_bytes_sum{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code=~\"2..\"}[$__rate_interval])) / sum by (job, tenant) (rate(http_request_size_bytes_count{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code=~\"2..\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{tenant}}", @@ -666,7 +665,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, tenant) (rate(http_request_size_bytes_sum{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code!~\"2..\"}[$interval])) / sum by (job, tenant) 
(rate(http_request_size_bytes_count{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code!~\"2..\"}[$interval]))", + "expr": "sum by (job, tenant) (rate(http_request_size_bytes_sum{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code!~\"2..\"}[$__rate_interval])) / sum by (job, tenant) (rate(http_request_size_bytes_count{job=~\"$job\", tenant=~\"$tenant\", handler=\"receive\", code!~\"2..\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{tenant}}", @@ -830,7 +829,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(thanos_receive_write_timeseries_bucket{job=~\"$job\", tenant=~\"$tenant\", code=~\"2..\"}[$interval])) by (job, tenant) ", + "expr": "sum(rate(thanos_receive_write_timeseries_sum{job=~\"$job\", tenant=~\"$tenant\", code=~\"2..\"}[$__rate_interval])) by (job, tenant) ", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{tenant}}", @@ -906,7 +905,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(thanos_receive_write_timeseries_bucket{job=~\"$job\", tenant=~\"$tenant\", code!~\"2..\"}[$interval])) by (tenant, code) ", + "expr": "sum(rate(thanos_receive_write_timeseries_sum{job=~\"$job\", tenant=~\"$tenant\", code!~\"2..\"}[$__rate_interval])) by (tenant, code) ", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{code}} - {{tenant}}", @@ -982,7 +981,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(thanos_receive_write_samples_bucket{job=~\"$job\", tenant=~\"$tenant\", code=~\"2..\"}[$interval])) by (job, tenant) ", + "expr": "sum(rate(thanos_receive_write_samples_sum{job=~\"$job\", tenant=~\"$tenant\", code=~\"2..\"}[$__rate_interval])) by (job, tenant) ", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{tenant}}", @@ -1058,7 +1057,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(thanos_receive_write_samples_bucket{job=~\"$job\", tenant=~\"$tenant\", code!~\"2..\"}[$interval])) by (tenant, code) ", + "expr": "sum(rate(thanos_receive_write_samples_sum{job=~\"$job\", tenant=~\"$tenant\", code!~\"2..\"}[$__rate_interval])) by (tenant, code) ", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{code}} - {{tenant}}", @@ -1147,7 +1146,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_receive_replications_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_receive_replications_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "all {{job}}", @@ -1314,7 +1313,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_receive_forward_requests_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_receive_forward_requests_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "all {{job}}", @@ -1632,10 +1631,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\", grpc_method=\"RemoteWrite\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\", grpc_method=\"RemoteWrite\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\", grpc_method=\"RemoteWrite\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) 
(rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\", grpc_method=\"RemoteWrite\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -1989,10 +1987,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\", grpc_method!=\"RemoteWrite\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\", grpc_method!=\"RemoteWrite\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\", grpc_method!=\"RemoteWrite\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\", grpc_method!=\"RemoteWrite\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -2346,10 +2343,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], diff --git a/examples/dashboards/rule.json b/examples/dashboards/rule.json index e25e4c7182..7f2092da8b 100644 --- a/examples/dashboards/rule.json +++ b/examples/dashboards/rule.json @@ -40,15 +40,15 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 3, "stack": false, "steppedLine": false, "targets": [ { - "expr": "sum by (job, strategy) (rate(prometheus_rule_evaluations_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, rule_group, strategy) (rate(prometheus_rule_evaluations_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{ strategy }}", + "legendFormat": "{{ rule_group }} {{ strategy }}", "legendLink": null, "step": 10 } @@ -116,15 +116,15 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 3, "stack": false, "steppedLine": false, "targets": [ { - "expr": "sum by (job, strategy) (increase(prometheus_rule_group_iterations_missed_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, rule_group, strategy) (rate(prometheus_rule_evaluation_failures_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{ strategy }}", + "legendFormat": "{{ rule_group }} {{ strategy }}", "legendLink": null, "step": 10 } @@ -132,7 +132,7 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Rule Group Evaluations Missed", + "title": "Rule Group Evaluations Failed", "tooltip": { "shared": false, "sort": 0, @@ -192,12 +192,88 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by 
(job, rule_group, strategy) (increase(prometheus_rule_group_iterations_missed_total{job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ rule_group }} {{ strategy }}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Rule Group Evaluations Missed", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, "stack": false, "steppedLine": false, "targets": [ { - "expr": "(\n max by(job, rule_group) (prometheus_rule_group_last_duration_seconds{job=~\"$job\"})\n >\n sum by(job, rule_group) (prometheus_rule_group_interval_seconds{job=~\"$job\"})\n)\n", + "expr": "(\n sum by(job, rule_group) (prometheus_rule_group_last_duration_seconds{job=~\"$job\"})\n >\n sum by(job, rule_group) (prometheus_rule_group_interval_seconds{job=~\"$job\"})\n)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{ rule_group }}", @@ -208,7 +284,7 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Rule Group Evlauations Too Slow", + "title": "Rule Group Evaluations Too Slow", "tooltip": { "shared": false, "sort": 0, @@ -261,7 +337,7 @@ "datasource": "$datasource", "description": "Shows rate of dropped alerts.", "fill": 1, - "id": 4, + "id": 5, "legend": { "avg": false, "current": false, @@ -286,7 +362,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, alertmanager) (rate(thanos_alert_sender_alerts_dropped_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, alertmanager) (rate(thanos_alert_sender_alerts_dropped_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{alertmanager}}", @@ -338,7 +414,7 @@ "datasource": "$datasource", "description": "Shows rate of alerts that successfully sent to alert manager.", "fill": 10, - "id": 5, + "id": 6, "legend": { "avg": false, "current": false, @@ -363,7 +439,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, alertmanager) (rate(thanos_alert_sender_alerts_sent_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, alertmanager) (rate(thanos_alert_sender_alerts_sent_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{alertmanager}}", @@ -417,7 +493,7 @@ "datasource": "$datasource", "description": "Shows ratio of errors compared to the total number of sent alerts.", "fill": 10, - "id": 6, + "id": 7, "legend": { "avg": false, "current": false, @@ -493,7 +569,7 @@ "datasource": "$datasource", "description": "Shows how long has it taken to send alerts to alert manager.", "fill": 1, - "id": 7, + "id": 8, 
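Two rewrites repeat across the dashboard hunks in this patch; a minimal PromQL sketch of both follows (the handler and metric names are taken from the expressions above, but treat the exact selectors as illustrative). First, $interval becomes Grafana's $__rate_interval, which resolves to at least four scrape intervals so that rate() always has enough samples in its window. Second, error-ratio panels now keep the status-code label on the numerator and join it back onto the per-job total, instead of collapsing everything into one hard-coded "error" series:

    # Numerator: per-code 5xx rate. Denominator: per-job total rate.
    # ignoring (code) group_left() makes the many-to-one division keep
    # the code label from the left-hand side.
      sum by (job, code) (rate(http_requests_total{handler="query", code=~"5.."}[$__rate_interval]))
    / ignoring (code) group_left()
      sum by (job) (rate(http_requests_total{handler="query"}[$__rate_interval]))

This is also why the accompanying "legendFormat": "error" lines are deleted: with the label preserved, each resulting series can be labelled by its actual code.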
"legend": { "avg": false, "current": false, @@ -623,7 +699,7 @@ "datasource": "$datasource", "description": "Shows rate of queued alerts.", "fill": 1, - "id": 8, + "id": 9, "legend": { "avg": false, "current": false, @@ -648,7 +724,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_alert_queue_alerts_dropped_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_alert_queue_alerts_dropped_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}}", @@ -702,7 +778,7 @@ "datasource": "$datasource", "description": "Shows ratio of dropped alerts compared to the total number of queued alerts.", "fill": 10, - "id": 9, + "id": 10, "legend": { "avg": false, "current": false, @@ -790,7 +866,7 @@ "datasource": "$datasource", "description": "Shows rate of handled Unary gRPC requests.", "fill": 10, - "id": 10, + "id": 11, "legend": { "avg": false, "current": false, @@ -941,7 +1017,7 @@ "datasource": "$datasource", "description": "Shows ratio of errors compared to the total number of handled requests.", "fill": 10, - "id": 11, + "id": 12, "legend": { "avg": false, "current": false, @@ -966,10 +1042,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -1017,7 +1092,7 @@ "datasource": "$datasource", "description": "Shows how long has it taken to handle requests, in quantiles.", "fill": 1, - "id": 12, + "id": 13, "legend": { "avg": false, "current": false, @@ -1147,7 +1222,7 @@ "datasource": "$datasource", "description": "Shows rate of handled Streamed gRPC requests.", "fill": 10, - "id": 13, + "id": 14, "legend": { "avg": false, "current": false, @@ -1298,7 +1373,7 @@ "datasource": "$datasource", "description": "Shows ratio of errors compared to the total number of handled requests.", "fill": 10, - "id": 14, + "id": 15, "legend": { "avg": false, "current": false, @@ -1323,10 +1398,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -1374,7 +1448,7 @@ "datasource": "$datasource", "description": "Shows how long has it taken to handle requests, in quantiles", "fill": 1, - "id": 15, + "id": 16, "legend": { "avg": false, "current": false, @@ -1503,7 
+1577,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 16, + "id": 17, "legend": { "avg": false, "current": false, @@ -1619,7 +1693,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 17, + "id": 18, "legend": { "avg": false, "current": false, @@ -1695,7 +1769,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 18, + "id": 19, "legend": { "avg": false, "current": false, diff --git a/examples/dashboards/sidecar.json b/examples/dashboards/sidecar.json index 116d800504..0ab12060a1 100644 --- a/examples/dashboards/sidecar.json +++ b/examples/dashboards/sidecar.json @@ -197,10 +197,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -553,10 +552,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -883,7 +881,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{operation}}", diff --git a/examples/dashboards/store.json b/examples/dashboards/store.json index bb239d412f..031c6b58c1 100644 --- a/examples/dashboards/store.json +++ b/examples/dashboards/store.json @@ -197,10 +197,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"unary\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"unary\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -554,10 +553,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) 
(rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", + "expr": "sum by (job, grpc_code) (rate(grpc_server_handled_total{grpc_code=~\"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss\",job=~\"$job\", grpc_type=\"server_stream\"}[$interval])) / ignoring (grpc_code) group_left() sum by (job) (rate(grpc_server_handled_total{job=~\"$job\", grpc_type=\"server_stream\"}[$interval]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "error", "step": 10 } ], @@ -760,7 +758,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{operation}}", @@ -837,7 +835,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operation_failures_total{job=~\"$job\"}[$interval])) / sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operation_failures_total{job=~\"$job\"}[$__rate_interval])) / sum by (job, operation) (rate(thanos_objstore_bucket_operations_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{operation}}", @@ -914,7 +912,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (job, operation, le) (rate(thanos_objstore_bucket_operation_duration_seconds_bucket{job=~\"$job\"}[$interval]))) * 1", + "expr": "histogram_quantile(0.99, sum by (job, operation, le) (rate(thanos_objstore_bucket_operation_duration_seconds_bucket{job=~\"$job\"}[$__rate_interval]))) * 1", "format": "time_series", "intervalFactor": 2, "legendFormat": "P99 {{job}}", @@ -922,7 +920,7 @@ "step": 10 }, { - "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operation_duration_seconds_sum{job=~\"$job\"}[$interval])) * 1 / sum by (job, operation) (rate(thanos_objstore_bucket_operation_duration_seconds_count{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, operation) (rate(thanos_objstore_bucket_operation_duration_seconds_sum{job=~\"$job\"}[$__rate_interval])) * 1 / sum by (job, operation) (rate(thanos_objstore_bucket_operation_duration_seconds_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "mean {{job}}", @@ -930,7 +928,7 @@ "step": 10 }, { - "expr": "histogram_quantile(0.50, sum by (job, operation, le) (rate(thanos_objstore_bucket_operation_duration_seconds_bucket{job=~\"$job\"}[$interval]))) * 1", + "expr": "histogram_quantile(0.50, sum by (job, operation, le) (rate(thanos_objstore_bucket_operation_duration_seconds_bucket{job=~\"$job\"}[$__rate_interval]))) * 1", "format": "time_series", "intervalFactor": 2, "legendFormat": "P50 {{job}}", @@ -1019,7 +1017,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job) (rate(thanos_bucket_store_block_loads_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_bucket_store_block_loads_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "block loads", @@ -1174,7 +1172,7 @@ 
"steppedLine": false, "targets": [ { - "expr": "sum by (job, operation) (rate(thanos_bucket_store_block_drops_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, operation) (rate(thanos_bucket_store_block_drops_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "block drops {{job}}", @@ -1341,7 +1339,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_requests_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_requests_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{item_type}}", @@ -1418,7 +1416,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_hits_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_hits_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{item_type}}", @@ -1495,7 +1493,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_items_added_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_items_added_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{item_type}}", @@ -1572,7 +1570,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_items_evicted_total{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, item_type) (rate(thanos_store_index_cache_items_evicted_total{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}} {{item_type}}", @@ -1661,7 +1659,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (job, le) (rate(thanos_bucket_store_sent_chunk_size_bytes_bucket{job=~\"$job\"}[$interval])))", + "expr": "histogram_quantile(0.99, sum by (job, le) (rate(thanos_bucket_store_sent_chunk_size_bytes_bucket{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P99", @@ -1669,7 +1667,7 @@ "step": 10 }, { - "expr": "sum by (job) (rate(thanos_bucket_store_sent_chunk_size_bytes_sum{job=~\"$job\"}[$interval])) / sum by (job) (rate(thanos_bucket_store_sent_chunk_size_bytes_count{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_bucket_store_sent_chunk_size_bytes_sum{job=~\"$job\"}[$__rate_interval])) / sum by (job) (rate(thanos_bucket_store_sent_chunk_size_bytes_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "mean", @@ -1677,7 +1675,7 @@ "step": 10 }, { - "expr": "histogram_quantile(0.99, sum by (job, le) (rate(thanos_bucket_store_sent_chunk_size_bytes_bucket{job=~\"$job\"}[$interval])))", + "expr": "histogram_quantile(0.50, sum by (job, le) (rate(thanos_bucket_store_sent_chunk_size_bytes_bucket{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P50", @@ -1765,7 +1763,7 @@ "steppedLine": false, "targets": [ { - "expr": "thanos_bucket_store_series_blocks_queried{job=~\"$job\", quantile=\"0.99\"}", + "expr": "histogram_quantile(0.99, sum by (le) (rate(thanos_bucket_store_series_blocks_queried{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P99", @@ -1773,7 
+1771,7 @@ "step": 10 }, { - "expr": "sum by (job) (rate(thanos_bucket_store_series_blocks_queried_sum{job=~\"$job\"}[$interval])) / sum by (job) (rate(thanos_bucket_store_series_blocks_queried_count{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_bucket_store_series_blocks_queried_sum{job=~\"$job\"}[$__rate_interval])) / sum by (job) (rate(thanos_bucket_store_series_blocks_queried_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "mean {{job}}", @@ -1781,7 +1779,7 @@ "step": 10 }, { - "expr": "thanos_bucket_store_series_blocks_queried{job=~\"$job\", quantile=\"0.50\"}", + "expr": "histogram_quantile(0.50, sum by (le) (rate(thanos_bucket_store_series_blocks_queried{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P50", @@ -1858,7 +1856,7 @@ "steppedLine": false, "targets": [ { - "expr": "thanos_bucket_store_series_data_fetched{job=~\"$job\", quantile=\"0.99\"}", + "expr": "histogram_quantile(0.99, sum by (le) (rate(thanos_bucket_store_series_data_fetched{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P99: {{data_type}} / {{job}}", @@ -1866,7 +1864,7 @@ "step": 10 }, { - "expr": "sum by (job, data_type) (rate(thanos_bucket_store_series_data_fetched_sum{job=~\"$job\"}[$interval])) / sum by (job, data_type) (rate(thanos_bucket_store_series_data_fetched_count{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, data_type) (rate(thanos_bucket_store_series_data_fetched_sum{job=~\"$job\"}[$__rate_interval])) / sum by (job, data_type) (rate(thanos_bucket_store_series_data_fetched_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "mean: {{data_type}} / {{job}}", @@ -1874,7 +1872,7 @@ "step": 10 }, { - "expr": "thanos_bucket_store_series_data_fetched{job=~\"$job\", quantile=\"0.50\"}", + "expr": "histogram_quantile(0.50, sum by (le) (rate(thanos_bucket_store_series_data_fetched{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P50: {{data_type}} / {{job}}", @@ -1951,7 +1949,7 @@ "steppedLine": false, "targets": [ { - "expr": "thanos_bucket_store_series_data_touched{job=~\"$job\", quantile=\"0.99\"}", + "expr": "histogram_quantile(0.99, sum by (le) (rate(thanos_bucket_store_series_data_touched{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P99: {{data_type}} / {{job}}", @@ -1959,7 +1957,7 @@ "step": 10 }, { - "expr": "sum by (job, data_type) (rate(thanos_bucket_store_series_data_touched_sum{job=~\"$job\"}[$interval])) / sum by (job, data_type) (rate(thanos_bucket_store_series_data_touched_count{job=~\"$job\"}[$interval]))", + "expr": "sum by (job, data_type) (rate(thanos_bucket_store_series_data_touched_sum{job=~\"$job\"}[$__rate_interval])) / sum by (job, data_type) (rate(thanos_bucket_store_series_data_touched_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "mean: {{data_type}} / {{job}}", @@ -1967,7 +1965,7 @@ "step": 10 }, { - "expr": "thanos_bucket_store_series_data_touched{job=~\"$job\", quantile=\"0.50\"}", + "expr": "histogram_quantile(0.50, sum by (le) (rate(thanos_bucket_store_series_data_touched{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P50: {{data_type}} / {{job}}", @@ -2043,7 +2041,7 @@ "steppedLine": false, "targets": [ { - "expr": 
"thanos_bucket_store_series_result_series{job=~\"$job\",quantile=\"0.99\"}", + "expr": "histogram_quantile(0.99, sum by (le) (rate(thanos_bucket_store_series_result_series{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P99", @@ -2051,7 +2049,7 @@ "step": 10 }, { - "expr": "sum by (job) (rate(thanos_bucket_store_series_result_series_sum{job=~\"$job\"}[$interval])) / sum by (job) (rate(thanos_bucket_store_series_result_series_count{job=~\"$job\"}[$interval]))", + "expr": "sum by (job) (rate(thanos_bucket_store_series_result_series_sum{job=~\"$job\"}[$__rate_interval])) / sum by (job) (rate(thanos_bucket_store_series_result_series_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "mean {{job}}", @@ -2059,7 +2057,7 @@ "step": 10 }, { - "expr": "thanos_bucket_store_series_result_series{job=~\"$job\",quantile=\"0.50\"}", + "expr": "histogram_quantile(0.50, sum by (le) (rate(thanos_bucket_store_series_result_series{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "P50", diff --git a/examples/interactive/interactive_test.go b/examples/interactive/interactive_test.go index 7a40ea97ce..b5c7cb6346 100644 --- a/examples/interactive/interactive_test.go +++ b/examples/interactive/interactive_test.go @@ -19,9 +19,9 @@ import ( "github.com/prometheus/common/model" "gopkg.in/yaml.v2" + "github.com/efficientgo/core/testutil" "github.com/thanos-io/objstore/client" "github.com/thanos-io/objstore/providers/s3" - "github.com/thanos-io/thanos/pkg/testutil" tracingclient "github.com/thanos-io/thanos/pkg/tracing/client" "github.com/thanos-io/thanos/pkg/tracing/jaeger" "github.com/thanos-io/thanos/test/e2e/e2ethanos" diff --git a/go.mod b/go.mod index 70b2c744f7..fa5c7fad13 100644 --- a/go.mod +++ b/go.mod @@ -3,102 +3,93 @@ module github.com/thanos-io/thanos go 1.18 require ( - cloud.google.com/go/storage v1.27.0 // indirect - cloud.google.com/go/trace v1.2.0 + cloud.google.com/go/storage v1.28.1 // indirect + cloud.google.com/go/trace v1.8.0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.8.3 - github.com/NYTimes/gziphandler v1.1.1 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alicebob/miniredis/v2 v2.22.0 - github.com/armon/go-metrics v0.4.0 github.com/blang/semver/v4 v4.0.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 - github.com/cespare/xxhash/v2 v2.1.2 + github.com/cespare/xxhash/v2 v2.2.0 github.com/chromedp/cdproto v0.0.0-20220629234738-4cfc9cdeeb92 github.com/chromedp/chromedp v0.8.2 - github.com/davecgh/go-spew v1.1.1 + github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.0 - github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a + github.com/efficientgo/e2e v0.14.1-0.20230413163036-7a7e0bae9913 github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fatih/structtag v1.2.0 github.com/felixge/fgprof v0.9.2 github.com/fortytw2/leaktest v1.3.0 - github.com/fsnotify/fsnotify v1.5.4 + github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 - github.com/go-openapi/strfmt v0.21.3 + github.com/go-openapi/strfmt v0.21.7 github.com/go-redis/redis/v8 v8.11.5 github.com/gogo/protobuf v1.3.2 github.com/gogo/status v1.1.1 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da - github.com/golang/protobuf 
v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 github.com/googleapis/gax-go v2.0.2+incompatible - github.com/gorilla/mux v1.8.0 - github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 + github.com/gorilla/mux v1.8.0 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/kit/v2 v2.0.0-20201002093600-73cf2ae9d891 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/consul/api v1.15.2 - github.com/hashicorp/go-cleanhttp v0.5.2 - github.com/hashicorp/go-sockaddr v1.0.2 - github.com/hashicorp/golang-lru v0.5.4 - github.com/hashicorp/memberlist v0.3.1 + github.com/hashicorp/golang-lru v0.6.0 github.com/jpillora/backoff v1.0.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.9 github.com/leanovate/gopter v0.2.9 github.com/lightstep/lightstep-tracer-go v0.25.0 github.com/lovoo/gcloud-opentracing v0.3.0 - github.com/miekg/dns v1.1.50 - github.com/minio/minio-go/v7 v7.0.37 + github.com/miekg/dns v1.1.53 + github.com/minio/minio-go/v7 v7.0.45 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 github.com/olekukonko/tablewriter v0.0.5 - github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e - github.com/opentracing-contrib/go-stdlib v1.0.0 + github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect + github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/basictracer-go v1.0.0 github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 - github.com/prometheus/alertmanager v0.24.0 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.37.0 - github.com/prometheus/exporter-toolkit v0.7.1 + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/alertmanager v0.25.0 + github.com/prometheus/client_golang v1.15.0 + github.com/prometheus/client_model v0.3.0 + github.com/prometheus/common v0.42.0 + github.com/prometheus/exporter-toolkit v0.9.1 // Prometheus maps version 2.x.y to tags v0.x.y. 
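(A worked instance of the mapping noted in the comment above: the github.com/prometheus/prometheus v0.44.0-rc.0.0.20230508103029-94d9367bbf13 pin introduced below is a Go pseudo-version built on the v0.44.0-rc.0 tag, i.e. Prometheus 2.44.0-rc.0, with the trailing timestamp and hash identifying a specific later commit.)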
- github.com/prometheus/prometheus v0.39.1 + github.com/prometheus/prometheus v0.44.0-rc.0.0.20230508103029-94d9367bbf13 github.com/sony/gobreaker v0.5.0 - github.com/stretchr/testify v1.8.0 - github.com/thanos-community/promql-engine v0.0.0-20221101075408-6d5b22b2cd4d - github.com/thanos-io/objstore v0.0.0-20221111162010-dc083e736d11 + github.com/stretchr/testify v1.8.2 + github.com/thanos-community/promql-engine v0.0.0-20230505104016-5124a98eee24 + github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a - github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae + github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.elastic.co/apm v1.11.0 go.elastic.co/apm/module/apmot v1.11.0 - go.etcd.io/etcd/api/v3 v3.5.4 - go.etcd.io/etcd/client/pkg/v3 v3.5.4 - go.etcd.io/etcd/client/v3 v3.5.4 - go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect - go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/bridge/opentracing v1.10.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/trace v1.10.0 + go.opentelemetry.io/contrib/propagators/ot v1.13.0 // indirect + go.opentelemetry.io/otel v1.14.0 + go.opentelemetry.io/otel/bridge/opentracing v1.12.0 + go.opentelemetry.io/otel/sdk v1.14.0 + go.opentelemetry.io/otel/trace v1.14.0 go.uber.org/atomic v1.10.0 - go.uber.org/automaxprocs v1.5.1 - go.uber.org/goleak v1.2.0 - golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa - golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 - golang.org/x/sync v0.0.0-20220907140024-f12130a52804 - golang.org/x/text v0.3.7 - golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 - google.golang.org/api v0.97.0 // indirect - google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 - google.golang.org/grpc v1.49.0 + go.uber.org/automaxprocs v1.5.2 + go.uber.org/goleak v1.2.1 + golang.org/x/crypto v0.7.0 + golang.org/x/net v0.9.0 + golang.org/x/sync v0.1.0 + golang.org/x/text v0.9.0 + golang.org/x/time v0.3.0 + google.golang.org/api v0.114.0 // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/grpc v1.53.0 google.golang.org/grpc/examples v0.0.0-20211119005141-f45e61797429 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 @@ -107,38 +98,45 @@ require ( ) require ( - github.com/efficientgo/core v1.0.0-rc.0 - github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd + github.com/efficientgo/core v1.0.0-rc.2 github.com/minio/sha256-simd v1.0.0 ) require ( - cloud.google.com/go v0.104.0 // indirect - cloud.google.com/go/compute v1.7.0 // indirect - cloud.google.com/go/iam v0.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect - go.opentelemetry.io/contrib/samplers/jaegerremote v0.3.0 - go.opentelemetry.io/otel/exporters/jaeger v1.8.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0 + cloud.google.com/go v0.110.0 // indirect + 
cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/iam v0.12.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect + go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0 + go.opentelemetry.io/otel/exporters/jaeger v1.12.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 ) -require go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 +require ( + github.com/onsi/gomega v1.27.6 + go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 + go4.org/intern v0.0.0-20220617035311-6925f38cc365 + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 +) + +require go4.org/unsafe/assume-no-moving-gc v0.0.0-20230209150437-ee73d164e760 // indirect require ( + cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.32.3 // indirect github.com/OneOfOne/xxhash v1.2.6 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect - github.com/aws/aws-sdk-go v1.44.109 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go v1.44.245 // indirect github.com/aws/aws-sdk-go-v2 v1.16.0 // indirect github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect @@ -152,90 +150,75 @@ require ( github.com/aws/smithy-go v1.11.1 // indirect github.com/baidubce/bce-sdk-go v0.9.111 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cenkalti/backoff/v4 v4.2.0 // indirect github.com/chromedp/sysutil v1.0.0 // indirect github.com/clbanning/mxj v1.8.4 // indirect - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/elastic/go-sysinfo v1.8.1 // indirect github.com/elastic/go-windows v1.0.1 // indirect - github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - 
github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.3 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.8 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/validate v0.22.1 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect github.com/gobwas/ws v1.1.0 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/googleapis v1.4.0 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/golang-jwt/jwt/v4 v4.4.1 // indirect - github.com/google/btree v1.0.1 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/go-cmp v0.5.9 github.com/google/go-querystring v1.1.0 // indirect - github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 // indirect + github.com/google/pprof v0.0.0-20230406165453-00490a63f317 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect - github.com/googleapis/gax-go/v2 v2.5.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-hclog v0.16.2 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/serf v0.9.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/klauspost/cpuid/v2 v2.1.0 // indirect - github.com/kr/pretty v0.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20210210170715-a8dfcb80d3a7 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mozillazg/go-httpheader v0.2.1 // indirect github.com/ncw/swift v1.0.53 // indirect github.com/oracle/oci-go-sdk/v65 v65.13.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/redis/rueidis v1.0.2-go1.18 github.com/rivo/uniseg v0.2.0 // indirect github.com/rs/xid v1.4.0 // indirect github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect - github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shirou/gopsutil/v3 v3.22.9 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/stretchr/objx v0.4.0 // indirect - github.com/tencentyun/cos-go-sdk-v5 v0.7.34 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/tencentyun/cos-go-sdk-v5 v0.7.40 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect @@ -244,27 +227,25 @@ require ( github.com/yusufpapurcu/wmi v1.2.2 // indirect go.elastic.co/apm/module/apmhttp v1.11.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect - go.mongodb.org/mongo-driver v1.10.2 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 // indirect - go.opentelemetry.io/contrib/propagators/aws v1.9.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect - go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.32.0 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect + go.opentelemetry.io/contrib/propagators/aws v1.13.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.13.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect + go.opentelemetry.io/otel/metric v0.37.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect - golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect - golang.org/x/tools v0.1.12 // indirect - golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/tools v0.8.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect ) diff --git a/go.sum b/go.sum index 4939068e8e..33f38e4511 100644 --- a/go.sum +++ b/go.sum @@ -27,8 +27,8 @@ cloud.google.com/go v0.97.0/go.mod 
h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Ud cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -40,12 +40,18 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -56,42 +62,39 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/trace v1.2.0 h1:oIaB4KahkIUOpLSAAjEJ8y2desbjY/x/RfP4O3KAtTI= -cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= +cloud.google.com/go/storage v1.28.1 
h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= +github.com/AzureAD/microsoft-authentication-library-for-go 
v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.8.3 h1:i84ZOPT35YCJROyuf97VP/VEdYhQce/8NTLOWq5tqJw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.8.3/go.mod h1:3+qm+VCJbVmQ9uscVz+8h1rRkJEy9ZNFGgpT1XB9mPg= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.32.3 h1:FhsH8qgWFkkPlPXBZ68uuT/FH/R+DLTtVPxjLEBs1v4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.32.3/go.mod h1:9a+Opaevo9fybhUvQkEG7fR6Zk7pYrW/s9NC4fODFIQ= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= -github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= @@ -125,23 +128,20 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= -github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= 
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.109 h1:+Na5JPeS0kiEHoBp5Umcuuf+IDqXqD0lXnM920E31YI= -github.com/aws/aws-sdk-go v1.44.109/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= +github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= @@ -168,8 +168,6 @@ github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnw github.com/baidubce/bce-sdk-go v0.9.111 h1:yGgtPpZYUZW4uoVorQ4xnuEgVeddACydlcJKW87MDV4= github.com/baidubce/bce-sdk-go v0.9.111/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -179,15 +177,15 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= +github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/chromedp/cdproto v0.0.0-20220629234738-4cfc9cdeeb92 h1:0kiAQSLWZDt4wsmcICou3C6in/OJ58FCqvXcB8Ax1Dk= github.com/chromedp/cdproto v0.0.0-20220629234738-4cfc9cdeeb92/go.mod h1:5Y4sD/eXpwrChIuxhSr/G20n9CdbCmoerOHnuAf0Zr0= github.com/chromedp/chromedp v0.8.2 h1:EYSsSqWuKYwyHZEJpU00kOGOMz5DE0qDVckelzauMFA= @@ -197,8 +195,6 @@ github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moA github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= @@ -206,16 +202,16 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= +github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 h1:XP+uhjN0yBCN/tPkr8Z0BNDc5rZam9RG6UWyf2FrSQ0= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cortexproject/promqlsmith v0.0.0-20230313010502-5c380a3b00b0 h1:NxAuzQ8oCBUgmBTmhC4GyKk9kBl4IoDymGib+tVdqiQ= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -229,14 +225,13 @@ 
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.84.1 h1:VgPsuxhrO9pUygvij6qOhqXfAkxAsDZYRpmjSDMEaHo= +github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -246,12 +241,10 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/efficientgo/core v1.0.0-rc.0 h1:jJoA0N+C4/knWYVZ6GrdHOtDyrg8Y/TR4vFpTaqTsqs= -github.com/efficientgo/core v1.0.0-rc.0/go.mod h1:kQa0V74HNYMfuJH6jiPiwNdpWXl4xd/K4tzlrcvYDQI= -github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= -github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a/go.mod h1:Hi+sz0REtlhVZ8zcdeTC3j6LUEEpJpPtNjOaOKuNcgI= -github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd h1:svR6KxSP1xiPw10RN4Pd7g6BAVkEcNN628PAqZH31mM= -github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= +github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= +github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= +github.com/efficientgo/e2e v0.14.1-0.20230413163036-7a7e0bae9913 h1:/CcqWjW1rMnoYiiKpiVKcReOJCGnCo868z85g7TwRhg= +github.com/efficientgo/e2e v0.14.1-0.20230413163036-7a7e0bae9913/go.mod h1:plsKU0YHE9uX+7utvr7SiDtVBSHJyEfHRO4UnUgDmts= github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd h1:VaYzzXeUbC5fVheskcKVNOyJMEYD+HgrJNzIAg/mRIM= github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q= github.com/elastic/go-sysinfo v1.1.1/go.mod 
h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= @@ -260,18 +253,15 @@ github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTK github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= +github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/fgprof v0.9.2 h1:tAMHtWMyl6E0BimjVbFt7fieU6FpjttsZN7j0wT5blc= @@ -284,8 +274,8 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -302,54 +292,58 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= 
+github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= -github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ 
-381,11 +375,9 @@ github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.1.0 h1:7RFti/xnNkMJnrK7D1yQ/iCIB5OrrY/54/H930kIbHA= github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0= -github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= @@ -400,10 +392,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= -github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -437,8 +427,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -446,9 +437,7 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -474,8 +463,8 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -491,8 +480,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 h1:ykKxL12NZd3JmWZnyqarJGsF73M9Xhtrik/FEtEeFRE= -github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ= +github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -500,8 +489,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= 
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -511,11 +500,11 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3 h1:eHv/jVY/JNop1xg2J9cBb4EzyMpWZoNCP1BslSAIkOI= -github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= +github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -524,8 +513,8 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= -github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -540,80 +529,53 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 
h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0= -github.com/hashicorp/consul/api v1.15.2/go.mod h1:v6nvB10borjOuIwNRZYPZiHKrTM/AyrGtd0WVVodKM8= +github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= -github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.1 
h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= -github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf h1:l/EZ57iRPNs8vd8c9qH0dB4Q+IiZHJouLAgxJ5j25tU= +github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ= +github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -630,7 +592,6 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -654,16 +615,14 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd h1:b1taQnM42dp3NdiiQwfmM1WyyucHayZSKN5R0PRYWL0= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty 
v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -672,14 +631,13 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20210210170715-a8dfcb80d3a7 h1:YjW+hUb8Fh2S58z4av4t/0cBMK/Q0aP48RocCFsC8yI= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20210210170715-a8dfcb80d3a7/go.mod h1:Spd59icnvRxSKuyijbbwe5AemzvcyXAUBgApa7VybMw= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.25.0 h1:sGVnz8h3jTQuHKMbUe2949nXm3Sg09N1UcR3VoQNN5E= github.com/lightstep/lightstep-tracer-go v0.25.0/go.mod h1:G1ZAEaqTHFPWpWunnbUn1ADEY/Jvzz7jIOaXwAfD6A8= -github.com/linode/linodego v1.9.1 h1:29UpEPpYcGFnbwiJW8mbk/bjBZpgd/pv68io2IKTo34= +github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= github.com/lovoo/gcloud-opentracing v0.3.0 h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDLnHp0urPLU= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= @@ -692,46 +650,31 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= +github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.37 h1:aJvYMbtpVPSFBck6guyvOkxK03MycxDOCs49ZBuY5M8= -github.com/minio/minio-go/v7 v7.0.37/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs= +github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -750,7 +693,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -780,8 +722,10 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -805,17 +749,16 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/oracle/oci-go-sdk/v65 v65.13.0 h1:0+9ea5goYfhI3/MPfbIQU6yzHYWE6sCk6VuUepxk5Nk= github.com/oracle/oci-go-sdk/v65 v65.13.0/go.mod h1:oyMrMa1vOzzKTmPN+kqrTR9y9kPA2tU1igN3NUSNTIE= github.com/orisano/pixelmatch v0.0.0-20210112091706-4fa4c7ba91d5 h1:1SoBaSPudixRecmlHXb/GxmaD3fLMtHIDN13QujwQuc= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -825,45 +768,44 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw= -github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI= +github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg= +github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= +github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= -github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= +github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw= +github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -873,11 +815,14 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.39.1 h1:abZM6A+sKAv2eKTbRIaHq4amM/nT07MuxRm0+QTaTj0= -github.com/prometheus/prometheus v0.39.1/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod 
h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/prometheus v0.44.0-rc.0.0.20230508103029-94d9367bbf13 h1:NQicSUgogSEMucX1aF9Z4nwKKTCLj4hFZSORwWBO2nk= +github.com/prometheus/prometheus v0.44.0-rc.0.0.20230508103029-94d9367bbf13/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/rueidis v1.0.2-go1.18 h1:ZmiZSZY9Htzn/Ri+vZ5o1snD2inOoqKjezypNqwAgKk= +github.com/redis/rueidis v1.0.2-go1.18/go.mod h1:aJiezBQL+bZKAZ+d7YOuj6xKQhrXvEPBiOfotEhG5R8= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -885,29 +830,23 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/shirou/gopsutil/v3 v3.21.2/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4c5eA= github.com/shirou/gopsutil/v3 v3.22.9/go.mod h1:bBYl1kjgEJpWpxeHmLI+dVHWtyAwfcmSBLDsp2TNT8A= -github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= github.com/simonpasquier/klog-gokit/v3 v3.0.0 h1:J0QrVhAULISHWN05PeXX/xMqJBjnpl2fAuO8uHdQGsA= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -926,7 +865,6 @@ github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -936,26 +874,29 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4= -github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= -github.com/tencentyun/cos-go-sdk-v5 v0.7.34/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= +github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= +github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod 
h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-community/promql-engine v0.0.0-20221101075408-6d5b22b2cd4d h1:UIqUy9mHwI1ZqAtxYiYmRhSlGCgRtfS9rFy2usHcA30= -github.com/thanos-community/promql-engine v0.0.0-20221101075408-6d5b22b2cd4d/go.mod h1:e3BzS0UVlHOKQa5STt/C0elYAa4Qp/7jyzp/Xf+e0C0= -github.com/thanos-io/objstore v0.0.0-20221111162010-dc083e736d11 h1:XlwaDUNwhXBIuLRK826MaT5WkZztbGXmBsNu6m/1Tbs= -github.com/thanos-io/objstore v0.0.0-20221111162010-dc083e736d11/go.mod h1:Vx5dZs9ElxEhNLnum/OgB0pNTqNdI2zdXL82BeJr3T4= +github.com/thanos-community/promql-engine v0.0.0-20230505104016-5124a98eee24 h1:UvZpeeWoiu54gd6ZW8lzJZWhQLekfF/vmccgOMQssYU= +github.com/thanos-community/promql-engine v0.0.0-20230505104016-5124a98eee24/go.mod h1:PbimG7ocz5JmFRLlQ7yMcewnkunNBmvMyVgAoNmyvDw= +github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204 h1:W4w5Iph7j32Sf1QFWLJDCqvO0WgZS0jHGID+qnq3wV0= +github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204/go.mod h1:STSgpY8M6EKF2G/raUFdbIMf2U9GgYlEjAEHJxjvpAo= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -968,7 +909,6 @@ github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1g github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -978,8 +918,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= -github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae h1:Z8YibUpdBEdCq8nwrYXJQ8vYooevbmEBIdFpseXK3/8= -github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae/go.mod h1:YfOOLoW1Q/jIIu0WLeSwgStmrKjuJEZSKTAUc+0KFvE= +github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d h1:9Z/HiqeGN+LOnmotAMpFEQjuXZ4AGAVFG0rC1laP5Go= +github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d/go.mod h1:Fnq3+U51tMkPRMC6Wr7zKGUeFFYX4YjNrNK50iU0fcE= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -988,14 +928,13 @@ 
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23n github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= @@ -1010,18 +949,11 @@ go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= -go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1030,42 +962,43 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io 
v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 h1:qZ3KzA4qPzLBDtQyPk4ydjlg8zvXbNysnFHaVMKJbVo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0/go.mod h1:14Oo79mRwusSI02L0EfG3Gp1uF3+1wSL+D4zDysxyqs= -go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= -go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= -go.opentelemetry.io/contrib/propagators/aws v1.9.0 h1:60BnkzNPdU3WD12oOGQNTjgbyws8iDggIaBWpghvcVo= -go.opentelemetry.io/contrib/propagators/aws v1.9.0/go.mod h1:lYGAfTJZU1mo92QxtuiuNJjRyRyEWj1ldO1b0Vpc1I0= -go.opentelemetry.io/contrib/propagators/b3 v1.9.0 h1:Lzb9zU98jCE2kyfCjWfSSsiQoGtvBL+COxvUBf7FNhU= -go.opentelemetry.io/contrib/propagators/b3 v1.9.0/go.mod h1:fyx3gFXn+4w5uWTTiqaI8oBNBW/6w9Ow5zxXf7NGixU= -go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 h1:edJTgwezAtLKUINAXfjxllJ1vlsphNPV7RkuKNd/HkQ= -go.opentelemetry.io/contrib/propagators/jaeger v1.9.0/go.mod h1:Q/AXutvrBTfEDSeRLwOmKhyviX5adJvTesg6JFTybYg= -go.opentelemetry.io/contrib/propagators/ot v1.9.0 h1:+pYoqyFoA3H6EZ7Wie2ZQdqS4ZfG42PAGvj3eLUukHE= -go.opentelemetry.io/contrib/propagators/ot v1.9.0/go.mod h1:D2GfaecHHX67fXT93/5iKl2DArjt8+H0XWtFD8b4Z+k= -go.opentelemetry.io/contrib/samplers/jaegerremote v0.3.0 h1:SLLzX5hdPC0jR3t0MrmRhZkKZJ0UKhcB+0N/wWkiarQ= -go.opentelemetry.io/contrib/samplers/jaegerremote v0.3.0/go.mod h1:QnxuwZJaTvT5YN/25CLle62v/7gal96wXN/CSOhWMaI= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/bridge/opentracing v1.10.0 h1:WzAVGovpC1s7KD5g4taU6BWYZP3QGSDVTlbRu9fIHw8= -go.opentelemetry.io/otel/bridge/opentracing v1.10.0/go.mod h1:J7GLR/uxxqMAzZptsH0pjte3Ep4GacTCrbGBoDuHBqk= -go.opentelemetry.io/otel/exporters/jaeger v1.8.0 h1:TLLqD6kDhLPziEC7pgPrMvP9lAqdk3n1gf8DiFSnfW8= -go.opentelemetry.io/otel/exporters/jaeger v1.8.0/go.mod h1:GbWg+ng88rDtx+id26C34QLqw2erqJeAjsCx9AFeHfE= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0 h1:S8DedULB3gp93Rh+9Z+7NTEv+6Id/KYS7LDyipZ9iCE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0/go.mod h1:5WV40MLWwvWlGP7Xm8g3pMcg0pKOUY609qxJn8y7LmM= -go.opentelemetry.io/otel/metric v0.32.0 h1:lh5KMDB8xlMM4kwE38vlZJ3rZeiWrjw3As1vclfC01k= -go.opentelemetry.io/otel/metric v0.32.0/go.mod h1:PVDNTt297p8ehm949jsIzd+Z2bIZJYQQG/uuHTeWFHY= 
-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= +go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 h1:WZwiLCwOL0XW/6TVT7LTtdRDveoHZ6q3wL+0iYsBcdE= +go.opentelemetry.io/contrib/propagators/autoprop v0.38.0/go.mod h1:JBebP2d0HiffbfelbIEoBOCl4790g7Z8lD1scUd3Vd8= +go.opentelemetry.io/contrib/propagators/aws v1.13.0 h1:9qOAQhTeJGiaYNfCCnRmL12XZGIaxclqS5yfkSXpn8o= +go.opentelemetry.io/contrib/propagators/aws v1.13.0/go.mod h1:XXahyNfhmY382jrQPE1sKXxTgfnXbx3KzNOweRJV8+A= +go.opentelemetry.io/contrib/propagators/b3 v1.13.0 h1:f17PBmZK60RoHvOpJVqEka8oS2EXjpjHquESD/8zZ50= +go.opentelemetry.io/contrib/propagators/b3 v1.13.0/go.mod h1:zy2hz1TpGUoJzSwlBchVGvVAFQS8s2pglKLbrAFZ+Sc= +go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 h1:+tVlvpiQMOCzi4EYCaBjblibpyKfqoph0fcITmtXMws= +go.opentelemetry.io/contrib/propagators/jaeger v1.13.0/go.mod h1:Qf7eVCLYawiNIB+A81kk8aFDFwYqXSqmt0N2RcvkLLI= +go.opentelemetry.io/contrib/propagators/ot v1.13.0 h1:tHWNd0WRS6w9keZoZg9aF3zYohdaBacQfojPYZJgATQ= +go.opentelemetry.io/contrib/propagators/ot v1.13.0/go.mod h1:R6Op9T6LxNaMRVlGD0wVwz40LSsAq296CXiEydKLQBU= +go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0 h1:E+RlfFhGZ5Tk0wO1oOJYC0Il4Q7SjE8ZMl8x/VTK9Pk= +go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0/go.mod h1:cuBMmL+iGJ4UpZi6dykQlIUxqKSMkp5eu1C1UjUJYFI= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= +go.opentelemetry.io/otel/bridge/opentracing v1.12.0 h1:tU684zGp/ft9QpXRixnoeKbz0vNjrcd3tEDsYy+uJUI= +go.opentelemetry.io/otel/bridge/opentracing v1.12.0/go.mod h1:qjLYKFXmUQhZHVa0EbQOY29U061UO/14B+NGWUOnOnk= +go.opentelemetry.io/otel/exporters/jaeger v1.12.0 h1:1Vy11S0iAD70EPfcP3N2f2IhLq/cIuTW+Zt010MswR8= +go.opentelemetry.io/otel/exporters/jaeger v1.12.0/go.mod h1:SCLbaspEoU9mGJZB6ksc2iSGU6CLWY5yefchDqOM0IM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod 
h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4= +go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= +go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= +go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1073,27 +1006,24 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= -go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= +go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= +go4.org/intern 
v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230209150437-ee73d164e760 h1:gH0IO5GDYAcawu+ThKrvAofVTgJjYaoOZ5rrC4pS2Xw= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230209150437-ee73d164e760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1101,15 +1031,14 @@ golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaE golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1120,8 +1049,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= 
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -1134,7 +1063,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1147,9 +1075,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1170,7 +1098,6 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1193,14 +1120,10 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1208,9 +1131,11 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 h1:asZqf0wXastQr+DudYagQS8uBO8bHKeYD1vbAvGmFL8= -golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1231,9 +1156,9 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= golang.org/x/oauth2 
v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1247,8 +1172,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1258,7 +1184,6 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1273,18 +1198,13 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1310,12 +1230,10 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1323,14 +1241,13 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1346,13 +1263,16 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1361,14 +1281,16 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 h1:yuLAip3bfURHClMG9VBdzPrQvCWjWiWUTBGV+/fCbUs= -golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1388,12 +1310,10 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1434,18 +1354,18 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -1488,8 +1408,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.97.0 h1:x/vEL1XDF/2V4xzdNgFPaKHluRESo2aTsL7QzHnBtGQ= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1580,9 +1500,8 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -1602,23 +1521,24 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -1647,17 +1567,16 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.25.1 h1:yL7du50yc93k17nH/Xe9jujAYrcDkI/i5DL1jPz4E3M= -k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI= -k8s.io/client-go v0.25.1 h1:uFj4AJKtE1/ckcSKz8IhgAuZTdRXZDKev8g387ndD58= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= +k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= +k8s.io/client-go v0.26.2 
h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= +k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/internal/cortex/chunk/bucket_client.go b/internal/cortex/chunk/bucket_client.go deleted file mode 100644 index a4570bd792..0000000000 --- a/internal/cortex/chunk/bucket_client.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "context" - "time" -) - -// BucketClient is used to enforce retention on chunk buckets. -type BucketClient interface { - DeleteChunksBefore(ctx context.Context, ts time.Time) error -} diff --git a/internal/cortex/chunk/cache/background.go b/internal/cortex/chunk/cache/background.go index 0fb3c7b0ff..f3457668b3 100644 --- a/internal/cortex/chunk/cache/background.go +++ b/internal/cortex/chunk/cache/background.go @@ -8,8 +8,6 @@ import ( "flag" "sync" - opentracing "github.com/opentracing/opentracing-go" - otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ) @@ -73,44 +71,6 @@ func NewBackground(name string, cfg BackgroundConfig, cache Cache, reg prometheu return c } -// Stop the background flushing goroutines. -func (c *backgroundCache) Stop() { - close(c.quit) - c.wg.Wait() - - c.Cache.Stop() -} - -const keysPerBatch = 100 - -// Store writes keys for the cache in the background. 
-func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byte) { - for len(keys) > 0 { - num := keysPerBatch - if num > len(keys) { - num = len(keys) - } - - bgWrite := backgroundWrite{ - keys: keys[:num], - bufs: bufs[:num], - } - select { - case c.bgWrites <- bgWrite: - c.queueLength.Add(float64(num)) - default: - c.droppedWriteBack.Add(float64(num)) - sp := opentracing.SpanFromContext(ctx) - if sp != nil { - sp.LogFields(otlog.Int("dropped", num)) - } - return // queue is full; give up - } - keys = keys[num:] - bufs = bufs[num:] - } -} - func (c *backgroundCache) writeBackLoop() { defer c.wg.Done() diff --git a/internal/cortex/chunk/cache/background_test.go b/internal/cortex/chunk/cache/background_test.go index fd8d83d085..766e30f234 100644 --- a/internal/cortex/chunk/cache/background_test.go +++ b/internal/cortex/chunk/cache/background_test.go @@ -15,10 +15,10 @@ func TestBackground(t *testing.T) { WriteBackBuffer: 100, }, cache.NewMockCache(), nil) - keys, chunks := fillCache(t, c) + keys, bufs := fillCache(t, c) cache.Flush(c) - testCacheSingle(t, c, keys, chunks) - testCacheMultiple(t, c, keys, chunks) + testCacheSingle(t, c, keys, bufs) + testCacheMultiple(t, c, keys, bufs) testCacheMiss(t, c) } diff --git a/internal/cortex/chunk/cache/cache.go b/internal/cortex/chunk/cache/cache.go index 2efc28fda5..691a7415f4 100644 --- a/internal/cortex/chunk/cache/cache.go +++ b/internal/cortex/chunk/cache/cache.go @@ -58,7 +58,6 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, description string, f cfg.Prefix = prefix } - func (cfg *Config) Validate() error { return cfg.Fifocache.Validate() } diff --git a/internal/cortex/chunk/cache/cache_test.go b/internal/cortex/chunk/cache/cache_test.go index f1a1c9c84c..0e82b79842 100644 --- a/internal/cortex/chunk/cache/cache_test.go +++ b/internal/cortex/chunk/cache/cache_test.go @@ -5,82 +5,31 @@ package cache_test import ( "context" + "fmt" "math/rand" - "sort" "strconv" "testing" "time" "github.com/go-kit/log" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/internal/cortex/chunk" "github.com/thanos-io/thanos/internal/cortex/chunk/cache" - prom_chunk "github.com/thanos-io/thanos/internal/cortex/chunk/encoding" ) -const userID = "1" - -func fillCache(t *testing.T, cache cache.Cache) ([]string, []chunk.Chunk) { - const chunkLen = 13 * 3600 // in seconds - - // put a set of chunks, larger than background batch size, with varying timestamps and values +func fillCache(t *testing.T, cache cache.Cache) ([]string, [][]byte) { keys := []string{} bufs := [][]byte{} - chunks := []chunk.Chunk{} for i := 0; i < 111; i++ { - ts := model.TimeFromUnix(int64(i * chunkLen)) - promChunk := prom_chunk.New() - nc, err := promChunk.Add(model.SamplePair{ - Timestamp: ts, - Value: model.SampleValue(i), - }) - require.NoError(t, err) - require.Nil(t, nc) - c := chunk.NewChunk( - userID, - model.Fingerprint(1), - labels.Labels{ - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "bar", Value: "baz"}, - }, - promChunk, - ts, - ts.Add(chunkLen), - ) - - err = c.Encode() - require.NoError(t, err) - buf, err := c.Encoded() - require.NoError(t, err) - - // In order to be able to compare the expected chunk (this one) with the - // actual one (the one that will be fetched from the cache) we need to - // cleanup the chunk to avoid any internal references mismatch (ie. appender - // pointer). 
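// Standalone sketch (not from the original files): the batching pattern used by the
// removed backgroundCache.Store above. Writes are chopped into fixed-size batches and
// handed to a buffered channel; when the queue is full, the remaining work is dropped
// rather than blocking the caller. All names below are illustrative.
package main

import "fmt"

const keysPerBatch = 100

type backgroundWrite struct {
	keys []string
	bufs [][]byte
}

// enqueue hands batches to the queue without blocking; it reports how many keys
// were accepted and how many were dropped because the queue was full.
func enqueue(queue chan backgroundWrite, keys []string, bufs [][]byte) (accepted, dropped int) {
	for len(keys) > 0 {
		n := keysPerBatch
		if n > len(keys) {
			n = len(keys)
		}
		select {
		case queue <- backgroundWrite{keys: keys[:n], bufs: bufs[:n]}:
			accepted += n
		default:
			dropped += len(keys) // queue is full; give up on the rest
			return
		}
		keys, bufs = keys[n:], bufs[n:]
	}
	return
}

func main() {
	queue := make(chan backgroundWrite, 2) // tiny buffer to force drops
	keys := make([]string, 500)
	bufs := make([][]byte, 500)
	accepted, dropped := enqueue(queue, keys, bufs)
	fmt.Println("accepted:", accepted, "dropped:", dropped) // accepted: 200 dropped: 300
}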
- cleanChunk := chunk.Chunk{ - UserID: c.UserID, - Fingerprint: c.Fingerprint, - From: c.From, - Through: c.Through, - Checksum: c.Checksum, - ChecksumSet: c.ChecksumSet, - } - err = cleanChunk.Decode(chunk.NewDecodeContext(), buf) - require.NoError(t, err) - - keys = append(keys, c.ExternalKey()) - bufs = append(bufs, buf) - chunks = append(chunks, cleanChunk) + keys = append(keys, fmt.Sprintf("test%d", i)) + bufs = append(bufs, []byte(fmt.Sprintf("buf%d", i))) } cache.Store(context.Background(), keys, bufs) - return keys, chunks + return keys, bufs } -func testCacheSingle(t *testing.T, cache cache.Cache, keys []string, chunks []chunk.Chunk) { +func testCacheSingle(t *testing.T, cache cache.Cache, keys []string, data [][]byte) { for i := 0; i < 100; i++ { index := rand.Intn(len(keys)) key := keys[index] @@ -89,70 +38,24 @@ func testCacheSingle(t *testing.T, cache cache.Cache, keys []string, chunks []ch require.Len(t, found, 1) require.Len(t, bufs, 1) require.Len(t, missingKeys, 0) - - c, err := chunk.ParseExternalKey(userID, found[0]) - require.NoError(t, err) - err = c.Decode(chunk.NewDecodeContext(), bufs[0]) - require.NoError(t, err) - require.Equal(t, chunks[index], c) + require.Equal(t, data[index], bufs[0]) } } -func testCacheMultiple(t *testing.T, cache cache.Cache, keys []string, chunks []chunk.Chunk) { +func testCacheMultiple(t *testing.T, cache cache.Cache, keys []string, data [][]byte) { // test getting them all found, bufs, missingKeys := cache.Fetch(context.Background(), keys) require.Len(t, found, len(keys)) require.Len(t, bufs, len(keys)) require.Len(t, missingKeys, 0) - result := []chunk.Chunk{} + result := [][]byte{} for i := range found { - c, err := chunk.ParseExternalKey(userID, found[i]) - require.NoError(t, err) - err = c.Decode(chunk.NewDecodeContext(), bufs[i]) - require.NoError(t, err) - result = append(result, c) + result = append(result, bufs[i]) } - require.Equal(t, chunks, result) -} - -func testChunkFetcher(t *testing.T, c cache.Cache, keys []string, chunks []chunk.Chunk) { - fetcher, err := chunk.NewChunkFetcher(c, false, nil) - require.NoError(t, err) - defer fetcher.Stop() - - found, err := fetcher.FetchChunks(context.Background(), chunks, keys) - require.NoError(t, err) - sort.Sort(byExternalKey(found)) - sort.Sort(byExternalKey(chunks)) - require.Equal(t, chunks, found) + require.Equal(t, data, result) } -// testChunkFetcherStop checks that stopping the fetcher while fetching chunks doesn't result in an error -func testChunkFetcherStop(t *testing.T, c cache.Cache, keys []string, chunks []chunk.Chunk) { - fetcher, err := chunk.NewChunkFetcher(c, false, chunk.NewMockStorage()) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - defer close(done) - if _, err := fetcher.FetchChunks(context.Background(), chunks, keys); err != nil { - // Since we stop the fetcher while FetchChunks is running, we may not get everything back, - // which requires the fetcher to fetch the missing keys from storage, which does not have them, - // so errors here are expected. We need to check the error because of the lint check.
- require.NotNil(t, err) - } - }() - fetcher.Stop() - <-done -} - -type byExternalKey []chunk.Chunk - -func (a byExternalKey) Len() int { return len(a) } -func (a byExternalKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byExternalKey) Less(i, j int) bool { return a[i].ExternalKey() < a[j].ExternalKey() } - func testCacheMiss(t *testing.T, cache cache.Cache) { for i := 0; i < 100; i++ { key := strconv.Itoa(rand.Int()) // arbitrary key which should fail: no chunk key is a single integer @@ -164,24 +67,16 @@ func testCacheMiss(t *testing.T, cache cache.Cache) { } func testCache(t *testing.T, cache cache.Cache) { - keys, chunks := fillCache(t, cache) + keys, bufs := fillCache(t, cache) t.Run("Single", func(t *testing.T) { - testCacheSingle(t, cache, keys, chunks) + testCacheSingle(t, cache, keys, bufs) }) t.Run("Multiple", func(t *testing.T) { - testCacheMultiple(t, cache, keys, chunks) + testCacheMultiple(t, cache, keys, bufs) }) t.Run("Miss", func(t *testing.T) { testCacheMiss(t, cache) }) - t.Run("Fetcher", func(t *testing.T) { - testChunkFetcher(t, cache, keys, chunks) - }) - t.Run("FetcherStop", func(t *testing.T) { - // Refill the cache to avoid nil pointer error during fetch for getting missing keys from storage - keys, chunks = fillCache(t, cache) - testChunkFetcherStop(t, cache, keys, chunks) - }) } func TestMemcache(t *testing.T) { diff --git a/internal/cortex/chunk/cache/memcached.go b/internal/cortex/chunk/cache/memcached.go index 651e288278..8328e4889f 100644 --- a/internal/cortex/chunk/cache/memcached.go +++ b/internal/cortex/chunk/cache/memcached.go @@ -147,7 +147,6 @@ func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, b }) return } - func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { var items map[string]*memcache.Item const method = "Memcache.GetMulti" diff --git a/internal/cortex/chunk/cache/memcached_client.go b/internal/cortex/chunk/cache/memcached_client.go index 11295a9aff..bcae408bac 100644 --- a/internal/cortex/chunk/cache/memcached_client.go +++ b/internal/cortex/chunk/cache/memcached_client.go @@ -16,13 +16,11 @@ import ( "github.com/bradfitz/gomemcache/memcache" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/sony/gobreaker" - "github.com/thanos-io/thanos/pkg/discovery/dns" - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" + "github.com/thanos-io/thanos/pkg/discovery/dns" ) // MemcachedClient interface exists for mocking memcacheClient. @@ -192,34 +190,6 @@ func (c *memcachedClient) dialViaCircuitBreaker(network, address string, timeout return conn.(net.Conn), nil } -// Stop the memcache client. -func (c *memcachedClient) Stop() { - close(c.quit) - c.wait.Wait() -} - -func (c *memcachedClient) Set(item *memcache.Item) error { - // Skip hitting memcached at all if the item is bigger than the max allowed size. - if c.maxItemSize > 0 && len(item.Value) > c.maxItemSize { - c.skipped.Inc() - return nil - } - - err := c.Client.Set(item) - if err == nil { - return nil - } - - // Inject the server address in order to have more information about which memcached - // backend server failed. This is a best effort. 
- addr, addrErr := c.serverList.PickServer(item.Key) - if addrErr != nil { - return err - } - - return errors.Wrapf(err, "server=%s", addr) -} - func (c *memcachedClient) updateLoop(updateInterval time.Duration) { defer c.wait.Done() ticker := time.NewTicker(updateInterval) diff --git a/internal/cortex/chunk/cache/memcached_client_selector_test.go b/internal/cortex/chunk/cache/memcached_client_selector_test.go deleted file mode 100644 index f024e5752b..0000000000 --- a/internal/cortex/chunk/cache/memcached_client_selector_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package cache_test - -import ( - "fmt" - "testing" - - "github.com/bradfitz/gomemcache/memcache" - "github.com/facette/natsort" - "github.com/stretchr/testify/require" - - "github.com/thanos-io/thanos/internal/cortex/chunk/cache" -) - -func TestNatSort(t *testing.T) { - // Validate that the order of SRV records returned by a DNS - // lookup for a k8s StatefulSet is as expected when - // a natsort is done. - input := []string{ - "memcached-10.memcached.cortex.svc.cluster.local.", - "memcached-1.memcached.cortex.svc.cluster.local.", - "memcached-6.memcached.cortex.svc.cluster.local.", - "memcached-3.memcached.cortex.svc.cluster.local.", - "memcached-25.memcached.cortex.svc.cluster.local.", - } - - expected := []string{ - "memcached-1.memcached.cortex.svc.cluster.local.", - "memcached-3.memcached.cortex.svc.cluster.local.", - "memcached-6.memcached.cortex.svc.cluster.local.", - "memcached-10.memcached.cortex.svc.cluster.local.", - "memcached-25.memcached.cortex.svc.cluster.local.", - } - - natsort.Sort(input) - require.Equal(t, expected, input) -} - -func TestMemcachedJumpHashSelector_PickServer(t *testing.T) { - s := cache.MemcachedJumpHashSelector{} - err := s.SetServers("google.com:80", "microsoft.com:80", "duckduckgo.com:80") - require.NoError(t, err) - - // We store the string representation instead of the net.Addr - // to make sure different IPs were discovered during SetServers - distribution := make(map[string]int) - - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key-%d", i) - addr, err := s.PickServer(key) - require.NoError(t, err) - distribution[addr.String()]++ - } - - // All of the servers should have been returned at least - // once - require.Len(t, distribution, 3) - for _, v := range distribution { - require.NotZero(t, v) - } -} - -func TestMemcachedJumpHashSelector_PickServer_ErrNoServers(t *testing.T) { - s := cache.MemcachedJumpHashSelector{} - _, err := s.PickServer("foo") - require.Equal(t, memcache.ErrNoServers, err) -} diff --git a/internal/cortex/chunk/cache/stop_once.go b/internal/cortex/chunk/cache/stop_once.go deleted file mode 100644 index 16e2fdad22..0000000000 --- a/internal/cortex/chunk/cache/stop_once.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package cache - -import "sync" - -type stopOnce struct { - once sync.Once - Cache -} - -// StopOnce wraps a Cache and ensures it's only stopped once. -func StopOnce(cache Cache) Cache { - return &stopOnce{ - Cache: cache, - } -} - -func (s *stopOnce) Stop() { - s.once.Do(func() { - s.Cache.Stop() - }) -} diff --git a/internal/cortex/chunk/chunk.go b/internal/cortex/chunk/chunk.go deleted file mode 100644 index db286a4c22..0000000000 --- a/internal/cortex/chunk/chunk.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0.
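// Standalone sketch (not the library's actual code): the deleted selector tests above
// exercise cache.MemcachedJumpHashSelector, which assigns keys to a natsorted server
// list via Lamping & Veach's jump consistent hash. A minimal version of that hash,
// assuming an FNV-1a hash of the key; server names are made up.
package main

import (
	"fmt"
	"hash/fnv"
)

// jumpHash maps a 64-bit key onto one of n buckets such that growing n
// relocates only about 1/n of the keys.
func jumpHash(key uint64, buckets int) int {
	var b, j int64 = -1, 0
	for j < int64(buckets) {
		b = j
		key = key*2862933555777941757 + 1
		j = int64(float64(b+1) * (float64(1<<31) / float64((key>>33)+1)))
	}
	return int(b)
}

func main() {
	servers := []string{"memcached-1:11211", "memcached-2:11211", "memcached-3:11211"}
	for _, k := range []string{"key-1", "key-2", "key-3"} {
		h := fnv.New64a()
		h.Write([]byte(k))
		fmt.Printf("%s -> %s\n", k, servers[jumpHash(h.Sum64(), len(servers))])
	}
}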
- -package chunk - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "strconv" - "strings" - "sync" - - "github.com/golang/snappy" - jsoniter "github.com/json-iterator/go" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - errs "github.com/weaveworks/common/errors" - - prom_chunk "github.com/thanos-io/thanos/internal/cortex/chunk/encoding" - "github.com/thanos-io/thanos/internal/cortex/prom1/storage/metric" -) - -const ( - ErrInvalidChecksum = errs.Error("invalid chunk checksum") - ErrWrongMetadata = errs.Error("wrong chunk metadata") - ErrMetadataLength = errs.Error("chunk metadata wrong length") - ErrDataLength = errs.Error("chunk data wrong length") - ErrSliceOutOfRange = errs.Error("chunk can't be sliced out of its data range") -) - -var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) - -func errInvalidChunkID(s string) error { - return errors.Errorf("invalid chunk ID %q", s) -} - -// Chunk contains encoded timeseries data -type Chunk struct { - // These two fields will be missing from older chunks (as will the hash). - // On fetch we will initialise these fields from the DynamoDB key. - Fingerprint model.Fingerprint `json:"fingerprint"` - UserID string `json:"userID"` - - // These fields will be in all chunks, including old ones. - From model.Time `json:"from"` - Through model.Time `json:"through"` - Metric labels.Labels `json:"metric"` - - // The hash is not written to the external storage either. We use - // crc32, Castagnoli table. See http://www.evanjones.ca/crc32c.html. - // For old chunks, ChecksumSet will be false. - ChecksumSet bool `json:"-"` - Checksum uint32 `json:"-"` - - // We never use Delta encoding (the zero value), so if this entry is - // missing, we default to DoubleDelta. - Encoding prom_chunk.Encoding `json:"encoding"` - Data prom_chunk.Chunk `json:"-"` - - // The encoded version of the chunk, held so we don't need to re-encode it - encoded []byte -} - -// NewChunk creates a new chunk -func NewChunk(userID string, fp model.Fingerprint, metric labels.Labels, c prom_chunk.Chunk, from, through model.Time) Chunk { - return Chunk{ - Fingerprint: fp, - UserID: userID, - From: from, - Through: through, - Metric: metric, - Encoding: c.Encoding(), - Data: c, - } -} - -// ParseExternalKey is used to construct a partially-populated chunk from the -// key in DynamoDB. This chunk can then be used to calculate the key needed -// to fetch the Chunk data from Memcache/S3, and then fully populate the chunk -// with decode(). -// -// Pre-checksums, the keys written to DynamoDB looked like -// `<fingerprint>:<from>:<through>` (aka the ID), and the key for -// memcache and S3 was `<user id>/<fingerprint>:<from>:<through>`. -// Fingerprints and times were written in base-10. -// -// Post-checksums, external keys become the same across DynamoDB, Memcache -// and S3. Numbers become hex encoded. Keys look like: -// `<user id>/<fingerprint>:<from>:<through>:<checksum>`.
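// Standalone sketch illustrating the post-checksum key format described in the
// comment above; the values are made up, and the parsing mirrors
// parseNewExternalKey below (error handling elided for brevity).
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Post-checksum format: <user id>/<fingerprint>:<from>:<through>:<checksum>, hex-encoded.
	key := fmt.Sprintf("%s/%x:%x:%x:%x", "user-1", uint64(0xabc), int64(1000), int64(2000), uint32(0xdeadbeef))
	fmt.Println(key) // user-1/abc:3e8:7d0:deadbeef

	// Parsing is the inverse: split on "/", then ":", then decode base-16.
	parts := strings.SplitN(key, "/", 2)
	hexParts := strings.Split(parts[1], ":")
	fp, _ := strconv.ParseUint(hexParts[0], 16, 64)
	from, _ := strconv.ParseInt(hexParts[1], 16, 64)
	through, _ := strconv.ParseInt(hexParts[2], 16, 64)
	sum, _ := strconv.ParseUint(hexParts[3], 16, 32)
	fmt.Println(parts[0], fp, from, through, uint32(sum)) // user-1 2748 1000 2000 3735928559
}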
-func ParseExternalKey(userID, externalKey string) (Chunk, error) { - if !strings.Contains(externalKey, "/") { - return parseLegacyChunkID(userID, externalKey) - } - chunk, err := parseNewExternalKey(externalKey) - if err != nil { - return Chunk{}, err - } - if chunk.UserID != userID { - return Chunk{}, errors.WithStack(ErrWrongMetadata) - } - return chunk, nil -} - -func parseLegacyChunkID(userID, key string) (Chunk, error) { - parts := strings.Split(key, ":") - if len(parts) != 3 { - return Chunk{}, errInvalidChunkID(key) - } - fingerprint, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Chunk{}, err - } - from, err := strconv.ParseInt(parts[1], 10, 64) - if err != nil { - return Chunk{}, err - } - through, err := strconv.ParseInt(parts[2], 10, 64) - if err != nil { - return Chunk{}, err - } - return Chunk{ - UserID: userID, - Fingerprint: model.Fingerprint(fingerprint), - From: model.Time(from), - Through: model.Time(through), - }, nil -} - -func parseNewExternalKey(key string) (Chunk, error) { - parts := strings.Split(key, "/") - if len(parts) != 2 { - return Chunk{}, errInvalidChunkID(key) - } - userID := parts[0] - hexParts := strings.Split(parts[1], ":") - if len(hexParts) != 4 { - return Chunk{}, errInvalidChunkID(key) - } - fingerprint, err := strconv.ParseUint(hexParts[0], 16, 64) - if err != nil { - return Chunk{}, err - } - from, err := strconv.ParseInt(hexParts[1], 16, 64) - if err != nil { - return Chunk{}, err - } - through, err := strconv.ParseInt(hexParts[2], 16, 64) - if err != nil { - return Chunk{}, err - } - checksum, err := strconv.ParseUint(hexParts[3], 16, 32) - if err != nil { - return Chunk{}, err - } - return Chunk{ - UserID: userID, - Fingerprint: model.Fingerprint(fingerprint), - From: model.Time(from), - Through: model.Time(through), - Checksum: uint32(checksum), - ChecksumSet: true, - }, nil -} - -// ExternalKey returns the key you can use to fetch this chunk from external -// storage. For newer chunks, this key includes a checksum. -func (c *Chunk) ExternalKey() string { - // Some chunks have a checksum stored in dynamodb, some do not. We must - // generate keys appropriately. - if c.ChecksumSet { - // This is the inverse of parseNewExternalKey. - return fmt.Sprintf("%s/%x:%x:%x:%x", c.UserID, uint64(c.Fingerprint), int64(c.From), int64(c.Through), c.Checksum) - } - // This is the inverse of parseLegacyChunkID, with "/" prepended. - // Legacy chunks had the user ID prefix on s3/memcache, but not in DynamoDB. - // See comment on ParseExternalKey. - return fmt.Sprintf("%s/%d:%d:%d", c.UserID, uint64(c.Fingerprint), int64(c.From), int64(c.Through)) -} - -var writerPool = sync.Pool{ - New: func() interface{} { return snappy.NewBufferedWriter(nil) }, -} - -// Encode writes the chunk into a buffer, and calculates the checksum. -func (c *Chunk) Encode() error { - return c.EncodeTo(nil) -} - -// EncodeTo is like Encode but you can provide your own buffer to use. -func (c *Chunk) EncodeTo(buf *bytes.Buffer) error { - if buf == nil { - buf = bytes.NewBuffer(nil) - } - // Write 4 empty bytes first - we will come back and put the len in here.
- metadataLenBytes := [4]byte{} - if _, err := buf.Write(metadataLenBytes[:]); err != nil { - return err - } - - // Encode chunk metadata into snappy-compressed buffer - writer := writerPool.Get().(*snappy.Writer) - defer writerPool.Put(writer) - writer.Reset(buf) - json := jsoniter.ConfigFastest - if err := json.NewEncoder(writer).Encode(c); err != nil { - return err - } - writer.Close() - - // Write the metadata length back at the start of the buffer. - // (note this length includes the 4 bytes for the length itself) - metadataLen := buf.Len() - binary.BigEndian.PutUint32(metadataLenBytes[:], uint32(metadataLen)) - copy(buf.Bytes(), metadataLenBytes[:]) - - // Write another 4 empty bytes - we will come back and put the len in here. - dataLenBytes := [4]byte{} - if _, err := buf.Write(dataLenBytes[:]); err != nil { - return err - } - - // And now the chunk data - if err := c.Data.Marshal(buf); err != nil { - return err - } - - // Now write the data len back into the buf. - binary.BigEndian.PutUint32(dataLenBytes[:], uint32(buf.Len()-metadataLen-4)) - copy(buf.Bytes()[metadataLen:], dataLenBytes[:]) - - // Now work out the checksum - c.encoded = buf.Bytes() - c.ChecksumSet = true - c.Checksum = crc32.Checksum(c.encoded, castagnoliTable) - return nil -} - -// Encoded returns the buffer created by Encode() -func (c *Chunk) Encoded() ([]byte, error) { - if c.encoded == nil { - if err := c.Encode(); err != nil { - return nil, err - } - } - return c.encoded, nil -} - -// DecodeContext holds data that can be re-used between decodes of different chunks -type DecodeContext struct { - reader *snappy.Reader -} - -// NewDecodeContext creates a new, blank, DecodeContext -func NewDecodeContext() *DecodeContext { - return &DecodeContext{ - reader: snappy.NewReader(nil), - } -} - -// Decode the chunk from the given buffer, and confirm the chunk is the one we -// expected. -func (c *Chunk) Decode(decodeContext *DecodeContext, input []byte) error { - // First, calculate the checksum of the chunk and confirm it matches - // what we expected. - if c.ChecksumSet && c.Checksum != crc32.Checksum(input, castagnoliTable) { - return errors.WithStack(ErrInvalidChecksum) - } - - // Now unmarshal the chunk metadata. - r := bytes.NewReader(input) - var metadataLen uint32 - if err := binary.Read(r, binary.BigEndian, &metadataLen); err != nil { - return errors.Wrap(err, "when reading metadata length from chunk") - } - var tempMetadata Chunk - decodeContext.reader.Reset(r) - json := jsoniter.ConfigFastest - err := json.NewDecoder(decodeContext.reader).Decode(&tempMetadata) - if err != nil { - return errors.Wrap(err, "when decoding chunk metadata") - } - metadataRead := len(input) - r.Len() - // Older versions of Cortex included the initial length word; newer versions do not. - if !(metadataRead == int(metadataLen) || metadataRead == int(metadataLen)+4) { - return errors.Wrapf(ErrMetadataLength, "expected %d, got %d", metadataLen, metadataRead) - } - - // Next, confirm the chunk matches what we expected. The easiest way to do this - // is to compare what the decoded data thinks its external ID would be, but - // we don't write the checksum to s3, so we have to copy the checksum in.
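// Standalone, simplified sketch of the envelope that EncodeTo and Decode above
// work with: a 4-byte big-endian metadata length (which counts its own 4 bytes),
// the metadata, a 4-byte big-endian data length, then the data, with a CRC32
// (Castagnoli table) over the whole buffer. The real code snappy-compresses
// JSON metadata; this sketch skips that step.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// encode builds the length-prefixed envelope and returns it with its checksum.
// binary.Write to a bytes.Buffer cannot fail, so its error is ignored here.
func encode(meta, data []byte) ([]byte, uint32) {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uint32(4+len(meta))) // length word includes its own 4 bytes
	buf.Write(meta)
	binary.Write(&buf, binary.BigEndian, uint32(len(data)))
	buf.Write(data)
	return buf.Bytes(), crc32.Checksum(buf.Bytes(), castagnoli)
}

// decode verifies the checksum, then walks the two length prefixes.
func decode(encoded []byte, sum uint32) (meta, data []byte, err error) {
	if crc32.Checksum(encoded, castagnoli) != sum {
		return nil, nil, fmt.Errorf("invalid checksum")
	}
	metaLen := binary.BigEndian.Uint32(encoded)
	meta = encoded[4:metaLen]
	dataLen := binary.BigEndian.Uint32(encoded[metaLen:])
	data = encoded[metaLen+4 : metaLen+4+dataLen]
	return meta, data, nil
}

func main() {
	enc, sum := encode([]byte(`{"from":1}`), []byte("samples..."))
	meta, data, err := decode(enc, sum)
	fmt.Printf("%s %s %v\n", meta, data, err) // {"from":1} samples... <nil>
}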
- if c.ChecksumSet { - tempMetadata.Checksum, tempMetadata.ChecksumSet = c.Checksum, c.ChecksumSet - if !equalByKey(*c, tempMetadata) { - return errors.WithStack(ErrWrongMetadata) - } - } - *c = tempMetadata - - // Older chunks always used DoubleDelta and did not write Encoding - // to JSON, so override if it has the zero value (Delta) - if c.Encoding == prom_chunk.Delta { - c.Encoding = prom_chunk.DoubleDelta - } - - // Finally, unmarshal the actual chunk data. - c.Data, err = prom_chunk.NewForEncoding(c.Encoding) - if err != nil { - return errors.Wrap(err, "when creating new chunk") - } - - var dataLen uint32 - if err := binary.Read(r, binary.BigEndian, &dataLen); err != nil { - return errors.Wrap(err, "when reading data length from chunk") - } - - c.encoded = input - remainingData := input[len(input)-r.Len():] - if int(dataLen) != len(remainingData) { - return ErrDataLength - } - - return c.Data.UnmarshalFromBuf(remainingData[:int(dataLen)]) -} - -func equalByKey(a, b Chunk) bool { - return a.UserID == b.UserID && a.Fingerprint == b.Fingerprint && - a.From == b.From && a.Through == b.Through && a.Checksum == b.Checksum -} - -// Samples returns all SamplePairs for the chunk. -func (c *Chunk) Samples(from, through model.Time) ([]model.SamplePair, error) { - it := c.Data.NewIterator(nil) - interval := metric.Interval{OldestInclusive: from, NewestInclusive: through} - return prom_chunk.RangeValues(it, interval) -} - -// Slice builds a new smaller chunk with data only from the given time range (inclusive) -func (c *Chunk) Slice(from, through model.Time) (*Chunk, error) { - // there should be at least some overlap between the chunk interval and the slice interval - if from > c.Through || through < c.From { - return nil, ErrSliceOutOfRange - } - - pc, err := c.Data.Rebound(from, through) - if err != nil { - return nil, err - } - - nc := NewChunk(c.UserID, c.Fingerprint, c.Metric, pc, from, through) - return &nc, nil -} - -func intervalsOverlap(interval1, interval2 model.Interval) bool { - if interval1.Start > interval2.End || interval2.Start > interval1.End { - return false - } - - return true -} diff --git a/internal/cortex/chunk/chunk_store.go b/internal/cortex/chunk/chunk_store.go deleted file mode 100644 index 3a626a99af..0000000000 --- a/internal/cortex/chunk/chunk_store.go +++ /dev/null @@ -1,717 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0.
- -package chunk - -import ( - "context" - "flag" - "fmt" - "sort" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/thanos-io/thanos/internal/cortex/chunk/cache" - "github.com/thanos-io/thanos/internal/cortex/chunk/encoding" - "github.com/thanos-io/thanos/internal/cortex/util" - "github.com/thanos-io/thanos/internal/cortex/util/extract" - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" - "github.com/thanos-io/thanos/internal/cortex/util/spanlogger" - "github.com/thanos-io/thanos/internal/cortex/util/validation" -) - -var ( - ErrQueryMustContainMetricName = QueryError("query must contain metric name") - ErrMetricNameLabelMissing = errors.New("metric name label missing") - ErrParialDeleteChunkNoOverlap = errors.New("interval for partial deletion has not overlap with chunk interval") - - indexEntriesPerChunk = promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "chunk_store_index_entries_per_chunk", - Help: "Number of entries written to storage per chunk.", - Buckets: prometheus.ExponentialBuckets(1, 2, 5), - }) - cacheCorrupt = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "cache_corrupt_chunks_total", - Help: "Total count of corrupt chunks found in cache.", - }) -) - -// Query errors are to be treated as user errors, rather than storage errors. -type QueryError string - -func (e QueryError) Error() string { - return string(e) -} - -// StoreConfig specifies config for a ChunkStore -type StoreConfig struct { - ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"` - WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config"` - - CacheLookupsOlderThan model.Duration `yaml:"cache_lookups_older_than"` - - // Not visible in yaml because the setting shouldn't be common between ingesters and queriers. - // This exists in case we don't want to cache all the chunks but still want to take advantage of - // ingester chunk write deduplication. But for the queriers we need the full value. So when this option - // is set, use different caches for ingesters and queriers. - chunkCacheStubs bool // don't write the full chunk to cache, just a stub entry - - // When DisableIndexDeduplication is true and chunk is already there in cache, only index would be written to the store and not chunk. - DisableIndexDeduplication bool `yaml:"-"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { - cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("store.chunks-cache.", "Cache config for chunks. ", f) - f.BoolVar(&cfg.chunkCacheStubs, "store.chunks-cache.cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.") - cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f) - - f.Var(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", "Cache index entries older than this period. 0 to disable.") -} - -// Validate validates the store config. 
-func (cfg *StoreConfig) Validate(logger log.Logger) error { - if err := cfg.ChunkCacheConfig.Validate(); err != nil { - return err - } - if err := cfg.WriteDedupeCacheConfig.Validate(); err != nil { - return err - } - return nil -} - -type baseStore struct { - cfg StoreConfig - - index IndexClient - chunks Client - schema BaseSchema - limits StoreLimits - fetcher *Fetcher -} - -func newBaseStore(cfg StoreConfig, schema BaseSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (baseStore, error) { - fetcher, err := NewChunkFetcher(chunksCache, cfg.chunkCacheStubs, chunks) - if err != nil { - return baseStore{}, err - } - - return baseStore{ - cfg: cfg, - index: index, - chunks: chunks, - schema: schema, - limits: limits, - fetcher: fetcher, - }, nil -} - -// Stop any background goroutines (ie in the cache.) -func (c *baseStore) Stop() { - c.fetcher.storage.Stop() - c.fetcher.Stop() - c.index.Stop() -} - -// store implements Store -type store struct { - baseStore - schema StoreSchema -} - -func newStore(cfg StoreConfig, schema StoreSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (Store, error) { - rs, err := newBaseStore(cfg, schema, index, chunks, limits, chunksCache) - if err != nil { - return nil, err - } - - return &store{ - baseStore: rs, - schema: schema, - }, nil -} - -// Put implements Store -func (c *store) Put(ctx context.Context, chunks []Chunk) error { - for _, chunk := range chunks { - if err := c.PutOne(ctx, chunk.From, chunk.Through, chunk); err != nil { - return err - } - } - return nil -} - -// PutOne implements Store -func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error { - log, ctx := spanlogger.New(ctx, "ChunkStore.PutOne") - defer log.Finish() - chunks := []Chunk{chunk} - - err := c.fetcher.storage.PutChunks(ctx, chunks) - if err != nil { - return err - } - - if cacheErr := c.fetcher.writeBackCache(ctx, chunks); cacheErr != nil { - level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr) - } - - writeReqs, err := c.calculateIndexEntries(chunk.UserID, from, through, chunk) - if err != nil { - return err - } - - return c.index.BatchWrite(ctx, writeReqs) -} - -// calculateIndexEntries creates a set of batched WriteRequests for all the chunks it is given. 
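// Standalone sketch of the write path in PutOne above: the chunk goes to durable
// storage first, then a best-effort write-back to the cache whose failure is only
// logged, never returned. The interfaces and names below are illustrative.
package main

import (
	"context"
	"errors"
	"log"
)

type store interface {
	PutChunks(ctx context.Context, data [][]byte) error
}

type chunkCache interface {
	Store(ctx context.Context, data [][]byte) error
}

// putOne fails on storage errors but treats the cache as optional.
func putOne(ctx context.Context, s store, c chunkCache, data [][]byte) error {
	if err := s.PutChunks(ctx, data); err != nil {
		return err // a storage failure fails the write
	}
	if err := c.Store(ctx, data); err != nil {
		// Best effort: the chunk is durable, so a cache failure is only logged.
		log.Printf("could not store chunks in chunk cache: %v", err)
	}
	return nil
}

type okStore struct{}

func (okStore) PutChunks(context.Context, [][]byte) error { return nil }

type badCache struct{}

func (badCache) Store(context.Context, [][]byte) error { return errors.New("cache down") }

func main() {
	if err := putOne(context.Background(), okStore{}, badCache{}, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("write succeeded despite cache failure")
}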
-func (c *store) calculateIndexEntries(userID string, from, through model.Time, chunk Chunk) (WriteBatch, error) { - seenIndexEntries := map[string]struct{}{} - - metricName := chunk.Metric.Get(labels.MetricName) - if metricName == "" { - return nil, ErrMetricNameLabelMissing - } - - entries, err := c.schema.GetWriteEntries(from, through, userID, metricName, chunk.Metric, chunk.ExternalKey()) - if err != nil { - return nil, err - } - indexEntriesPerChunk.Observe(float64(len(entries))) - - // Remove duplicate entries based on tableName:hashValue:rangeValue - result := c.index.NewWriteBatch() - for _, entry := range entries { - key := fmt.Sprintf("%s:%s:%x", entry.TableName, entry.HashValue, entry.RangeValue) - if _, ok := seenIndexEntries[key]; !ok { - seenIndexEntries[key] = struct{}{} - result.Add(entry.TableName, entry.HashValue, entry.RangeValue, entry.Value) - } - } - return result, nil -} - -// Get implements Store -func (c *store) Get(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.Get") - defer log.Span.Finish() - level.Debug(log).Log("from", from, "through", through, "matchers", len(allMatchers)) - - // Validate the query is within reasonable bounds. - metricName, matchers, shortcut, err := c.validateQuery(ctx, userID, &from, &through, allMatchers) - if err != nil { - return nil, err - } else if shortcut { - return nil, nil - } - - log.Span.SetTag("metric", metricName) - return c.getMetricNameChunks(ctx, userID, from, through, matchers, metricName) -} - -func (c *store) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { - return nil, nil, errors.New("not implemented") -} - -// LabelValuesForMetricName retrieves all label values for a single label name and metric name. -func (c *baseStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName, labelName string) ([]string, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues") - defer log.Span.Finish() - level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "labelName", labelName) - - shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through) - if err != nil { - return nil, err - } else if shortcut { - return nil, nil - } - - queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) - if err != nil { - return nil, err - } - - entries, err := c.lookupEntriesByQueries(ctx, queries) - if err != nil { - return nil, err - } - - var result UniqueStrings - for _, entry := range entries { - _, labelValue, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value) - if err != nil { - return nil, err - } - result.Add(string(labelValue)) - } - return result.Strings(), nil -} - -// LabelNamesForMetricName retrieves all label names for a metric name. 
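// Standalone sketch of the dedup step in calculateIndexEntries above: entries are
// de-duplicated on the composite key tableName:hashValue:rangeValue before being
// added to the write batch. The types here are illustrative stand-ins.
package main

import "fmt"

type indexEntry struct {
	TableName, HashValue string
	RangeValue, Value    []byte
}

// dedupe drops duplicates keyed on tableName:hashValue:rangeValue, mirroring
// the seenIndexEntries map used above.
func dedupe(entries []indexEntry) []indexEntry {
	seen := map[string]struct{}{}
	out := make([]indexEntry, 0, len(entries))
	for _, e := range entries {
		key := fmt.Sprintf("%s:%s:%x", e.TableName, e.HashValue, e.RangeValue)
		if _, ok := seen[key]; ok {
			continue
		}
		seen[key] = struct{}{}
		out = append(out, e)
	}
	return out
}

func main() {
	entries := []indexEntry{
		{TableName: "t1", HashValue: "h1", RangeValue: []byte{1}},
		{TableName: "t1", HashValue: "h1", RangeValue: []byte{1}}, // duplicate
		{TableName: "t1", HashValue: "h2", RangeValue: []byte{1}},
	}
	fmt.Println(len(dedupe(entries))) // 2
}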
-func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.LabelNamesForMetricName") - defer log.Span.Finish() - level.Debug(log).Log("from", from, "through", through, "metricName", metricName) - - shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through) - if err != nil { - return nil, err - } else if shortcut { - return nil, nil - } - - chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, nil, metricName) - if err != nil { - return nil, err - } - level.Debug(log).Log("msg", "Chunks in index", "chunks", len(chunks)) - - // Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint - filtered := filterChunksByTime(from, through, chunks) - filtered, keys := filterChunksByUniqueFingerprint(filtered) - level.Debug(log).Log("msg", "Chunks post filtering", "chunks", len(chunks)) - - // Now fetch the actual chunk data from Memcache / S3 - allChunks, err := c.fetcher.FetchChunks(ctx, filtered, keys) - if err != nil { - level.Error(log).Log("msg", "FetchChunks", "err", err) - return nil, err - } - return labelNamesFromChunks(allChunks), nil -} - -func (c *baseStore) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) { - //nolint:ineffassign,staticcheck //Leaving ctx even though we don't currently use it, we want to make it available for when we might need it and hopefully will ensure us using the correct context at that time - log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange") - defer log.Span.Finish() - - if *through < *from { - return false, QueryError(fmt.Sprintf("invalid query, through < from (%s < %s)", through, from)) - } - - maxQueryLength := c.limits.MaxQueryLength(userID) - if maxQueryLength > 0 && (*through).Sub(*from) > maxQueryLength { - return false, QueryError(fmt.Sprintf(validation.ErrQueryTooLong, (*through).Sub(*from), maxQueryLength)) - } - - now := model.Now() - - if from.After(now) { - // time-span start is in future ... regard as legal - level.Info(log).Log("msg", "whole timerange in future, yield empty resultset", "through", through, "from", from, "now", now) - return true, nil - } - - if through.After(now.Add(5 * time.Minute)) { - // time-span end is in future ... 
regard as legal - level.Info(log).Log("msg", "adjusting end timerange from future to now", "old_through", through, "new_through", now) - *through = now // Avoid processing future part - otherwise some schemas could fail with eg non-existent table gripes - } - - return false, nil -} - -func (c *baseStore) validateQuery(ctx context.Context, userID string, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) { - log, ctx := spanlogger.New(ctx, "store.validateQuery") - defer log.Span.Finish() - - shortcut, err := c.validateQueryTimeRange(ctx, userID, from, through) - if err != nil { - return "", nil, false, err - } - if shortcut { - return "", nil, true, nil - } - - // Check there is a metric name matcher of type equal, - metricNameMatcher, matchers, ok := extract.MetricNameMatcherFromMatchers(matchers) - if !ok || metricNameMatcher.Type != labels.MatchEqual { - return "", nil, false, ErrQueryMustContainMetricName - } - - return metricNameMatcher.Value, matchers, false, nil -} - -func (c *store) getMetricNameChunks(ctx context.Context, userID string, from, through model.Time, allMatchers []*labels.Matcher, metricName string) ([]Chunk, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.getMetricNameChunks") - defer log.Finish() - level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "matchers", len(allMatchers)) - - filters, matchers := util.SplitFiltersAndMatchers(allMatchers) - chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, matchers, metricName) - if err != nil { - return nil, err - } - level.Debug(log).Log("Chunks in index", len(chunks)) - - // Filter out chunks that are not in the selected time range. - filtered := filterChunksByTime(from, through, chunks) - level.Debug(log).Log("Chunks post filtering", len(chunks)) - - maxChunksPerQuery := c.limits.MaxChunksPerQueryFromStore(userID) - if maxChunksPerQuery > 0 && len(filtered) > maxChunksPerQuery { - err := QueryError(fmt.Sprintf("Query %v fetched too many chunks (%d > %d)", allMatchers, len(filtered), maxChunksPerQuery)) - level.Error(log).Log("err", err) - return nil, err - } - - // Now fetch the actual chunk data from Memcache / S3 - keys := keysFromChunks(filtered) - allChunks, err := c.fetcher.FetchChunks(ctx, filtered, keys) - if err != nil { - return nil, err - } - - // Filter out chunks based on the empty matchers in the query. 
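// Standalone sketch of the policy in validateQueryTimeRange above: reject
// through < from, short-circuit ranges that start wholly in the future, and
// clamp an end more than five minutes in the future back to now. Names are
// illustrative and plain time.Time stands in for model.Time.
package main

import (
	"fmt"
	"time"
)

func clampRange(from, through, now time.Time) (newThrough time.Time, empty bool, err error) {
	if through.Before(from) {
		return through, false, fmt.Errorf("invalid query, through < from (%s < %s)", through, from)
	}
	if from.After(now) {
		// Whole range is in the future: legal, but yields an empty result set.
		return through, true, nil
	}
	if through.After(now.Add(5 * time.Minute)) {
		// Pull a future end back to now so schemas don't query tables that don't exist yet.
		return now, false, nil
	}
	return through, false, nil
}

func main() {
	now := time.Now()
	th, empty, err := clampRange(now.Add(-time.Hour), now.Add(time.Hour), now)
	fmt.Println(th.Equal(now), empty, err) // true false <nil>
}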
- filteredChunks := filterChunksByMatchers(allChunks, filters) - return filteredChunks, nil -} - -func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, from, through model.Time, matchers []*labels.Matcher, metricName string) ([]Chunk, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.lookupChunksByMetricName") - defer log.Finish() - - // Just get chunks for metric if there are no matchers - if len(matchers) == 0 { - queries, err := c.schema.GetReadQueriesForMetric(from, through, userID, metricName) - if err != nil { - return nil, err - } - level.Debug(log).Log("queries", len(queries)) - - entries, err := c.lookupEntriesByQueries(ctx, queries) - if err != nil { - return nil, err - } - level.Debug(log).Log("entries", len(entries)) - - chunkIDs, err := c.parseIndexEntries(ctx, entries, nil) - if err != nil { - return nil, err - } - level.Debug(log).Log("chunkIDs", len(chunkIDs)) - - return c.convertChunkIDsToChunks(ctx, userID, chunkIDs) - } - - // Otherwise get chunks which include other matchers - incomingChunkIDs := make(chan []string) - incomingErrors := make(chan error) - for _, matcher := range matchers { - go func(matcher *labels.Matcher) { - chunkIDs, err := c.lookupIdsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, nil) - if err != nil { - incomingErrors <- err - } else { - incomingChunkIDs <- chunkIDs - } - }(matcher) - } - - // Receive chunkSets from all matchers - var chunkIDs []string - var lastErr error - var initialized bool - for i := 0; i < len(matchers); i++ { - select { - case incoming := <-incomingChunkIDs: - if !initialized { - chunkIDs = incoming - initialized = true - } else { - chunkIDs = intersectStrings(chunkIDs, incoming) - } - case err := <-incomingErrors: - lastErr = err - } - } - if lastErr != nil { - return nil, lastErr - } - level.Debug(log).Log("msg", "post intersection", "chunkIDs", len(chunkIDs)) - - // Convert IndexEntry's into chunks - return c.convertChunkIDsToChunks(ctx, userID, chunkIDs) -} - -func (c *baseStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]IndexQuery) []IndexQuery) ([]string, error) { - formattedMatcher := formatMatcher(matcher) - log, ctx := spanlogger.New(ctx, "Store.lookupIdsByMetricNameMatcher", "metricName", metricName, "matcher", formattedMatcher) - defer log.Span.Finish() - - var err error - var queries []IndexQuery - var labelName string - if matcher == nil { - queries, err = c.schema.GetReadQueriesForMetric(from, through, userID, metricName) - } else if matcher.Type == labels.MatchEqual { - labelName = matcher.Name - queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value) - } else { - labelName = matcher.Name - queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name) - } - if err != nil { - return nil, err - } - level.Debug(log).Log("matcher", formattedMatcher, "queries", len(queries)) - - if filter != nil { - queries = filter(queries) - level.Debug(log).Log("matcher", formattedMatcher, "filteredQueries", len(queries)) - } - - entries, err := c.lookupEntriesByQueries(ctx, queries) - if e, ok := err.(CardinalityExceededError); ok { - e.MetricName = metricName - e.LabelName = labelName - return nil, e - } else if err != nil { - return nil, err - } - level.Debug(log).Log("matcher", formattedMatcher, "entries", len(entries)) - - ids, err := c.parseIndexEntries(ctx, entries, 
matcher) - if err != nil { - return nil, err - } - level.Debug(log).Log("matcher", formattedMatcher, "ids", len(ids)) - - return ids, nil -} - -// Using this function avoids logging of nil matcher, which works, but indirectly via panic and recover. -// That confuses attached debugger, which wants to breakpoint on each panic. -// Using simple check is also faster. -func formatMatcher(matcher *labels.Matcher) string { - if matcher == nil { - return "nil" - } - return matcher.String() -} - -func (c *baseStore) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) { - log, ctx := spanlogger.New(ctx, "store.lookupEntriesByQueries") - defer log.Span.Finish() - - // Nothing to do if there are no queries. - if len(queries) == 0 { - return nil, nil - } - - var lock sync.Mutex - var entries []IndexEntry - err := c.index.QueryPages(ctx, queries, func(query IndexQuery, resp ReadBatch) bool { - iter := resp.Iterator() - lock.Lock() - for iter.Next() { - entries = append(entries, IndexEntry{ - TableName: query.TableName, - HashValue: query.HashValue, - RangeValue: iter.RangeValue(), - Value: iter.Value(), - }) - } - lock.Unlock() - return true - }) - if err != nil { - level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "error querying storage", "err", err) - } - return entries, err -} - -func (c *baseStore) parseIndexEntries(_ context.Context, entries []IndexEntry, matcher *labels.Matcher) ([]string, error) { - // Nothing to do if there are no entries. - if len(entries) == 0 { - return nil, nil - } - - matchSet := map[string]struct{}{} - if matcher != nil && matcher.Type == labels.MatchRegexp { - set := FindSetMatches(matcher.Value) - for _, v := range set { - matchSet[v] = struct{}{} - } - } - - result := make([]string, 0, len(entries)) - for _, entry := range entries { - chunkKey, labelValue, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value) - if err != nil { - return nil, err - } - - // If the matcher is like a set (=~"a|b|c|d|...") and - // the label value is not in that set move on. - if len(matchSet) > 0 { - if _, ok := matchSet[string(labelValue)]; !ok { - continue - } - - // If its in the set, then add it to set, we don't need to run - // matcher on it again. - result = append(result, chunkKey) - continue - } - - if matcher != nil && !matcher.Matches(string(labelValue)) { - continue - } - result = append(result, chunkKey) - } - // Return ids sorted and deduped because they will be merged with other sets. 
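// Standalone sketch of why parseIndexEntries returns its IDs sorted and deduped:
// the per-matcher result sets can then be merged with a linear-time intersection,
// which is the job of the intersectStrings helper used above. This version is
// illustrative, not the original helper.
package main

import "fmt"

// intersectSorted keeps the strings present in both sorted, de-duplicated
// slices; sorting up front is what makes this a single linear merge.
func intersectSorted(a, b []string) []string {
	var out []string
	for i, j := 0, 0; i < len(a) && j < len(b); {
		switch {
		case a[i] == b[j]:
			out = append(out, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return out
}

func main() {
	fmt.Println(intersectSorted([]string{"c1", "c2", "c4"}, []string{"c2", "c3", "c4"})) // [c2 c4]
}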
- sort.Strings(result) - result = uniqueStrings(result) - return result, nil -} - -func (c *baseStore) convertChunkIDsToChunks(ctx context.Context, userID string, chunkIDs []string) ([]Chunk, error) { - chunkSet := make([]Chunk, 0, len(chunkIDs)) - for _, chunkID := range chunkIDs { - chunk, err := ParseExternalKey(userID, chunkID) - if err != nil { - return nil, err - } - chunkSet = append(chunkSet, chunk) - } - - return chunkSet, nil -} - -func (c *store) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error { - metricName := metric.Get(model.MetricNameLabel) - if metricName == "" { - return ErrMetricNameLabelMissing - } - - chunkWriteEntries, err := c.schema.GetWriteEntries(from, through, userID, string(metricName), metric, chunkID) - if err != nil { - return errors.Wrapf(err, "when getting index entries to delete for chunkID=%s", chunkID) - } - - return c.deleteChunk(ctx, userID, chunkID, metric, chunkWriteEntries, partiallyDeletedInterval, func(chunk Chunk) error { - return c.PutOne(ctx, chunk.From, chunk.Through, chunk) - }) -} - -func (c *baseStore) deleteChunk(ctx context.Context, - userID string, - chunkID string, - metric labels.Labels, - chunkWriteEntries []IndexEntry, - partiallyDeletedInterval *model.Interval, - putChunkFunc func(chunk Chunk) error) error { - - metricName := metric.Get(model.MetricNameLabel) - if metricName == "" { - return ErrMetricNameLabelMissing - } - - // if chunk is partially deleted, fetch it, slice non-deleted portion and put it to store before deleting original chunk - if partiallyDeletedInterval != nil { - err := c.reboundChunk(ctx, userID, chunkID, *partiallyDeletedInterval, putChunkFunc) - if err != nil { - return errors.Wrapf(err, "chunkID=%s", chunkID) - } - } - - batch := c.index.NewWriteBatch() - for i := range chunkWriteEntries { - batch.Delete(chunkWriteEntries[i].TableName, chunkWriteEntries[i].HashValue, chunkWriteEntries[i].RangeValue) - } - - err := c.index.BatchWrite(ctx, batch) - if err != nil { - return errors.Wrapf(err, "when deleting index entries for chunkID=%s", chunkID) - } - - err = c.chunks.DeleteChunk(ctx, userID, chunkID) - if err != nil { - if err == ErrStorageObjectNotFound { - return nil - } - return errors.Wrapf(err, "when deleting chunk from storage with chunkID=%s", chunkID) - } - - return nil -} - -func (c *baseStore) reboundChunk(ctx context.Context, userID, chunkID string, partiallyDeletedInterval model.Interval, putChunkFunc func(chunk Chunk) error) error { - chunk, err := ParseExternalKey(userID, chunkID) - if err != nil { - return errors.Wrap(err, "when parsing external key") - } - - if !intervalsOverlap(model.Interval{Start: chunk.From, End: chunk.Through}, partiallyDeletedInterval) { - return ErrParialDeleteChunkNoOverlap - } - - chunks, err := c.fetcher.FetchChunks(ctx, []Chunk{chunk}, []string{chunkID}) - if err != nil { - if err == ErrStorageObjectNotFound { - return nil - } - return errors.Wrap(err, "when fetching chunk from storage for slicing") - } - - if len(chunks) != 1 { - return fmt.Errorf("expected to get 1 chunk from storage got %d instead", len(chunks)) - } - - chunk = chunks[0] - var newChunks []*Chunk - if partiallyDeletedInterval.Start > chunk.From { - newChunk, err := chunk.Slice(chunk.From, partiallyDeletedInterval.Start-1) - if err != nil && err != encoding.ErrSliceNoDataInRange { - return errors.Wrapf(err, "when slicing chunk for interval %d - %d", chunk.From, partiallyDeletedInterval.Start-1) - 
} - - if newChunk != nil { - newChunks = append(newChunks, newChunk) - } - } - - if partiallyDeletedInterval.End < chunk.Through { - newChunk, err := chunk.Slice(partiallyDeletedInterval.End+1, chunk.Through) - if err != nil && err != encoding.ErrSliceNoDataInRange { - return errors.Wrapf(err, "when slicing chunk for interval %d - %d", partiallyDeletedInterval.End+1, chunk.Through) - } - - if newChunk != nil { - newChunks = append(newChunks, newChunk) - } - } - - for _, newChunk := range newChunks { - if err := newChunk.Encode(); err != nil { - return errors.Wrapf(err, "when encoding new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through) - } - - err = putChunkFunc(*newChunk) - if err != nil { - return errors.Wrapf(err, "when putting new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through) - } - } - - return nil -} - -func (c *store) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { - // SeriesID is something which is only used in SeriesStore so we need not do anything here - return nil -} - -func (c *baseStore) GetChunkFetcher(_ model.Time) *Fetcher { - return c.fetcher -} diff --git a/internal/cortex/chunk/chunk_store_test.go b/internal/cortex/chunk/chunk_store_test.go deleted file mode 100644 index 3bc9ce5e20..0000000000 --- a/internal/cortex/chunk/chunk_store_test.go +++ /dev/null @@ -1,1153 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "context" - "fmt" - "math/rand" - "reflect" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/weaveworks/common/test" - - "github.com/thanos-io/thanos/internal/cortex/chunk/cache" - "github.com/thanos-io/thanos/internal/cortex/chunk/encoding" - "github.com/thanos-io/thanos/internal/cortex/util/flagext" - "github.com/thanos-io/thanos/internal/cortex/util/validation" -) - -type configFactory func() StoreConfig - -var seriesStoreSchemas = []string{"v9", "v10", "v11"} - -var schemas = append([]string{"v1", "v2", "v3", "v4", "v5", "v6"}, seriesStoreSchemas...) - -var stores = []struct { - name string - configFn configFactory -}{ - { - name: "store", - configFn: func() StoreConfig { - var storeCfg StoreConfig - flagext.DefaultValues(&storeCfg) - return storeCfg - }, - }, - { - name: "cached_store", - configFn: func() StoreConfig { - var storeCfg StoreConfig - flagext.DefaultValues(&storeCfg) - storeCfg.WriteDedupeCacheConfig.Cache = cache.NewFifoCache("test", cache.FifoCacheConfig{ - MaxSizeItems: 500, - }, prometheus.NewRegistry(), log.NewNopLogger()) - return storeCfg - }, - }, -} - -// newTestStore creates a new Store for testing. 
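// Standalone sketch of the interval arithmetic in reboundChunk above: a partial
// delete keeps the sub-ranges [From, start-1] and [end+1, Through] of the original
// chunk, both inclusive. The interval type here is an illustrative stand-in for
// model.Interval.
package main

import "fmt"

type interval struct{ Start, End int64 } // inclusive on both ends

// keepAround returns the sub-intervals of chunk that survive deleting del,
// mirroring the two Slice calls in reboundChunk; del is assumed to overlap chunk.
func keepAround(chunk, del interval) []interval {
	var kept []interval
	if del.Start > chunk.Start {
		kept = append(kept, interval{chunk.Start, del.Start - 1})
	}
	if del.End < chunk.End {
		kept = append(kept, interval{del.End + 1, chunk.End})
	}
	return kept
}

func main() {
	fmt.Println(keepAround(interval{0, 100}, interval{40, 60})) // [{0 39} {61 100}]
}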
-func newTestChunkStore(t require.TestingT, schemaName string) Store { - var storeCfg StoreConfig - flagext.DefaultValues(&storeCfg) - return newTestChunkStoreConfig(t, schemaName, storeCfg) -} - -func newTestChunkStoreConfig(t require.TestingT, schemaName string, storeCfg StoreConfig) Store { - schemaCfg := DefaultSchemaConfig("", schemaName, 0) - - schema, err := schemaCfg.Configs[0].CreateSchema() - require.NoError(t, err) - - return newTestChunkStoreConfigWithMockStorage(t, schemaCfg, schema, storeCfg) -} - -func newTestChunkStoreConfigWithMockStorage(t require.TestingT, schemaCfg SchemaConfig, schema BaseSchema, storeCfg StoreConfig) Store { - var tbmConfig TableManagerConfig - err := schemaCfg.Validate() - require.NoError(t, err) - flagext.DefaultValues(&tbmConfig) - storage := NewMockStorage() - tableManager, err := NewTableManager(tbmConfig, schemaCfg, maxChunkAge, storage, nil, nil, nil) - require.NoError(t, err) - - err = tableManager.SyncTables(context.Background()) - require.NoError(t, err) - - var limits validation.Limits - flagext.DefaultValues(&limits) - limits.MaxQueryLength = model.Duration(30 * 24 * time.Hour) - overrides, err := validation.NewOverrides(limits, nil) - require.NoError(t, err) - - reg := prometheus.NewRegistry() - logger := log.NewNopLogger() - chunksCache, err := cache.New(storeCfg.ChunkCacheConfig, reg, logger) - require.NoError(t, err) - writeDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig, reg, logger) - require.NoError(t, err) - - store := NewCompositeStore(nil) - err = store.addSchema(storeCfg, schema, schemaCfg.Configs[0].From.Time, storage, storage, overrides, chunksCache, writeDedupeCache) - require.NoError(t, err) - return store -} - -// TestChunkStore_Get tests results are returned correctly depending on the type of query -func TestChunkStore_Get(t *testing.T) { - ctx := context.Background() - now := model.Now() - - fooMetric1 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - {Name: "flip", Value: "flop"}, - {Name: "toms", Value: "code"}, - } - fooMetric2 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "beep"}, - {Name: "toms", Value: "code"}, - } - - // barMetric1 is a subset of barMetric2 to test over-matching bug. 
- barMetric1 := labels.Labels{ - {Name: labels.MetricName, Value: "bar"}, - {Name: "bar", Value: "baz"}, - } - barMetric2 := labels.Labels{ - {Name: labels.MetricName, Value: "bar"}, - {Name: "bar", Value: "baz"}, - {Name: "toms", Value: "code"}, - } - - fooChunk1 := dummyChunkFor(now, fooMetric1) - fooChunk2 := dummyChunkFor(now, fooMetric2) - - barChunk1 := dummyChunkFor(now, barMetric1) - barChunk2 := dummyChunkFor(now, barMetric2) - - testCases := []struct { - query string - expect []Chunk - err string - }{ - { - query: `foo`, - expect: []Chunk{fooChunk1, fooChunk2}, - }, - { - query: `foo{flip=""}`, - expect: []Chunk{fooChunk2}, - }, - { - query: `foo{bar="baz"}`, - expect: []Chunk{fooChunk1}, - }, - { - query: `foo{bar="beep"}`, - expect: []Chunk{fooChunk2}, - }, - { - query: `foo{toms="code"}`, - expect: []Chunk{fooChunk1, fooChunk2}, - }, - { - query: `foo{bar!="baz"}`, - expect: []Chunk{fooChunk2}, - }, - { - query: `foo{bar=~"beep|baz"}`, - expect: []Chunk{fooChunk1, fooChunk2}, - }, - { - query: `foo{toms="code", bar=~"beep|baz"}`, - expect: []Chunk{fooChunk1, fooChunk2}, - }, - { - query: `foo{toms="code", bar="baz"}`, - expect: []Chunk{fooChunk1}, - }, - { - query: `foo{a="b", bar="baz"}`, - expect: nil, - }, - { - query: `{__name__=~"foo"}`, - err: "query must contain metric name", - }, - } - for _, schema := range schemas { - for _, storeCase := range stores { - storeCfg := storeCase.configFn() - store := newTestChunkStoreConfig(t, schema, storeCfg) - defer store.Stop() - - if err := store.Put(ctx, []Chunk{ - fooChunk1, - fooChunk2, - barChunk1, - barChunk2, - }); err != nil { - t.Fatal(err) - } - - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s / %s / %s", tc.query, schema, storeCase.name), func(t *testing.T) { - t.Log("========= Running query", tc.query, "with schema", schema) - matchers, err := parser.ParseMetricSelector(tc.query) - if err != nil { - t.Fatal(err) - } - - // Query with ordinary time-range - chunks1, err := store.Get(ctx, userID, now.Add(-time.Hour), now, matchers...) - if tc.err != "" { - require.Error(t, err) - require.Equal(t, tc.err, err.Error()) - return - } - require.NoError(t, err) - if !reflect.DeepEqual(tc.expect, chunks1) { - t.Fatalf("%s: wrong chunks - %s", tc.query, test.Diff(tc.expect, chunks1)) - } - - // Pushing end of time-range into future should yield exact same resultset - chunks2, err := store.Get(ctx, userID, now.Add(-time.Hour), now.Add(time.Hour*24*10), matchers...) - require.NoError(t, err) - if !reflect.DeepEqual(tc.expect, chunks2) { - t.Fatalf("%s: wrong chunks - %s", tc.query, test.Diff(tc.expect, chunks2)) - } - - // Query with both begin & end of time-range in future should yield empty resultset - chunks3, err := store.Get(ctx, userID, now.Add(time.Hour), now.Add(time.Hour*2), matchers...) - require.NoError(t, err) - if len(chunks3) != 0 { - t.Fatalf("%s: future query should yield empty resultset ... 
actually got %v chunks: %#v", - tc.query, len(chunks3), chunks3) - } - }) - } - } - } -} - -func TestChunkStore_LabelValuesForMetricName(t *testing.T) { - ctx := context.Background() - now := model.Now() - - fooMetric1 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - {Name: "flip", Value: "flop"}, - {Name: "toms", Value: "code"}, - } - fooMetric2 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "beep"}, - {Name: "toms", Value: "code"}, - } - fooMetric3 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "bop"}, - {Name: "flip", Value: "flap"}, - } - - // barMetric1 is a subset of barMetric2 to test over-matching bug. - barMetric1 := labels.Labels{ - {Name: labels.MetricName, Value: "bar"}, - {Name: "bar", Value: "baz"}, - } - barMetric2 := labels.Labels{ - {Name: labels.MetricName, Value: "bar"}, - {Name: "bar", Value: "baz"}, - {Name: "toms", Value: "code"}, - } - - fooChunk1 := dummyChunkFor(now, fooMetric1) - fooChunk2 := dummyChunkFor(now, fooMetric2) - fooChunk3 := dummyChunkFor(now, fooMetric3) - - barChunk1 := dummyChunkFor(now, barMetric1) - barChunk2 := dummyChunkFor(now, barMetric2) - - for _, tc := range []struct { - metricName, labelName string - expect []string - }{ - { - `foo`, `bar`, - []string{"baz", "beep", "bop"}, - }, - { - `bar`, `toms`, - []string{"code"}, - }, - { - `bar`, `bar`, - []string{"baz"}, - }, - { - `foo`, `foo`, - nil, - }, - { - `foo`, `flip`, - []string{"flap", "flop"}, - }, - } { - for _, schema := range schemas { - for _, storeCase := range stores { - t.Run(fmt.Sprintf("%s / %s / %s / %s", tc.metricName, tc.labelName, schema, storeCase.name), func(t *testing.T) { - t.Log("========= Running labelValues with metricName", tc.metricName, "with labelName", tc.labelName, "with schema", schema) - storeCfg := storeCase.configFn() - store := newTestChunkStoreConfig(t, schema, storeCfg) - defer store.Stop() - - if err := store.Put(ctx, []Chunk{ - fooChunk1, - fooChunk2, - fooChunk3, - barChunk1, - barChunk2, - }); err != nil { - t.Fatal(err) - } - - // Query with ordinary time-range - labelValues1, err := store.LabelValuesForMetricName(ctx, userID, now.Add(-time.Hour), now, tc.metricName, tc.labelName) - require.NoError(t, err) - - if !reflect.DeepEqual(tc.expect, labelValues1) { - t.Fatalf("%s/%s: wrong label values - %s", tc.metricName, tc.labelName, test.Diff(tc.expect, labelValues1)) - } - - // Pushing end of time-range into future should yield exact same resultset - labelValues2, err := store.LabelValuesForMetricName(ctx, userID, now.Add(-time.Hour), now.Add(time.Hour*24*10), tc.metricName, tc.labelName) - require.NoError(t, err) - - if !reflect.DeepEqual(tc.expect, labelValues2) { - t.Fatalf("%s/%s: wrong label values - %s", tc.metricName, tc.labelName, test.Diff(tc.expect, labelValues2)) - } - - // Query with both begin & end of time-range in future should yield empty resultset - labelValues3, err := store.LabelValuesForMetricName(ctx, userID, now.Add(time.Hour), now.Add(time.Hour*2), tc.metricName, tc.labelName) - require.NoError(t, err) - if len(labelValues3) != 0 { - t.Fatalf("%s/%s: future query should yield empty resultset ... 
actually got %v label values: %#v", - tc.metricName, tc.labelName, len(labelValues3), labelValues3) - } - }) - } - } - } - -} - -func TestChunkStore_LabelNamesForMetricName(t *testing.T) { - ctx := context.Background() - now := model.Now() - - fooMetric1 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - {Name: "flip", Value: "flop"}, - {Name: "toms", Value: "code"}, - } - fooMetric2 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "beep"}, - {Name: "toms", Value: "code"}, - } - fooMetric3 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "bop"}, - {Name: "flip", Value: "flap"}, - } - - // barMetric1 is a subset of barMetric2 to test over-matching bug. - barMetric1 := labels.Labels{ - {Name: labels.MetricName, Value: "bar"}, - {Name: "bar", Value: "baz"}, - } - barMetric2 := labels.Labels{ - {Name: labels.MetricName, Value: "bar"}, - {Name: "bar", Value: "baz"}, - {Name: "toms", Value: "code"}, - } - - fooChunk1 := dummyChunkFor(now, fooMetric1) - fooChunk2 := dummyChunkFor(now, fooMetric2) - fooChunk3 := dummyChunkFor(now, fooMetric3) - fooChunk4 := dummyChunkFor(now.Add(-time.Hour), fooMetric1) // same series but different chunk - - barChunk1 := dummyChunkFor(now, barMetric1) - barChunk2 := dummyChunkFor(now, barMetric2) - - for _, tc := range []struct { - metricName string - expect []string - }{ - { - `foo`, - []string{labels.MetricName, "bar", "flip", "toms"}, - }, - { - `bar`, - []string{labels.MetricName, "bar", "toms"}, - }, - } { - for _, schema := range schemas { - for _, storeCase := range stores { - t.Run(fmt.Sprintf("%s / %s / %s ", tc.metricName, schema, storeCase.name), func(t *testing.T) { - t.Log("========= Running labelNames with metricName", tc.metricName, "with schema", schema) - storeCfg := storeCase.configFn() - store := newTestChunkStoreConfig(t, schema, storeCfg) - defer store.Stop() - - if err := store.Put(ctx, []Chunk{ - fooChunk1, - fooChunk2, - fooChunk3, - fooChunk4, - barChunk1, - barChunk2, - }); err != nil { - t.Fatal(err) - } - - // Query with ordinary time-range - labelNames1, err := store.LabelNamesForMetricName(ctx, userID, now.Add(-time.Hour), now, tc.metricName) - require.NoError(t, err) - - if !reflect.DeepEqual(tc.expect, labelNames1) { - t.Fatalf("%s: wrong label name - %s", tc.metricName, test.Diff(tc.expect, labelNames1)) - } - - // Pushing end of time-range into future should yield exact same resultset - labelNames2, err := store.LabelNamesForMetricName(ctx, userID, now.Add(-time.Hour), now.Add(time.Hour*24*10), tc.metricName) - require.NoError(t, err) - - if !reflect.DeepEqual(tc.expect, labelNames2) { - t.Fatalf("%s: wrong label name - %s", tc.metricName, test.Diff(tc.expect, labelNames2)) - } - - // Query with both begin & end of time-range in future should yield empty resultset - labelNames3, err := store.LabelNamesForMetricName(ctx, userID, now.Add(time.Hour), now.Add(time.Hour*2), tc.metricName) - require.NoError(t, err) - if len(labelNames3) != 0 { - t.Fatalf("%s: future query should yield empty resultset ... 
actually got %v label names: %#v", - tc.metricName, len(labelNames3), labelNames3) - } - }) - } - } - } - -} - -// TestChunkStore_getMetricNameChunks tests if chunks are fetched correctly when we have the metric name -func TestChunkStore_getMetricNameChunks(t *testing.T) { - ctx := context.Background() - now := model.Now() - chunk1 := dummyChunkFor(now, labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - {Name: "flip", Value: "flop"}, - {Name: "toms", Value: "code"}, - }) - chunk2 := dummyChunkFor(now, labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "beep"}, - {Name: "toms", Value: "code"}, - }) - - testCases := []struct { - query string - expect []Chunk - }{ - { - `foo`, - []Chunk{chunk1, chunk2}, - }, - { - `foo{flip=""}`, - []Chunk{chunk2}, - }, - { - `foo{bar="baz"}`, - []Chunk{chunk1}, - }, - { - `foo{bar="beep"}`, - []Chunk{chunk2}, - }, - { - `foo{toms="code"}`, - []Chunk{chunk1, chunk2}, - }, - { - `foo{bar!="baz"}`, - []Chunk{chunk2}, - }, - { - `foo{bar=~"beep|baz"}`, - []Chunk{chunk1, chunk2}, - }, - { - `foo{bar=~"beeping|baz"}`, - []Chunk{chunk1}, - }, - { - `foo{toms="code", bar=~"beep|baz"}`, - []Chunk{chunk1, chunk2}, - }, - { - `foo{toms="code", bar="baz"}`, - []Chunk{chunk1}, - }, - } - for _, schema := range schemas { - for _, storeCase := range stores { - storeCfg := storeCase.configFn() - store := newTestChunkStoreConfig(t, schema, storeCfg) - defer store.Stop() - - if err := store.Put(ctx, []Chunk{chunk1, chunk2}); err != nil { - t.Fatal(err) - } - - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s / %s / %s", tc.query, schema, storeCase.name), func(t *testing.T) { - t.Log("========= Running query", tc.query, "with schema", schema) - matchers, err := parser.ParseMetricSelector(tc.query) - if err != nil { - t.Fatal(err) - } - - chunks, err := store.Get(ctx, userID, now.Add(-time.Hour), now, matchers...) 
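// Note: Prometheus label-matcher regexes are fully anchored, so in the cases
// above `bar=~"beeping|baz"` matches only the exact values "beeping" or "baz";
// it does not match "beep" as a prefix, which is why that case expects chunk1 alone.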
- require.NoError(t, err) - - if !reflect.DeepEqual(tc.expect, chunks) { - t.Fatalf("%s: wrong chunks - %s", tc.query, test.Diff(tc.expect, chunks)) - } - }) - } - } - } -} - -func mustNewLabelMatcher(matchType labels.MatchType, name string, value string) *labels.Matcher { - return labels.MustNewMatcher(matchType, name, value) -} - -func TestChunkStoreRandom(t *testing.T) { - ctx := context.Background() - - for _, schema := range schemas { - t.Run(schema, func(t *testing.T) { - store := newTestChunkStore(t, schema) - defer store.Stop() - - // put 100 chunks from 0 to 99 - const chunkLen = 2 * 3600 // in seconds - for i := 0; i < 100; i++ { - ts := model.TimeFromUnix(int64(i * chunkLen)) - ch := encoding.New() - nc, err := ch.Add(model.SamplePair{ - Timestamp: ts, - Value: model.SampleValue(float64(i)), - }) - require.NoError(t, err) - require.Nil(t, nc) - chunk := NewChunk( - userID, - model.Fingerprint(1), - labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - }, - ch, - ts, - ts.Add(chunkLen*time.Second).Add(-1*time.Second), - ) - err = chunk.Encode() - require.NoError(t, err) - err = store.Put(ctx, []Chunk{chunk}) - require.NoError(t, err) - } - - // pick two random numbers and do a query - for i := 0; i < 100; i++ { - start := rand.Int63n(99 * chunkLen) - end := start + 1 + rand.Int63n((99*chunkLen)-start) - assert.True(t, start < end) - - startTime := model.TimeFromUnix(start) - endTime := model.TimeFromUnix(end) - - matchers := []*labels.Matcher{ - mustNewLabelMatcher(labels.MatchEqual, labels.MetricName, "foo"), - mustNewLabelMatcher(labels.MatchEqual, "bar", "baz"), - } - chunks, err := store.Get(ctx, userID, startTime, endTime, matchers...) - require.NoError(t, err) - - // We need to check that each chunk is in the time range - for _, chunk := range chunks { - assert.False(t, chunk.From.After(endTime)) - assert.False(t, chunk.Through.Before(startTime)) - samples, err := chunk.Samples(chunk.From, chunk.Through) - assert.NoError(t, err) - assert.Equal(t, 1, len(samples)) - // TODO verify chunk contents - } - - // And check we got all the chunks we want - numChunks := (end / chunkLen) - (start / chunkLen) + 1 - assert.Equal(t, int(numChunks), len(chunks)) - } - }) - } -} - -func TestChunkStoreLeastRead(t *testing.T) { - // Test we don't read too much from the index - ctx := context.Background() - store := newTestChunkStore(t, "v6") - defer store.Stop() - - // Put 24 consecutive chunks, each chunkLen seconds long, in the store - const chunkLen = 60 // in seconds - for i := 0; i < 24; i++ { - ts := model.TimeFromUnix(int64(i * chunkLen)) - ch := encoding.New() - nc, err := ch.Add(model.SamplePair{ - Timestamp: ts, - Value: model.SampleValue(float64(i)), - }) - require.NoError(t, err) - require.Nil(t, nc) - chunk := NewChunk( - userID, - model.Fingerprint(1), - labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - }, - ch, - ts, - ts.Add(chunkLen*time.Second), - ) - t.Logf("Loop %d", i) - err = chunk.Encode() - require.NoError(t, err) - err = store.Put(ctx, []Chunk{chunk}) - require.NoError(t, err) - } - - // query from each successive chunk boundary to the end of the row - for i := 1; i < 24; i++ { - start := int64(i * chunkLen) - end := int64(24 * chunkLen) - assert.True(t, start <= end) - - startTime := model.TimeFromUnix(start) - endTime := model.TimeFromUnix(end) - matchers := []*labels.Matcher{ - mustNewLabelMatcher(labels.MatchEqual, labels.MetricName, "foo"), - mustNewLabelMatcher(labels.MatchEqual, "bar", "baz"), - } - - chunks, err :=
store.Get(ctx, userID, startTime, endTime, matchers...) - require.NoError(t, err) - - // We need to check that each chunk is in the time range - for _, chunk := range chunks { - assert.False(t, chunk.From.After(endTime)) - assert.False(t, chunk.Through.Before(startTime)) - samples, err := chunk.Samples(chunk.From, chunk.Through) - assert.NoError(t, err) - assert.Equal(t, 1, len(samples)) - } - - // And check we got all the chunks we want - numChunks := 24 - (start / chunkLen) + 1 - assert.Equal(t, int(numChunks), len(chunks)) - } -} - -func TestIndexCachingWorks(t *testing.T) { - ctx := context.Background() - metric := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - } - storeMaker := stores[1] - storeCfg := storeMaker.configFn() - - store := newTestChunkStoreConfig(t, "v9", storeCfg) - defer store.Stop() - - storage := store.(CompositeStore).stores[0].Store.(*seriesStore).fetcher.storage.(*MockStorage) - - fooChunk1 := dummyChunkFor(model.Time(0).Add(15*time.Second), metric) - err := fooChunk1.Encode() - require.NoError(t, err) - err = store.Put(ctx, []Chunk{fooChunk1}) - require.NoError(t, err) - n := storage.numIndexWrites - - // Only one extra entry for the new chunk of same series. - fooChunk2 := dummyChunkFor(model.Time(0).Add(30*time.Second), metric) - err = fooChunk2.Encode() - require.NoError(t, err) - err = store.Put(ctx, []Chunk{fooChunk2}) - require.NoError(t, err) - require.Equal(t, n+1, storage.numIndexWrites) -} - -func BenchmarkIndexCaching(b *testing.B) { - ctx := context.Background() - storeMaker := stores[1] - storeCfg := storeMaker.configFn() - - store := newTestChunkStoreConfig(b, "v9", storeCfg) - defer store.Stop() - - fooChunk1 := dummyChunkFor(model.Time(0).Add(15*time.Second), BenchmarkLabels) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := store.Put(ctx, []Chunk{fooChunk1}) - require.NoError(b, err) - } -} - -func TestChunkStoreError(t *testing.T) { - ctx := context.Background() - for _, tc := range []struct { - query string - from, through model.Time - err string - }{ - { - query: "foo", - from: model.Time(0).Add(31 * 24 * time.Hour), - through: model.Time(0), - err: "invalid query, through < from (0 < 2678400)", - }, - { - query: "foo", - from: model.Time(0), - through: model.Time(0).Add(31 * 24 * time.Hour), - err: "the query time range exceeds the limit (query length: 744h0m0s, limit: 720h0m0s)", - }, - { - query: "{foo=\"bar\"}", - from: model.Time(0), - through: model.Time(0).Add(1 * time.Hour), - err: "query must contain metric name", - }, - { - query: "{__name__=~\"bar\"}", - from: model.Time(0), - through: model.Time(0).Add(1 * time.Hour), - err: "query must contain metric name", - }, - } { - for _, schema := range schemas { - t.Run(fmt.Sprintf("%s / %s", tc.query, schema), func(t *testing.T) { - store := newTestChunkStore(t, schema) - defer store.Stop() - - matchers, err := parser.ParseMetricSelector(tc.query) - require.NoError(t, err) - - // Query with ordinary time-range - _, err = store.Get(ctx, userID, tc.from, tc.through, matchers...) 
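// Each failure above comes from up-front query validation (time-range
// ordering, the 720h query-length limit, and the metric-name requirement)
// rather than from the index lookup itself.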
- require.EqualError(t, err, tc.err) - }) - } - } -} - -func benchmarkParseIndexEntries(i int64, regex string, b *testing.B) { - b.ReportAllocs() - b.StopTimer() - store := &store{} - ctx := context.Background() - entries := generateIndexEntries(i) - matcher, err := labels.NewMatcher(labels.MatchRegexp, "", regex) - if err != nil { - b.Fatal(err) - } - b.StartTimer() - for n := 0; n < b.N; n++ { - keys, err := store.parseIndexEntries(ctx, entries, matcher) - if err != nil { - b.Fatal(err) - } - if regex == ".*" && len(keys) != len(entries)/2 { - b.Fatalf("expected keys:%d got:%d", len(entries)/2, len(keys)) - } - } -} - -func BenchmarkParseIndexEntries500(b *testing.B) { benchmarkParseIndexEntries(500, ".*", b) } -func BenchmarkParseIndexEntries2500(b *testing.B) { benchmarkParseIndexEntries(2500, ".*", b) } -func BenchmarkParseIndexEntries10000(b *testing.B) { benchmarkParseIndexEntries(10000, ".*", b) } -func BenchmarkParseIndexEntries50000(b *testing.B) { benchmarkParseIndexEntries(50000, ".*", b) } - -func BenchmarkParseIndexEntriesRegexSet500(b *testing.B) { - benchmarkParseIndexEntries(500, "labelvalue0|labelvalue1|labelvalue2|labelvalue3|labelvalue600", b) -} -func BenchmarkParseIndexEntriesRegexSet2500(b *testing.B) { - benchmarkParseIndexEntries(2500, "labelvalue0|labelvalue1|labelvalue2|labelvalue3|labelvalue600", b) -} -func BenchmarkParseIndexEntriesRegexSet10000(b *testing.B) { - benchmarkParseIndexEntries(10000, "labelvalue0|labelvalue1|labelvalue2|labelvalue3|labelvalue600", b) -} -func BenchmarkParseIndexEntriesRegexSet50000(b *testing.B) { - benchmarkParseIndexEntries(50000, "labelvalue0|labelvalue1|labelvalue2|labelvalue3|labelvalue600", b) -} - -func generateIndexEntries(n int64) []IndexEntry { - res := make([]IndexEntry, 0, n) - for i := int64(n - 1); i >= 0; i-- { - labelValue := fmt.Sprintf("labelvalue%d", i%(n/2)) - chunkID := fmt.Sprintf("chunkid%d", i%(n/2)) - rangeValue := []byte{} - rangeValue = append(rangeValue, []byte("component1")...) - rangeValue = append(rangeValue, 0) - rangeValue = append(rangeValue, []byte(labelValue)...) - rangeValue = append(rangeValue, 0) - rangeValue = append(rangeValue, []byte(chunkID)...) 
- rangeValue = append(rangeValue, 0) - res = append(res, IndexEntry{ - RangeValue: rangeValue, - }) - } - return res -} - -func getNonDeletedIntervals(originalInterval, deletedInterval model.Interval) []model.Interval { - if !intervalsOverlap(originalInterval, deletedInterval) { - return []model.Interval{originalInterval} - } - - nonDeletedIntervals := []model.Interval{} - if deletedInterval.Start > originalInterval.Start { - nonDeletedIntervals = append(nonDeletedIntervals, model.Interval{Start: originalInterval.Start, End: deletedInterval.Start - 1}) - } - - if deletedInterval.End < originalInterval.End { - nonDeletedIntervals = append(nonDeletedIntervals, model.Interval{Start: deletedInterval.End + 1, End: originalInterval.End}) - } - - return nonDeletedIntervals -} - -func TestStore_DeleteChunk(t *testing.T) { - ctx := context.Background() - - metric1 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - } - - metric2 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz2"}, - } - - metric3 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz3"}, - } - - fooChunk1 := dummyChunkForEncoding(model.Now(), metric1, encoding.Varbit, 200) - err := fooChunk1.Encode() - require.NoError(t, err) - - fooChunk2 := dummyChunkForEncoding(model.Now(), metric2, encoding.Varbit, 200) - err = fooChunk2.Encode() - require.NoError(t, err) - - nonExistentChunk := dummyChunkForEncoding(model.Now(), metric3, encoding.Varbit, 200) - - fooMetricNameMatcher, err := parser.ParseMetricSelector(`foo`) - if err != nil { - t.Fatal(err) - } - - for _, tc := range []struct { - name string - chunks []Chunk - chunkToDelete Chunk - partialDeleteInterval *model.Interval - err error - numChunksToExpectAfterDeletion int - }{ - { - name: "delete whole chunk", - chunkToDelete: fooChunk1, - numChunksToExpectAfterDeletion: 1, - }, - { - name: "delete chunk partially at start", - chunkToDelete: fooChunk1, - partialDeleteInterval: &model.Interval{Start: fooChunk1.From, End: fooChunk1.From.Add(30 * time.Minute)}, - numChunksToExpectAfterDeletion: 2, - }, - { - name: "delete chunk partially at end", - chunkToDelete: fooChunk1, - partialDeleteInterval: &model.Interval{Start: fooChunk1.Through.Add(-30 * time.Minute), End: fooChunk1.Through}, - numChunksToExpectAfterDeletion: 2, - }, - { - name: "delete chunk partially in the middle", - chunkToDelete: fooChunk1, - partialDeleteInterval: &model.Interval{Start: fooChunk1.From.Add(15 * time.Minute), End: fooChunk1.Through.Add(-15 * time.Minute)}, - numChunksToExpectAfterDeletion: 3, - }, - { - name: "delete non-existent chunk", - chunkToDelete: nonExistentChunk, - numChunksToExpectAfterDeletion: 2, - }, - { - name: "delete first second", - chunkToDelete: fooChunk1, - partialDeleteInterval: &model.Interval{Start: fooChunk1.From, End: fooChunk1.From}, - numChunksToExpectAfterDeletion: 2, - }, - { - name: "delete chunk out of range", - chunkToDelete: fooChunk1, - partialDeleteInterval: &model.Interval{Start: fooChunk1.Through.Add(time.Minute), End: fooChunk1.Through.Add(10 * time.Minute)}, - numChunksToExpectAfterDeletion: 2, - err: errors.Wrapf(ErrParialDeleteChunkNoOverlap, "chunkID=%s", fooChunk1.ExternalKey()), - }, - } { - for _, schema := range schemas { - t.Run(fmt.Sprintf("%s / %s", schema, tc.name), func(t *testing.T) { - store := newTestChunkStore(t, schema) - defer store.Stop() - - // inserting 2 chunks with different labels but same metric name - err = store.Put(ctx, 
[]Chunk{fooChunk1, fooChunk2}) - require.NoError(t, err) - - // we expect to get 2 chunks back using just metric name matcher - chunks, err := store.Get(ctx, userID, model.Now().Add(-time.Hour), model.Now(), fooMetricNameMatcher...) - require.NoError(t, err) - require.Equal(t, 2, len(chunks)) - - err = store.DeleteChunk(ctx, tc.chunkToDelete.From, tc.chunkToDelete.Through, userID, - tc.chunkToDelete.ExternalKey(), tc.chunkToDelete.Metric, tc.partialDeleteInterval) - - if tc.err != nil { - require.Error(t, err) - require.Equal(t, tc.err.Error(), err.Error()) - - // we expect to get same results back if delete operation is expected to fail - chunks, err := store.Get(ctx, userID, model.Now().Add(-time.Hour), model.Now(), fooMetricNameMatcher...) - require.NoError(t, err) - - require.Equal(t, 2, len(chunks)) - - return - } - require.NoError(t, err) - - matchersForDeletedChunk, err := parser.ParseMetricSelector(tc.chunkToDelete.Metric.String()) - require.NoError(t, err) - - var nonDeletedIntervals []model.Interval - - if tc.partialDeleteInterval != nil { - nonDeletedIntervals = getNonDeletedIntervals(model.Interval{ - Start: tc.chunkToDelete.From, - End: tc.chunkToDelete.Through, - }, *tc.partialDeleteInterval) - } - - // we expect to get 1 non deleted chunk + new chunks that were created (if any) after partial deletion - chunks, err = store.Get(ctx, userID, model.Now().Add(-time.Hour), model.Now(), fooMetricNameMatcher...) - require.NoError(t, err) - require.Equal(t, tc.numChunksToExpectAfterDeletion, len(chunks)) - - chunks, err = store.Get(ctx, userID, model.Now().Add(-time.Hour), model.Now(), matchersForDeletedChunk...) - require.NoError(t, err) - require.Equal(t, len(nonDeletedIntervals), len(chunks)) - - // comparing intervals of new chunks that were created after partial deletion - for i, nonDeletedInterval := range nonDeletedIntervals { - require.Equal(t, chunks[i].From, nonDeletedInterval.Start) - require.Equal(t, chunks[i].Through, nonDeletedInterval.End) - } - }) - } - } -} - -func TestStore_DeleteSeriesIDs(t *testing.T) { - ctx := context.Background() - metric1 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - } - - metric2 := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz2"}, - } - - matchers, err := parser.ParseMetricSelector(`foo`) - if err != nil { - t.Fatal(err) - } - - for _, schema := range seriesStoreSchemas { - t.Run(schema, func(t *testing.T) { - store := newTestChunkStore(t, schema) - defer store.Stop() - - seriesStore := store.(CompositeStore).stores[0].Store.(*seriesStore) - - fooChunk1 := dummyChunkForEncoding(model.Now(), metric1, encoding.Varbit, 200) - err := fooChunk1.Encode() - require.NoError(t, err) - - fooChunk2 := dummyChunkForEncoding(model.Now(), metric2, encoding.Varbit, 200) - err = fooChunk2.Encode() - require.NoError(t, err) - - err = store.Put(ctx, []Chunk{fooChunk1, fooChunk2}) - require.NoError(t, err) - - // we expect to have 2 series IDs in index for the chunks that were added above - seriesIDs, err := seriesStore.lookupSeriesByMetricNameMatcher(ctx, model.Now().Add(-time.Hour), model.Now(), - userID, "foo", nil, nil) - require.NoError(t, err) - require.Equal(t, 2, len(seriesIDs)) - - // we expect to have 2 chunks in store that were added above - chunks, err := store.Get(ctx, userID, model.Now().Add(-time.Hour), model.Now(), matchers...) 
- require.NoError(t, err) - require.Equal(t, 2, len(chunks)) - - // let's try deleting series ID without deleting the chunk - err = store.DeleteSeriesIDs(ctx, fooChunk1.From, fooChunk1.Through, userID, fooChunk1.Metric) - require.NoError(t, err) - - // series IDs should still be there since chunks for them still exist - seriesIDs, err = seriesStore.lookupSeriesByMetricNameMatcher(ctx, model.Now().Add(-time.Hour), model.Now(), - userID, "foo", nil, nil) - require.NoError(t, err) - require.Equal(t, 2, len(seriesIDs)) - - // let's delete a chunk and then delete its series ID - err = store.DeleteChunk(ctx, fooChunk1.From, fooChunk1.Through, userID, fooChunk1.ExternalKey(), metric1, nil) - require.NoError(t, err) - - err = store.DeleteSeriesIDs(ctx, fooChunk1.From, fooChunk1.Through, userID, fooChunk1.Metric) - require.NoError(t, err) - - // there should only be 1 chunk and 1 series ID left for it - chunks, err = store.Get(ctx, userID, model.Now().Add(-time.Hour), model.Now(), matchers...) - require.NoError(t, err) - require.Equal(t, 1, len(chunks)) - - seriesIDs, err = seriesStore.lookupSeriesByMetricNameMatcher(ctx, model.Now().Add(-time.Hour), model.Now(), - userID, "foo", nil, nil) - require.NoError(t, err) - require.Equal(t, 1, len(seriesIDs)) - require.Equal(t, string(labelsSeriesID(fooChunk2.Metric)), seriesIDs[0]) - - // let's delete the other chunk partially and try deleting the series ID - err = store.DeleteChunk(ctx, fooChunk2.From, fooChunk2.Through, userID, fooChunk2.ExternalKey(), metric2, - &model.Interval{Start: fooChunk2.From, End: fooChunk2.From.Add(30 * time.Minute)}) - require.NoError(t, err) - - err = store.DeleteSeriesIDs(ctx, fooChunk1.From, fooChunk1.Through, userID, fooChunk1.Metric) - require.NoError(t, err) - - // partial deletion should have left another chunk and a series ID in store - chunks, err = store.Get(ctx, userID, model.Now().Add(-time.Hour), model.Now(), matchers...)
- require.NoError(t, err) - require.Equal(t, 1, len(chunks)) - - seriesIDs, err = seriesStore.lookupSeriesByMetricNameMatcher(ctx, model.Now().Add(-time.Hour), model.Now(), - userID, "foo", nil, nil) - require.NoError(t, err) - require.Equal(t, 1, len(seriesIDs)) - require.Equal(t, string(labelsSeriesID(fooChunk2.Metric)), seriesIDs[0]) - }) - } -} - -func TestDisableIndexDeduplication(t *testing.T) { - for i, disableIndexDeduplication := range []bool{ - false, true, - } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - ctx := context.Background() - metric := labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - } - storeMaker := stores[0] - storeCfg := storeMaker.configFn() - storeCfg.ChunkCacheConfig.Cache = cache.NewFifoCache("chunk-cache", cache.FifoCacheConfig{ - MaxSizeItems: 5, - }, prometheus.NewRegistry(), log.NewNopLogger()) - storeCfg.DisableIndexDeduplication = disableIndexDeduplication - - store := newTestChunkStoreConfig(t, "v9", storeCfg) - defer store.Stop() - - storage := store.(CompositeStore).stores[0].Store.(*seriesStore).fetcher.storage.(*MockStorage) - - fooChunk1 := dummyChunkFor(model.Time(0).Add(15*time.Second), metric) - err := fooChunk1.Encode() - require.NoError(t, err) - err = store.Put(ctx, []Chunk{fooChunk1}) - require.NoError(t, err) - n := storage.numIndexWrites - - // see if we have written the chunk to the store - require.Equal(t, 1, storage.numChunkWrites) - - // Put the same chunk again - err = store.Put(ctx, []Chunk{fooChunk1}) - require.NoError(t, err) - - expectedTotalWrites := n - if disableIndexDeduplication { - expectedTotalWrites *= 2 - } - require.Equal(t, expectedTotalWrites, storage.numIndexWrites) - - // see if we deduped the chunk and the number of chunks we wrote is still 1 - require.Equal(t, 1, storage.numChunkWrites) - }) - - } - -} diff --git a/internal/cortex/chunk/chunk_store_utils.go b/internal/cortex/chunk/chunk_store_utils.go deleted file mode 100644 index c7e94359ed..0000000000 --- a/internal/cortex/chunk/chunk_store_utils.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
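For orientation, the partial-delete expectations exercised above all reduce to the interval arithmetic of getNonDeletedIntervals: deleting a sub-range of a chunk leaves up to two surviving remainders, which the store re-creates as new chunks. Below is a minimal standalone sketch of that split; the helper names are made up and plain int64 millisecond timestamps stand in for model.Time.

package main

import "fmt"

// interval mirrors model.Interval: inclusive start and end, in milliseconds.
type interval struct{ start, end int64 }

// remainders reproduces the logic of getNonDeletedIntervals from the test
// file above: whatever part of orig is not covered by del survives.
func remainders(orig, del interval) []interval {
	if del.end < orig.start || del.start > orig.end {
		return []interval{orig} // no overlap: the chunk is untouched
	}
	var out []interval
	if del.start > orig.start {
		out = append(out, interval{orig.start, del.start - 1}) // head survives
	}
	if del.end < orig.end {
		out = append(out, interval{del.end + 1, orig.end}) // tail survives
	}
	return out
}

func main() {
	chunk := interval{0, 3_600_000}     // a one-hour chunk
	del := interval{900_000, 2_700_000} // delete minutes 15..45
	// Prints [{0 899999} {2700001 3600000}]: two remainders, which is why the
	// "delete chunk partially in the middle" case expects 3 chunks in total
	// (the untouched sibling chunk plus the two re-created pieces).
	fmt.Println(remainders(chunk, del))
}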
- -package chunk - -import ( - "context" - "sync" - - "github.com/go-kit/log/level" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql" - - "github.com/thanos-io/thanos/internal/cortex/chunk/cache" - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" - "github.com/thanos-io/thanos/internal/cortex/util/spanlogger" -) - -const chunkDecodeParallelism = 16 - -func filterChunksByTime(from, through model.Time, chunks []Chunk) []Chunk { - filtered := make([]Chunk, 0, len(chunks)) - for _, chunk := range chunks { - if chunk.Through < from || through < chunk.From { - continue - } - filtered = append(filtered, chunk) - } - return filtered -} - -func keysFromChunks(chunks []Chunk) []string { - keys := make([]string, 0, len(chunks)) - for _, chk := range chunks { - keys = append(keys, chk.ExternalKey()) - } - - return keys -} - -func labelNamesFromChunks(chunks []Chunk) []string { - var result UniqueStrings - for _, c := range chunks { - for _, l := range c.Metric { - result.Add(l.Name) - } - } - return result.Strings() -} - -func filterChunksByUniqueFingerprint(chunks []Chunk) ([]Chunk, []string) { - filtered := make([]Chunk, 0, len(chunks)) - keys := make([]string, 0, len(chunks)) - uniqueFp := map[model.Fingerprint]struct{}{} - - for _, chunk := range chunks { - if _, ok := uniqueFp[chunk.Fingerprint]; ok { - continue - } - filtered = append(filtered, chunk) - keys = append(keys, chunk.ExternalKey()) - uniqueFp[chunk.Fingerprint] = struct{}{} - } - return filtered, keys -} - -func filterChunksByMatchers(chunks []Chunk, filters []*labels.Matcher) []Chunk { - filteredChunks := make([]Chunk, 0, len(chunks)) -outer: - for _, chunk := range chunks { - for _, filter := range filters { - if !filter.Matches(chunk.Metric.Get(filter.Name)) { - continue outer - } - } - filteredChunks = append(filteredChunks, chunk) - } - return filteredChunks -} - -// Fetcher deals with fetching chunk contents from the cache/store, -// and writing back any misses to the cache. Also responsible for decoding -// chunks from the cache, in parallel. -type Fetcher struct { - storage Client - cache cache.Cache - cacheStubs bool - - wait sync.WaitGroup - decodeRequests chan decodeRequest - quit chan struct{} -} - -type decodeRequest struct { - chunk Chunk - buf []byte - responses chan decodeResponse -} -type decodeResponse struct { - chunk Chunk - err error -} - -// NewChunkFetcher makes a new ChunkFetcher. -func NewChunkFetcher(cacher cache.Cache, cacheStubs bool, storage Client) (*Fetcher, error) { - c := &Fetcher{ - storage: storage, - cache: cacher, - cacheStubs: cacheStubs, - decodeRequests: make(chan decodeRequest), - quit: make(chan struct{}), - } - - c.wait.Add(chunkDecodeParallelism) - for i := 0; i < chunkDecodeParallelism; i++ { - go c.worker() - } - - return c, nil -} - -// Stop the ChunkFetcher. -func (c *Fetcher) Stop() { - select { - case <-c.quit: - default: - close(c.quit) - } - - c.wait.Wait() - c.cache.Stop() -} - -func (c *Fetcher) worker() { - defer c.wait.Done() - decodeContext := NewDecodeContext() - for { - select { - case <-c.quit: - return - case req := <-c.decodeRequests: - err := req.chunk.Decode(decodeContext, req.buf) - if err != nil { - cacheCorrupt.Inc() - } - req.responses <- decodeResponse{ - chunk: req.chunk, - err: err, - } - } - } -} - -// FetchChunks fetches a set of chunks from cache and store. 
Note that the keys passed in must be -// lexicographically sorted, while the returned chunks are not in the same order as the passed in chunks. -func (c *Fetcher) FetchChunks(ctx context.Context, chunks []Chunk, keys []string) ([]Chunk, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.FetchChunks") - defer log.Span.Finish() - - // Now fetch the actual chunk data from Memcache / S3 - cacheHits, cacheBufs, _ := c.cache.Fetch(ctx, keys) - - fromCache, missing, err := c.processCacheResponse(ctx, chunks, cacheHits, cacheBufs) - if err != nil { - level.Warn(log).Log("msg", "error fetching from cache", "err", err) - } - - var fromStorage []Chunk - if len(missing) > 0 { - fromStorage, err = c.storage.GetChunks(ctx, missing) - } - - // Always cache any chunks we did get - if cacheErr := c.writeBackCache(ctx, fromStorage); cacheErr != nil { - level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr) - } - - if err != nil { - // Don't rely on Cortex error translation here. - return nil, promql.ErrStorage{Err: err} - } - - allChunks := append(fromCache, fromStorage...) - return allChunks, nil -} - -func (c *Fetcher) writeBackCache(ctx context.Context, chunks []Chunk) error { - keys := make([]string, 0, len(chunks)) - bufs := make([][]byte, 0, len(chunks)) - for i := range chunks { - var encoded []byte - var err error - if !c.cacheStubs { - encoded, err = chunks[i].Encoded() - // TODO don't fail, just log and continue? - if err != nil { - return err - } - } - - keys = append(keys, chunks[i].ExternalKey()) - bufs = append(bufs, encoded) - } - - c.cache.Store(ctx, keys, bufs) - return nil -} - -// ProcessCacheResponse decodes the chunks coming back from the cache, separating -// hits and misses. -func (c *Fetcher) processCacheResponse(ctx context.Context, chunks []Chunk, keys []string, bufs [][]byte) ([]Chunk, []Chunk, error) { - var ( - requests = make([]decodeRequest, 0, len(keys)) - responses = make(chan decodeResponse) - missing []Chunk - ) - log, _ := spanlogger.New(ctx, "Fetcher.processCacheResponse") - defer log.Span.Finish() - - i, j := 0, 0 - for i < len(chunks) && j < len(keys) { - chunkKey := chunks[i].ExternalKey() - - if chunkKey < keys[j] { - missing = append(missing, chunks[i]) - i++ - } else if chunkKey > keys[j] { - level.Warn(util_log.Logger).Log("msg", "got chunk from cache we didn't ask for") - j++ - } else { - requests = append(requests, decodeRequest{ - chunk: chunks[i], - buf: bufs[j], - responses: responses, - }) - i++ - j++ - } - } - for ; i < len(chunks); i++ { - missing = append(missing, chunks[i]) - } - level.Debug(log).Log("chunks", len(chunks), "decodeRequests", len(requests), "missing", len(missing)) - - go func() { - for _, request := range requests { - select { - case <-c.quit: - return - case c.decodeRequests <- request: - } - } - }() - - var ( - err error - found []Chunk - ) - -loopResponses: - for i := 0; i < len(requests); i++ { - select { - case <-c.quit: - break loopResponses - case response := <-responses: - // Don't exit early, as we don't want to block the workers. - if response.err != nil { - err = response.err - } else { - found = append(found, response.chunk) - } - } - } - return found, missing, err -} diff --git a/internal/cortex/chunk/chunk_test.go b/internal/cortex/chunk/chunk_test.go deleted file mode 100644 index c7817f11ef..0000000000 --- a/internal/cortex/chunk/chunk_test.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
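The heart of the Fetcher above is processCacheResponse's merge of two lexicographically sorted key lists, pairing cache results back up with the chunks that requested them. A self-contained sketch of that two-pointer pass follows; it is illustrative only, and the function name is made up.

package main

import "fmt"

// classify walks two sorted key lists in lockstep, the way
// processCacheResponse does: keys we asked for but did not get back are
// misses, and keys the cache returned unasked are skipped (logged as a
// warning in the original).
func classify(requested, returned []string) (hits, misses []string) {
	i, j := 0, 0
	for i < len(requested) && j < len(returned) {
		switch {
		case requested[i] < returned[j]:
			misses = append(misses, requested[i])
			i++
		case requested[i] > returned[j]:
			j++ // cache returned a key we never asked for
		default:
			hits = append(hits, requested[i])
			i++
			j++
		}
	}
	// Anything left in requested was never answered by the cache.
	misses = append(misses, requested[i:]...)
	return hits, misses
}

func main() {
	hits, misses := classify(
		[]string{"chunk-a", "chunk-b", "chunk-c", "chunk-d"},
		[]string{"chunk-b", "chunk-d"},
	)
	fmt.Println(hits, misses) // [chunk-b chunk-d] [chunk-a chunk-c]
}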
- -package chunk - -import ( - "context" - "fmt" - "sort" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" - - "github.com/thanos-io/thanos/internal/cortex/chunk/encoding" - "github.com/thanos-io/thanos/internal/cortex/ingester/client" - "github.com/thanos-io/thanos/internal/cortex/util" -) - -const userID = "userID" - -func init() { - encoding.DefaultEncoding = encoding.Varbit -} - -var labelsForDummyChunks = labels.Labels{ - {Name: labels.MetricName, Value: "foo"}, - {Name: "bar", Value: "baz"}, - {Name: "toms", Value: "code"}, -} - -func dummyChunk(now model.Time) Chunk { - return dummyChunkFor(now, labelsForDummyChunks) -} - -func dummyChunkForEncoding(now model.Time, metric labels.Labels, enc encoding.Encoding, samples int) Chunk { - c, _ := encoding.NewForEncoding(enc) - chunkStart := now.Add(-time.Hour) - - for i := 0; i < samples; i++ { - t := time.Duration(i) * 15 * time.Second - nc, err := c.Add(model.SamplePair{Timestamp: chunkStart.Add(t), Value: model.SampleValue(i)}) - if err != nil { - panic(err) - } - if nc != nil { - panic("returned chunk was not nil") - } - } - - chunk := NewChunk( - userID, - client.Fingerprint(metric), - metric, - c, - chunkStart, - now, - ) - // Force checksum calculation. - err := chunk.Encode() - if err != nil { - panic(err) - } - return chunk -} - -func dummyChunkFor(now model.Time, metric labels.Labels) Chunk { - return dummyChunkForEncoding(now, metric, encoding.Varbit, 1) -} - -func TestChunkCodec(t *testing.T) { - dummy := dummyChunk(model.Now()) - decodeContext := NewDecodeContext() - for i, c := range []struct { - chunk Chunk - err error - f func(*Chunk, []byte) - }{ - // Basic round trip - {chunk: dummy}, - - // Checksum should fail - { - chunk: dummy, - err: ErrInvalidChecksum, - f: func(_ *Chunk, buf []byte) { buf[4]++ }, - }, - - // Checksum should fail - { - chunk: dummy, - err: ErrInvalidChecksum, - f: func(c *Chunk, _ []byte) { c.Checksum = 123 }, - }, - - // Metadata test should fail - { - chunk: dummy, - err: ErrWrongMetadata, - f: func(c *Chunk, _ []byte) { c.Fingerprint++ }, - }, - - // Metadata test should fail - { - chunk: dummy, - err: ErrWrongMetadata, - f: func(c *Chunk, _ []byte) { c.UserID = "foo" }, - }, - } { - t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { - err := c.chunk.Encode() - require.NoError(t, err) - encoded, err := c.chunk.Encoded() - require.NoError(t, err) - - have, err := ParseExternalKey(userID, c.chunk.ExternalKey()) - require.NoError(t, err) - - buf := make([]byte, len(encoded)) - copy(buf, encoded) - if c.f != nil { - c.f(&have, buf) - } - - err = have.Decode(decodeContext, buf) - require.Equal(t, c.err, errors.Cause(err)) - - if c.err == nil { - require.Equal(t, have, c.chunk) - } - }) - } -} - -const fixedTimestamp = model.Time(1557654321000) - -func TestChunkDecodeBackwardsCompatibility(t *testing.T) { - // lets build a new chunk same as what was built using code at commit b1777a50ab19 - c, _ := encoding.NewForEncoding(encoding.Bigchunk) - nc, err := c.Add(model.SamplePair{Timestamp: fixedTimestamp, Value: 0}) - require.NoError(t, err) - require.Equal(t, nil, nc, "returned chunk should be nil") - - chunk := NewChunk( - userID, - client.Fingerprint(labelsForDummyChunks), - labelsForDummyChunks, - c, - fixedTimestamp.Add(-time.Hour), - fixedTimestamp, - ) - // Force checksum calculation. 
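// (Encode also computes the checksum that appears as the final component of
// the external key parsed below.)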
- require.NoError(t, chunk.Encode()) - - // Chunk encoded using code at commit b1777a50ab19 - rawData := []byte("\x00\x00\x00\xb7\xff\x06\x00\x00sNaPpY\x01\xa5\x00\x00\x04\xc7a\xba{\"fingerprint\":18245339272195143978,\"userID\":\"userID\",\"from\":1557650721,\"through\":1557654321,\"metric\":{\"bar\":\"baz\",\"toms\":\"code\",\"__name__\":\"foo\"},\"encoding\":3}\n\x00\x00\x00\x15\x01\x00\x11\x00\x00\x01\xd0\xdd\xf5\xb6\xd5Z\x00\x00\x00\x00\x00\x00\x00\x00\x00") - decodeContext := NewDecodeContext() - have, err := ParseExternalKey(userID, "userID/fd3477666dacf92a:16aab37c8e8:16aab6eb768:38eb373c") - require.NoError(t, err) - require.NoError(t, have.Decode(decodeContext, rawData)) - want := chunk - // We can't just compare these two chunks, since the Bigchunk internals are different on construction and read-in. - // Compare the serialised version instead - require.NoError(t, have.Encode()) - require.NoError(t, want.Encode()) - haveEncoded, _ := have.Encoded() - wantEncoded, _ := want.Encoded() - require.Equal(t, haveEncoded, wantEncoded) - require.Equal(t, have.ExternalKey(), want.ExternalKey()) -} - -func TestParseExternalKey(t *testing.T) { - for _, c := range []struct { - key string - chunk Chunk - err error - }{ - {key: "2:1484661279394:1484664879394", chunk: Chunk{ - UserID: userID, - Fingerprint: model.Fingerprint(2), - From: model.Time(1484661279394), - Through: model.Time(1484664879394), - }}, - - {key: userID + "/2:270d8f00:270d8f00:f84c5745", chunk: Chunk{ - UserID: userID, - Fingerprint: model.Fingerprint(2), - From: model.Time(655200000), - Through: model.Time(655200000), - ChecksumSet: true, - Checksum: 4165752645, - }}, - - {key: "invalidUserID/2:270d8f00:270d8f00:f84c5745", chunk: Chunk{}, err: ErrWrongMetadata}, - } { - chunk, err := ParseExternalKey(userID, c.key) - require.Equal(t, c.err, errors.Cause(err)) - require.Equal(t, c.chunk, chunk) - } -} - -func TestChunksToMatrix(t *testing.T) { - // Create 2 chunks which have the same metric - now := model.Now() - chunk1 := dummyChunkFor(now, labelsForDummyChunks) - chunk1Samples, err := chunk1.Samples(chunk1.From, chunk1.Through) - require.NoError(t, err) - chunk2 := dummyChunkFor(now, labelsForDummyChunks) - chunk2Samples, err := chunk2.Samples(chunk2.From, chunk2.Through) - require.NoError(t, err) - - ss1 := &model.SampleStream{ - Metric: util.LabelsToMetric(chunk1.Metric), - Values: util.MergeSampleSets(chunk1Samples, chunk2Samples), - } - - // Create another chunk with a different metric - otherMetric := labels.Labels{ - {Name: model.MetricNameLabel, Value: "foo2"}, - {Name: "bar", Value: "baz"}, - {Name: "toms", Value: "code"}, - } - chunk3 := dummyChunkFor(now, otherMetric) - chunk3Samples, err := chunk3.Samples(chunk3.From, chunk3.Through) - require.NoError(t, err) - - ss2 := &model.SampleStream{ - Metric: util.LabelsToMetric(chunk3.Metric), - Values: chunk3Samples, - } - - for _, c := range []struct { - chunks []Chunk - expectedMatrix model.Matrix - }{ - { - chunks: []Chunk{}, - expectedMatrix: model.Matrix{}, - }, { - chunks: []Chunk{ - chunk1, - chunk2, - chunk3, - }, - expectedMatrix: model.Matrix{ - ss1, - ss2, - }, - }, - } { - matrix, err := ChunksToMatrix(context.Background(), c.chunks, chunk1.From, chunk3.Through) - require.NoError(t, err) - - sort.Sort(matrix) - sort.Sort(c.expectedMatrix) - require.Equal(t, c.expectedMatrix, matrix) - } -} - -func benchmarkChunk(now model.Time) Chunk { - return dummyChunkFor(now, BenchmarkLabels) -} - -func BenchmarkEncode(b *testing.B) { - chunk := dummyChunk(model.Now()) 
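// chunk.encoded is cleared inside the loop below so that each iteration
// performs a real re-encode instead of returning the cached bytes.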
- - b.ResetTimer() - - for i := 0; i < b.N; i++ { - chunk.encoded = nil - err := chunk.Encode() - require.NoError(b, err) - } -} - -func BenchmarkDecode1(b *testing.B) { benchmarkDecode(b, 1) } -func BenchmarkDecode100(b *testing.B) { benchmarkDecode(b, 100) } -func BenchmarkDecode10000(b *testing.B) { benchmarkDecode(b, 10000) } - -func benchmarkDecode(b *testing.B, batchSize int) { - chunk := benchmarkChunk(model.Now()) - err := chunk.Encode() - require.NoError(b, err) - buf, err := chunk.Encoded() - require.NoError(b, err) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - decodeContext := NewDecodeContext() - b.StopTimer() - chunks := make([]Chunk, batchSize) - // Copy across the metadata so the check works out ok - for j := 0; j < batchSize; j++ { - chunks[j] = chunk - chunks[j].Metric = nil - chunks[j].Data = nil - } - b.StartTimer() - for j := 0; j < batchSize; j++ { - err := chunks[j].Decode(decodeContext, buf) - require.NoError(b, err) - } - } -} - -func TestChunk_Slice(t *testing.T) { - chunkEndTime := model.Now() - chunkStartTime := chunkEndTime.Add(-time.Hour) - - for _, tc := range []struct { - name string - sliceRange model.Interval - err error - }{ - { - name: "slice first 10 mins", - sliceRange: model.Interval{Start: chunkStartTime, End: chunkStartTime.Add(10 * time.Minute)}, - }, - { - name: "slice last 10 mins", - sliceRange: model.Interval{Start: chunkEndTime.Add(-10 * time.Minute), End: chunkEndTime}, - }, - { - name: "slice in the middle", - sliceRange: model.Interval{Start: chunkStartTime.Add(20 * time.Minute), End: chunkEndTime.Add(-20 * time.Minute)}, - }, - { - name: "slice out of range", - sliceRange: model.Interval{Start: chunkEndTime.Add(20 * time.Minute), End: chunkEndTime.Add(30 * time.Minute)}, - err: ErrSliceOutOfRange, - }, - { - name: "slice no data in range", - sliceRange: model.Interval{Start: chunkStartTime.Add(time.Second), End: chunkStartTime.Add(10 * time.Second)}, - err: encoding.ErrSliceNoDataInRange, - }, - { - name: "slice interval not aligned with sample intervals", - sliceRange: model.Interval{Start: chunkStartTime.Add(time.Second), End: chunkStartTime.Add(10 * time.Minute).Add(10 * time.Second)}, - }, - } { - t.Run(tc.name, func(t *testing.T) { - // dummy chunk is created with time range chunkEndTime-1hour to chunkEndTime - originalChunk := dummyChunkForEncoding(chunkEndTime, labelsForDummyChunks, encoding.DefaultEncoding, 241) - - newChunk, err := originalChunk.Slice(tc.sliceRange.Start, tc.sliceRange.End) - if tc.err != nil { - require.Equal(t, tc.err, err) - return - } - require.NoError(t, err) - - require.Equal(t, tc.sliceRange.Start, newChunk.From) - require.Equal(t, tc.sliceRange.End, newChunk.Through) - - chunkItr := originalChunk.Data.NewIterator(nil) - chunkItr.FindAtOrAfter(tc.sliceRange.Start) - - newChunkItr := newChunk.Data.NewIterator(nil) - newChunkItr.Scan() - - for { - require.Equal(t, chunkItr.Value(), newChunkItr.Value()) - - originalChunksHasMoreSamples := chunkItr.Scan() - newChunkHasMoreSamples := newChunkItr.Scan() - - // originalChunk and newChunk both should end at same time or newChunk should end before or at slice end time - if !originalChunksHasMoreSamples || chunkItr.Value().Timestamp > tc.sliceRange.End { - require.Equal(t, false, newChunkHasMoreSamples) - break - } - - require.Equal(t, true, newChunkHasMoreSamples) - } - - }) - } -} diff --git a/internal/cortex/chunk/composite_store.go b/internal/cortex/chunk/composite_store.go deleted file mode 100644 index aefe474cb8..0000000000 --- 
a/internal/cortex/chunk/composite_store.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "context" - "errors" - "sort" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/thanos-io/thanos/internal/cortex/chunk/cache" -) - -// StoreLimits helps get Limits specific to Queries for Stores -type StoreLimits interface { - MaxChunksPerQueryFromStore(userID string) int - MaxQueryLength(userID string) time.Duration -} - -type CacheGenNumLoader interface { - GetStoreCacheGenNumber(tenantIDs []string) string -} - -// Store for chunks. -type Store interface { - Put(ctx context.Context, chunks []Chunk) error - PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error - Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) - // GetChunkRefs returns the un-loaded chunks and the fetchers to be used to load them. You can load each slice of chunks ([]Chunk), - // using the corresponding Fetcher (fetchers[i].FetchChunks(ctx, chunks[i], ...) - GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) - LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) - LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) - GetChunkFetcher(tm model.Time) *Fetcher - - // DeleteChunk deletes a chunks index entry and then deletes the actual chunk from chunk storage. - // It takes care of chunks which are deleting partially by creating and inserting a new chunk first and then deleting the original chunk - DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error - // DeleteSeriesIDs is only relevant for SeriesStore. - DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error - Stop() -} - -// CompositeStore is a Store which delegates to various stores depending -// on when they were activated. -type CompositeStore struct { - compositeStore -} - -type compositeStore struct { - cacheGenNumLoader CacheGenNumLoader - stores []compositeStoreEntry -} - -type compositeStoreEntry struct { - start model.Time - Store -} - -// NewCompositeStore creates a new Store which delegates to different stores depending -// on time. 
-func NewCompositeStore(cacheGenNumLoader CacheGenNumLoader) CompositeStore { - return CompositeStore{compositeStore{cacheGenNumLoader: cacheGenNumLoader}} -} - -// AddPeriod adds the configuration for a period of time to the CompositeStore -func (c *CompositeStore) AddPeriod(storeCfg StoreConfig, cfg PeriodConfig, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) error { - schema, err := cfg.CreateSchema() - if err != nil { - return err - } - - return c.addSchema(storeCfg, schema, cfg.From.Time, index, chunks, limits, chunksCache, writeDedupeCache) -} - -func (c *CompositeStore) addSchema(storeCfg StoreConfig, schema BaseSchema, start model.Time, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) error { - var ( - err error - store Store - ) - - switch s := schema.(type) { - case SeriesStoreSchema: - store, err = newSeriesStore(storeCfg, s, index, chunks, limits, chunksCache, writeDedupeCache) - case StoreSchema: - store, err = newStore(storeCfg, s, index, chunks, limits, chunksCache) - default: - err = errors.New("invalid schema type") - } - if err != nil { - return err - } - c.stores = append(c.stores, compositeStoreEntry{start: start, Store: store}) - return nil -} - -func (c compositeStore) Put(ctx context.Context, chunks []Chunk) error { - for _, chunk := range chunks { - err := c.forStores(ctx, chunk.UserID, chunk.From, chunk.Through, func(innerCtx context.Context, from, through model.Time, store Store) error { - return store.PutOne(innerCtx, from, through, chunk) - }) - if err != nil { - return err - } - } - return nil -} - -func (c compositeStore) PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error { - return c.forStores(ctx, chunk.UserID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { - return store.PutOne(innerCtx, from, through, chunk) - }) -} - -func (c compositeStore) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) { - var results []Chunk - err := c.forStores(ctx, userID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { - chunks, err := store.Get(innerCtx, userID, from, through, matchers...) - if err != nil { - return err - } - results = append(results, chunks...) - return nil - }) - return results, err -} - -// LabelValuesForMetricName retrieves all label values for a single label name and metric name. -func (c compositeStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) { - var result UniqueStrings - err := c.forStores(ctx, userID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { - labelValues, err := store.LabelValuesForMetricName(innerCtx, userID, from, through, metricName, labelName) - if err != nil { - return err - } - result.Add(labelValues...) - return nil - }) - return result.Strings(), err -} - -// LabelNamesForMetricName retrieves all label names for a metric name. 
-func (c compositeStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { - var result UniqueStrings - err := c.forStores(ctx, userID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { - labelNames, err := store.LabelNamesForMetricName(innerCtx, userID, from, through, metricName) - if err != nil { - return err - } - result.Add(labelNames...) - return nil - }) - return result.Strings(), err -} - -func (c compositeStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { - chunkIDs := [][]Chunk{} - fetchers := []*Fetcher{} - err := c.forStores(ctx, userID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { - ids, fetcher, err := store.GetChunkRefs(innerCtx, userID, from, through, matchers...) - if err != nil { - return err - } - - // Skip it if there are no chunks. - if len(ids) == 0 { - return nil - } - - chunkIDs = append(chunkIDs, ids...) - fetchers = append(fetchers, fetcher...) - return nil - }) - return chunkIDs, fetchers, err -} - -func (c compositeStore) GetChunkFetcher(tm model.Time) *Fetcher { - // find the schema with the lowest start _after_ tm - j := sort.Search(len(c.stores), func(j int) bool { - return c.stores[j].start > tm - }) - - // reduce it by 1 because we want a schema with start <= tm - j-- - - if 0 <= j && j < len(c.stores) { - return c.stores[j].GetChunkFetcher(tm) - } - - return nil -} - -// DeleteSeriesIDs deletes series IDs from index in series store -func (c CompositeStore) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { - return c.forStores(ctx, userID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { - return store.DeleteSeriesIDs(innerCtx, from, through, userID, metric) - }) -} - -// DeleteChunk deletes a chunks index entry and then deletes the actual chunk from chunk storage. -// It takes care of chunks which are deleting partially by creating and inserting a new chunk first and then deleting the original chunk -func (c CompositeStore) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error { - return c.forStores(ctx, userID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { - return store.DeleteChunk(innerCtx, from, through, userID, chunkID, metric, partiallyDeletedInterval) - }) -} - -func (c compositeStore) Stop() { - for _, store := range c.stores { - store.Stop() - } -} - -func (c compositeStore) forStores(ctx context.Context, userID string, from, through model.Time, callback func(innerCtx context.Context, from, through model.Time, store Store) error) error { - if len(c.stores) == 0 { - return nil - } - - ctx = c.injectCacheGen(ctx, []string{userID}) - - // first, find the schema with the highest start _before or at_ from - i := sort.Search(len(c.stores), func(i int) bool { - return c.stores[i].start > from - }) - if i > 0 { - i-- - } else { - // This could happen if we get passed a sample from before 1970. 
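// In that case sort.Search returned 0, meaning every schema starts after
// `from`, so clamp the query to begin at the first schema's start.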
- i = 0 - from = c.stores[0].start - } - - // next, find the schema with the lowest start _after_ through - j := sort.Search(len(c.stores), func(j int) bool { - return c.stores[j].start > through - }) - - min := func(a, b model.Time) model.Time { - if a < b { - return a - } - return b - } - - start := from - for ; i < j; i++ { - nextSchemaStarts := model.Latest - if i+1 < len(c.stores) { - nextSchemaStarts = c.stores[i+1].start - } - - end := min(through, nextSchemaStarts-1) - err := callback(ctx, start, end, c.stores[i].Store) - if err != nil { - return err - } - - start = nextSchemaStarts - } - - return nil -} - -func (c compositeStore) injectCacheGen(ctx context.Context, tenantIDs []string) context.Context { - if c.cacheGenNumLoader == nil { - return ctx - } - - return cache.InjectCacheGenNumber(ctx, c.cacheGenNumLoader.GetStoreCacheGenNumber(tenantIDs)) -} diff --git a/internal/cortex/chunk/composite_store_test.go b/internal/cortex/chunk/composite_store_test.go deleted file mode 100644 index b42b3d2f5e..0000000000 --- a/internal/cortex/chunk/composite_store_test.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/weaveworks/common/test" -) - -type mockStore int - -func (m mockStore) Put(ctx context.Context, chunks []Chunk) error { - return nil -} - -func (m mockStore) PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error { - return nil -} - -func (m mockStore) Get(tx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) { - return nil, nil -} -func (m mockStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) { - return nil, nil -} - -func (m mockStore) GetChunkRefs(tx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { - return nil, nil, nil -} - -func (m mockStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { - return nil, nil -} - -func (m mockStore) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error { - return nil -} -func (m mockStore) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { - return nil -} - -func (m mockStore) GetChunkFetcher(tm model.Time) *Fetcher { - return nil -} - -func (m mockStore) Stop() {} - -func TestCompositeStore(t *testing.T) { - type result struct { - from, through model.Time - store Store - } - collect := func(results *[]result) func(_ context.Context, from, through model.Time, store Store) error { - return func(_ context.Context, from, through model.Time, store Store) error { - *results = append(*results, result{from, through, store}) - return nil - } - } - cs := compositeStore{ - stores: []compositeStoreEntry{ - {model.TimeFromUnix(0), mockStore(1)}, - {model.TimeFromUnix(100), mockStore(2)}, - {model.TimeFromUnix(200), mockStore(3)}, - }, - } - - for i, tc := range []struct { - cs compositeStore - from, through int64 - want []result - }{ - // Test we have sensible results when there are no schema's defined - 
{compositeStore{}, 0, 1, []result{}}, - - // Test we have sensible results when there is a single schema - { - compositeStore{ - stores: []compositeStoreEntry{ - {model.TimeFromUnix(0), mockStore(1)}, - }, - }, - 0, 10, - []result{ - {model.TimeFromUnix(0), model.TimeFromUnix(10), mockStore(1)}, - }, - }, - - // Test we have sensible results for negative (i.e. pre-1970) times - { - compositeStore{ - stores: []compositeStoreEntry{ - {model.TimeFromUnix(0), mockStore(1)}, - }, - }, - -10, -9, - []result{}, - }, - { - compositeStore{ - stores: []compositeStoreEntry{ - {model.TimeFromUnix(0), mockStore(1)}, - }, - }, - -10, 10, - []result{ - {model.TimeFromUnix(0), model.TimeFromUnix(10), mockStore(1)}, - }, - }, - - // Test we have sensible results when there are two schemas - { - compositeStore{ - stores: []compositeStoreEntry{ - {model.TimeFromUnix(0), mockStore(1)}, - {model.TimeFromUnix(100), mockStore(2)}, - }, - }, - 34, 165, - []result{ - {model.TimeFromUnix(34), model.TimeFromUnix(100) - 1, mockStore(1)}, - {model.TimeFromUnix(100), model.TimeFromUnix(165), mockStore(2)}, - }, - }, - - // Test all the various combinations we can get when there are three schemas - { - cs, 34, 65, - []result{ - {model.TimeFromUnix(34), model.TimeFromUnix(65), mockStore(1)}, - }, - }, - - { - cs, 244, 6785, - []result{ - {model.TimeFromUnix(244), model.TimeFromUnix(6785), mockStore(3)}, - }, - }, - - { - cs, 34, 165, - []result{ - {model.TimeFromUnix(34), model.TimeFromUnix(100) - 1, mockStore(1)}, - {model.TimeFromUnix(100), model.TimeFromUnix(165), mockStore(2)}, - }, - }, - - { - cs, 151, 264, - []result{ - {model.TimeFromUnix(151), model.TimeFromUnix(200) - 1, mockStore(2)}, - {model.TimeFromUnix(200), model.TimeFromUnix(264), mockStore(3)}, - }, - }, - - { - cs, 32, 264, - []result{ - {model.TimeFromUnix(32), model.TimeFromUnix(100) - 1, mockStore(1)}, - {model.TimeFromUnix(100), model.TimeFromUnix(200) - 1, mockStore(2)}, - {model.TimeFromUnix(200), model.TimeFromUnix(264), mockStore(3)}, - }, - }, - } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - have := []result{} - err := tc.cs.forStores(context.Background(), userID, model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), collect(&have)) - require.NoError(t, err) - if !reflect.DeepEqual(tc.want, have) { - t.Fatalf("wrong stores - %s", test.Diff(tc.want, have)) - } - }) - } -} - -type mockStoreLabel struct { - mockStore - values []string -} - -func (m mockStoreLabel) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) { - return m.values, nil -} - -func (m mockStoreLabel) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { - return m.values, nil -} - -func TestCompositeStoreLabels(t *testing.T) { - t.Parallel() - - cs := compositeStore{ - stores: []compositeStoreEntry{ - {model.TimeFromUnix(0), mockStore(1)}, - {model.TimeFromUnix(20), mockStoreLabel{mockStore(1), []string{"b", "c", "e"}}}, - {model.TimeFromUnix(40), mockStoreLabel{mockStore(1), []string{"a", "b", "c", "f"}}}, - }, - } - - for i, tc := range []struct { - from, through int64 - want []string - }{ - { - 0, 10, - nil, - }, - { - 0, 30, - []string{"b", "c", "e"}, - }, - { - 0, 40, - []string{"a", "b", "c", "e", "f"}, - }, - } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - have, err := cs.LabelNamesForMetricName(context.Background(), "", model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), "") -
require.NoError(t, err) - if !reflect.DeepEqual(tc.want, have) { - t.Fatalf("wrong label names - %s", test.Diff(tc.want, have)) - } - have, err = cs.LabelValuesForMetricName(context.Background(), "", model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), "", "") - require.NoError(t, err) - if !reflect.DeepEqual(tc.want, have) { - t.Fatalf("wrong label values - %s", test.Diff(tc.want, have)) - } - }) - } - -} - -type mockStoreGetChunkFetcher struct { - mockStore - chunkFetcher *Fetcher -} - -func (m mockStoreGetChunkFetcher) GetChunkFetcher(tm model.Time) *Fetcher { - return m.chunkFetcher -} - -func TestCompositeStore_GetChunkFetcher(t *testing.T) { - cs := compositeStore{ - stores: []compositeStoreEntry{ - {model.TimeFromUnix(10), mockStoreGetChunkFetcher{mockStore(0), &Fetcher{}}}, - {model.TimeFromUnix(20), mockStoreGetChunkFetcher{mockStore(1), &Fetcher{}}}, - }, - } - - for _, tc := range []struct { - name string - tm model.Time - expectedFetcher *Fetcher - }{ - { - name: "no matching store", - tm: model.TimeFromUnix(0), - }, - { - name: "first store", - tm: model.TimeFromUnix(10), - expectedFetcher: cs.stores[0].Store.(mockStoreGetChunkFetcher).chunkFetcher, - }, - { - name: "still first store", - tm: model.TimeFromUnix(11), - expectedFetcher: cs.stores[0].Store.(mockStoreGetChunkFetcher).chunkFetcher, - }, - { - name: "second store", - tm: model.TimeFromUnix(20), - expectedFetcher: cs.stores[1].Store.(mockStoreGetChunkFetcher).chunkFetcher, - }, - { - name: "still second store", - tm: model.TimeFromUnix(21), - expectedFetcher: cs.stores[1].Store.(mockStoreGetChunkFetcher).chunkFetcher, - }, - } { - t.Run(tc.name, func(t *testing.T) { - require.Same(t, tc.expectedFetcher, cs.GetChunkFetcher(tc.tm)) - }) - } - -} diff --git a/internal/cortex/chunk/encoding/bigchunk.go b/internal/cortex/chunk/encoding/bigchunk.go deleted file mode 100644 index 187ee197f4..0000000000 --- a/internal/cortex/chunk/encoding/bigchunk.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package encoding - -import ( - "bytes" - "encoding/binary" - "errors" - "io" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/tsdb/chunkenc" -) - -const samplesPerChunk = 120 - -var errOutOfBounds = errors.New("out of bounds") - -type smallChunk struct { - chunkenc.XORChunk - start int64 -} - -// bigchunk is a set of prometheus/tsdb chunks. It grows over time and has no -// upper bound on the number of samples it can contain. -type bigchunk struct { - chunks []smallChunk - - appender chunkenc.Appender - remainingSamples int -} - -func newBigchunk() *bigchunk { - return &bigchunk{} -} - -func (b *bigchunk) Add(sample model.SamplePair) (Chunk, error) { - if b.remainingSamples == 0 { - if bigchunkSizeCapBytes > 0 && b.Size() > bigchunkSizeCapBytes { - return addToOverflowChunk(sample) - } - if err := b.addNextChunk(sample.Timestamp); err != nil { - return nil, err - } - } - - b.appender.Append(int64(sample.Timestamp), float64(sample.Value)) - b.remainingSamples-- - return nil, nil -} - -// addNextChunk adds a new XOR "subchunk" to the internal list of chunks. -func (b *bigchunk) addNextChunk(start model.Time) error { - // To save memory, we "compact" the previous chunk - the array backing the slice - will be up to 2x too big, and we can save this space.
- const chunkCapacityExcess = 32 // don't bother copying if it's within this range - if l := len(b.chunks); l > 0 { - oldBuf := b.chunks[l-1].XORChunk.Bytes() - if cap(oldBuf) > len(oldBuf)+chunkCapacityExcess { - buf := make([]byte, len(oldBuf)) - copy(buf, oldBuf) - compacted, err := chunkenc.FromData(chunkenc.EncXOR, buf) - if err != nil { - return err - } - b.chunks[l-1].XORChunk = *compacted.(*chunkenc.XORChunk) - } - } - - // Explicitly reallocate slice to avoid up to 2x overhead if we let append() do it - if len(b.chunks)+1 > cap(b.chunks) { - newChunks := make([]smallChunk, len(b.chunks), len(b.chunks)+1) - copy(newChunks, b.chunks) - b.chunks = newChunks - } - b.chunks = append(b.chunks, smallChunk{ - XORChunk: *chunkenc.NewXORChunk(), - start: int64(start), - }) - - appender, err := b.chunks[len(b.chunks)-1].Appender() - if err != nil { - return err - } - b.appender = appender - b.remainingSamples = samplesPerChunk - return nil -} - -func (b *bigchunk) Marshal(wio io.Writer) error { - w := writer{wio} - if err := w.WriteVarInt16(uint16(len(b.chunks))); err != nil { - return err - } - for _, chunk := range b.chunks { - buf := chunk.Bytes() - if err := w.WriteVarInt16(uint16(len(buf))); err != nil { - return err - } - if _, err := w.Write(buf); err != nil { - return err - } - } - return nil -} - -func (b *bigchunk) MarshalToBuf(buf []byte) error { - writer := bytes.NewBuffer(buf) - return b.Marshal(writer) -} - -func (b *bigchunk) UnmarshalFromBuf(buf []byte) error { - r := reader{buf: buf} - numChunks, err := r.ReadUint16() - if err != nil { - return err - } - - b.chunks = make([]smallChunk, 0, numChunks+1) // allow one extra space in case we want to add new data - var reuseIter chunkenc.Iterator - for i := uint16(0); i < numChunks; i++ { - chunkLen, err := r.ReadUint16() - if err != nil { - return err - } - - chunkBuf, err := r.ReadBytes(int(chunkLen)) - if err != nil { - return err - } - - chunk, err := chunkenc.FromData(chunkenc.EncXOR, chunkBuf) - if err != nil { - return err - } - - var start int64 - start, reuseIter, err = firstTime(chunk, reuseIter) - if err != nil { - return err - } - - b.chunks = append(b.chunks, smallChunk{ - XORChunk: *chunk.(*chunkenc.XORChunk), - start: int64(start), - }) - } - return nil -} - -func (b *bigchunk) Encoding() Encoding { - return Bigchunk -} - -func (b *bigchunk) Utilization() float64 { - return 1.0 -} - -func (b *bigchunk) Len() int { - sum := 0 - for _, c := range b.chunks { - sum += c.NumSamples() - } - return sum -} - -func (b *bigchunk) Size() int { - sum := 2 // For the number of sub chunks. - for _, c := range b.chunks { - sum += 2 // For the length of the sub chunk. 
- sum += len(c.Bytes()) - } - return sum -} - -func (b *bigchunk) NewIterator(reuseIter Iterator) Iterator { - if bci, ok := reuseIter.(*bigchunkIterator); ok { - bci.bigchunk = b - bci.i = 0 - if len(b.chunks) > 0 { - bci.curr = b.chunks[0].Iterator(bci.curr) - } else { - bci.curr = chunkenc.NewNopIterator() - } - return bci - } - var it chunkenc.Iterator - if len(b.chunks) > 0 { - it = b.chunks[0].Iterator(it) - } else { - it = chunkenc.NewNopIterator() - } - return &bigchunkIterator{ - bigchunk: b, - curr: it, - } -} - -func (b *bigchunk) Slice(start, end model.Time) Chunk { - i, j := 0, len(b.chunks) - for k := 0; k < len(b.chunks); k++ { - if b.chunks[k].start <= int64(start) { - i = k - } - if b.chunks[k].start > int64(end) { - j = k - break - } - } - return &bigchunk{ - chunks: b.chunks[i:j], - } -} - -func (b *bigchunk) Rebound(start, end model.Time) (Chunk, error) { - return reboundChunk(b, start, end) -} - -type writer struct { - io.Writer -} - -func (w writer) WriteVarInt16(i uint16) error { - var b [2]byte - binary.LittleEndian.PutUint16(b[:], i) - _, err := w.Write(b[:]) - return err -} - -type reader struct { - i int - buf []byte -} - -func (r *reader) ReadUint16() (uint16, error) { - if r.i+2 > len(r.buf) { - return 0, errOutOfBounds - } - result := binary.LittleEndian.Uint16(r.buf[r.i:]) - r.i += 2 - return result, nil -} - -func (r *reader) ReadBytes(count int) ([]byte, error) { - if r.i+count > len(r.buf) { - return nil, errOutOfBounds - } - result := r.buf[r.i : r.i+count] - r.i += count - return result, nil -} - -type bigchunkIterator struct { - *bigchunk - - curr chunkenc.Iterator - i int -} - -func (it *bigchunkIterator) FindAtOrAfter(target model.Time) bool { - if it.i >= len(it.chunks) { - return false - } - - // If the seek is outside the current chunk, use the index to find the right - // chunk. - if int64(target) < it.chunks[it.i].start || - (it.i+1 < len(it.chunks) && int64(target) >= it.chunks[it.i+1].start) { - it.curr = nil - for it.i = 0; it.i+1 < len(it.chunks) && int64(target) >= it.chunks[it.i+1].start; it.i++ { - } - } - - if it.curr == nil { - it.curr = it.chunks[it.i].Iterator(it.curr) - } else if t, _ := it.curr.At(); int64(target) <= t { - it.curr = it.chunks[it.i].Iterator(it.curr) - } - - for it.curr.Next() { - t, _ := it.curr.At() - if t >= int64(target) { - return true - } - } - // Timestamp is after the end of that chunk - if there is another chunk - // then the position we need is at the beginning of it. 
- if it.i+1 < len(it.chunks) { - it.i++ - it.curr = it.chunks[it.i].Iterator(it.curr) - it.curr.Next() - return true - } - return false -} - -func (it *bigchunkIterator) Scan() bool { - if it.curr.Next() { - return true - } - if err := it.curr.Err(); err != nil { - return false - } - - for it.i < len(it.chunks)-1 { - it.i++ - it.curr = it.chunks[it.i].Iterator(it.curr) - if it.curr.Next() { - return true - } - } - return false -} - -func (it *bigchunkIterator) Value() model.SamplePair { - t, v := it.curr.At() - return model.SamplePair{ - Timestamp: model.Time(t), - Value: model.SampleValue(v), - } -} - -func (it *bigchunkIterator) Batch(size int) Batch { - var result Batch - j := 0 - for j < size { - t, v := it.curr.At() - result.Timestamps[j] = t - result.Values[j] = v - j++ - - if j < size && !it.Scan() { - break - } - } - result.Length = j - return result -} - -func (it *bigchunkIterator) Err() error { - if it.curr != nil { - return it.curr.Err() - } - return nil -} - -func firstTime(c chunkenc.Chunk, iter chunkenc.Iterator) (int64, chunkenc.Iterator, error) { - var first int64 - iter = c.Iterator(iter) - if iter.Next() { - first, _ = iter.At() - } - return first, iter, iter.Err() -} diff --git a/internal/cortex/chunk/encoding/bigchunk_test.go b/internal/cortex/chunk/encoding/bigchunk_test.go deleted file mode 100644 index 4b21b9e7cb..0000000000 --- a/internal/cortex/chunk/encoding/bigchunk_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package encoding - -import ( - "bytes" - "fmt" - "testing" - - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" -) - -func TestSliceBiggerChunk(t *testing.T) { - var c Chunk = newBigchunk() - for i := 0; i < 12*3600/15; i++ { - nc, err := c.Add(model.SamplePair{ - Timestamp: model.Time(i * step), - Value: model.SampleValue(i), - }) - require.NoError(t, err) - require.Nil(t, nc) - } - - // Test for when the slice aligns perfectly with the sub-chunk boundaries. - - for i := 0; i < (12*3600/15)-480; i += 120 { - s := c.Slice(model.Time(i*step), model.Time((i+479)*step)) - iter := s.NewIterator(nil) - for j := i; j < i+480; j++ { - require.True(t, iter.Scan()) - sample := iter.Value() - require.Equal(t, sample.Timestamp, model.Time(j*step)) - require.Equal(t, sample.Value, model.SampleValue(j)) - } - require.False(t, iter.Scan()) - require.NoError(t, iter.Err()) - } - - // Test for when the slice does not align perfectly with the sub-chunk boundaries. - for i := 0; i < (12*3600/15)-500; i += 100 { - s := c.Slice(model.Time(i*step), model.Time((i+500)*step)) - iter := s.NewIterator(nil) - - // Consume some samples until we get to where we want to be. 
- for { - require.True(t, iter.Scan()) - sample := iter.Value() - if sample.Timestamp == model.Time(i*step) { - break - } - } - - for j := i; j < i+500; j++ { - sample := iter.Value() - require.Equal(t, sample.Timestamp, model.Time(j*step)) - require.Equal(t, sample.Value, model.SampleValue(j)) - require.True(t, iter.Scan()) - } - - // Now try via seek - iter = s.NewIterator(iter) - require.True(t, iter.FindAtOrAfter(model.Time(i*step))) - sample := iter.Value() - require.Equal(t, sample.Timestamp, model.Time(i*step)) - require.Equal(t, sample.Value, model.SampleValue(i)) - } -} - -func BenchmarkBiggerChunkMemory(b *testing.B) { - for i := 0; i < b.N; i++ { - var c Chunk = newBigchunk() - for i := 0; i < 12*3600/15; i++ { - nc, err := c.Add(model.SamplePair{ - Timestamp: model.Time(i * step), - Value: model.SampleValue(i), - }) - require.NoError(b, err) - require.Nil(b, nc) - } - - c.(*bigchunk).printSize() - } -} - -// printSize calculates various sizes of the chunk when encoded, and in memory. -func (b *bigchunk) printSize() { - var buf bytes.Buffer - _ = b.Marshal(&buf) - - var size, allocd int - for _, c := range b.chunks { - size += len(c.Bytes()) - allocd += cap(c.Bytes()) - } - - fmt.Println("encodedlen =", len(buf.Bytes()), "subchunks =", len(b.chunks), "len =", size, "cap =", allocd) -} diff --git a/internal/cortex/chunk/encoding/chunk.go b/internal/cortex/chunk/encoding/chunk.go deleted file mode 100644 index 2321e0a579..0000000000 --- a/internal/cortex/chunk/encoding/chunk.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package encoding - -import ( - "errors" - "io" - "sort" - - "github.com/prometheus/common/model" - errs "github.com/weaveworks/common/errors" - - "github.com/thanos-io/thanos/internal/cortex/prom1/storage/metric" -) - -const ( - // ChunkLen is the length of a chunk in bytes. - ChunkLen = 1024 - - ErrSliceNoDataInRange = errs.Error("chunk has no data for given range to slice") - ErrSliceChunkOverflow = errs.Error("slicing should not overflow a chunk") -) - -var ( - errChunkBoundsExceeded = errors.New("attempted access outside of chunk boundaries") -) - -// Chunk is the interface for all chunks. Chunks are generally not -// goroutine-safe. -type Chunk interface { - // Add adds a SamplePair to the chunks, performs any necessary - // re-encoding, and creates any necessary overflow chunk. - // The returned Chunk is the overflow chunk if it was created. - // The returned Chunk is nil if the sample got appended to the same chunk. - Add(sample model.SamplePair) (Chunk, error) - // NewIterator returns an iterator for the chunks. - // The iterator passed as argument is for re-use. 
Depending on implementation, - // the iterator can be re-used or a new iterator can be allocated. - NewIterator(Iterator) Iterator - Marshal(io.Writer) error - UnmarshalFromBuf([]byte) error - Encoding() Encoding - Utilization() float64 - - // Slice returns a smaller chunk that includes all samples between start and end - // (inclusive). It may overestimate. On some encodings it is a noop. - Slice(start, end model.Time) Chunk - - // Rebound returns a smaller chunk that includes all samples between start and end (inclusive). - // We do not want to change existing Slice implementations because - // it is built specifically for query optimization and is a noop for some of the encodings. - Rebound(start, end model.Time) (Chunk, error) - - // Len returns the number of samples in the chunk. Implementations may be - // expensive. - Len() int - - // Size returns the approximate length of the chunk in bytes. - Size() int -} - -// Iterator enables efficient access to the content of a chunk. It is -// generally not safe to use an Iterator concurrently with or after chunk -// mutation. -type Iterator interface { - // Scans the next value in the chunk. Directly after the iterator has - // been created, the next value is the first value in the - // chunk. Otherwise, it is the value following the last value scanned or - // found (by one of the Find... methods). Returns false if either the - // end of the chunk is reached or an error has occurred. - Scan() bool - // Finds the oldest value at or after the provided time. Returns false - // if either the chunk contains no value at or after the provided time, - // or an error has occurred. - FindAtOrAfter(model.Time) bool - // Returns the last value scanned (by the scan method) or found (by one - // of the find... methods). It returns model.ZeroSamplePair before any of - // those methods were called. - Value() model.SamplePair - // Returns a batch of the provided size; NB not idempotent! Should only be called - // once per Scan. - Batch(size int) Batch - // Returns the last error encountered. In general, an error signals data - // corruption in the chunk and requires quarantining. - Err() error -} - -// BatchSize is samples per batch; this was chosen by benchmarking all sizes from -// 1 to 128. -const BatchSize = 12 - -// Batch is a sorted set of (timestamp, value) pairs. They are intended to be -// small, and passed by value. -type Batch struct { - Timestamps [BatchSize]int64 - Values [BatchSize]float64 - Index int - Length int -} - -// RangeValues is a utility function that retrieves all values within the given -// range from an Iterator. -func RangeValues(it Iterator, in metric.Interval) ([]model.SamplePair, error) { - result := []model.SamplePair{} - if !it.FindAtOrAfter(in.OldestInclusive) { - return result, it.Err() - } - for !it.Value().Timestamp.After(in.NewestInclusive) { - result = append(result, it.Value()) - if !it.Scan() { - break - } - } - return result, it.Err() -} - -// addToOverflowChunk is a utility function that creates a new chunk as overflow -// chunk, adds the provided sample to it, and returns the new overflow chunk.
-func addToOverflowChunk(s model.SamplePair) (Chunk, error) { - overflowChunk := New() - _, err := overflowChunk.Add(s) - if err != nil { - return nil, err - } - return overflowChunk, nil -} - -// transcodeAndAdd is a utility function that transcodes the provided src chunk -// into the dst chunk (plus the necessary overflow chunks) and then adds the -// provided sample. It returns the new chunks (transcoded plus overflow) with -// the new sample at the end. -func transcodeAndAdd(dst Chunk, src Chunk, s model.SamplePair) ([]Chunk, error) { - Ops.WithLabelValues(Transcode).Inc() - - var ( - head = dst - newChunk Chunk - body = []Chunk{head} - err error - ) - - it := src.NewIterator(nil) - for it.Scan() { - if newChunk, err = head.Add(it.Value()); err != nil { - return nil, err - } - if newChunk != nil { - body = append(body, newChunk) - head = newChunk - } - } - if it.Err() != nil { - return nil, it.Err() - } - - if newChunk, err = head.Add(s); err != nil { - return nil, err - } - if newChunk != nil { - body = append(body, newChunk) - } - return body, nil -} - -// indexAccessor allows access to samples by index. -type indexAccessor interface { - timestampAtIndex(int) model.Time - sampleValueAtIndex(int) model.SampleValue - err() error -} - -// indexAccessingChunkIterator is a chunk iterator for chunks for which an -// indexAccessor implementation exists. -type indexAccessingChunkIterator struct { - len int - pos int - lastValue model.SamplePair - acc indexAccessor -} - -func newIndexAccessingChunkIterator(len int, acc indexAccessor) *indexAccessingChunkIterator { - return &indexAccessingChunkIterator{ - len: len, - pos: -1, - lastValue: model.ZeroSamplePair, - acc: acc, - } -} - -// scan implements Iterator. -func (it *indexAccessingChunkIterator) Scan() bool { - it.pos++ - if it.pos >= it.len { - return false - } - it.lastValue = model.SamplePair{ - Timestamp: it.acc.timestampAtIndex(it.pos), - Value: it.acc.sampleValueAtIndex(it.pos), - } - return it.acc.err() == nil -} - -// findAtOrAfter implements Iterator. -func (it *indexAccessingChunkIterator) FindAtOrAfter(t model.Time) bool { - i := sort.Search(it.len, func(i int) bool { - return !it.acc.timestampAtIndex(i).Before(t) - }) - if i == it.len || it.acc.err() != nil { - return false - } - it.pos = i - it.lastValue = model.SamplePair{ - Timestamp: it.acc.timestampAtIndex(i), - Value: it.acc.sampleValueAtIndex(i), - } - return true -} - -// value implements Iterator. -func (it *indexAccessingChunkIterator) Value() model.SamplePair { - return it.lastValue -} - -func (it *indexAccessingChunkIterator) Batch(size int) Batch { - var batch Batch - j := 0 - for j < size && it.pos < it.len { - batch.Timestamps[j] = int64(it.acc.timestampAtIndex(it.pos)) - batch.Values[j] = float64(it.acc.sampleValueAtIndex(it.pos)) - it.pos++ - j++ - } - // Interface contract is that you call Scan before calling Batch; therefore - // without this decrement, you'd end up skipping samples. - it.pos-- - batch.Index = 0 - batch.Length = j - return batch -} - -// err implements Iterator.
-func (it *indexAccessingChunkIterator) Err() error { - return it.acc.err() -} - -func reboundChunk(c Chunk, start, end model.Time) (Chunk, error) { - itr := c.NewIterator(nil) - if !itr.FindAtOrAfter(start) { - return nil, ErrSliceNoDataInRange - } - - pc, err := NewForEncoding(c.Encoding()) - if err != nil { - return nil, err - } - - for !itr.Value().Timestamp.After(end) { - oc, err := pc.Add(itr.Value()) - if err != nil { - return nil, err - } - - if oc != nil { - return nil, ErrSliceChunkOverflow - } - if !itr.Scan() { - break - } - } - - err = itr.Err() - if err != nil { - return nil, err - } - - if pc.Len() == 0 { - return nil, ErrSliceNoDataInRange - } - - return pc, nil -} diff --git a/internal/cortex/chunk/encoding/chunk_test.go b/internal/cortex/chunk/encoding/chunk_test.go deleted file mode 100644 index 427624ba9c..0000000000 --- a/internal/cortex/chunk/encoding/chunk_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Note: this file has tests for code in both delta.go and doubledelta.go -- -// it may make sense to split those out later, but given that the tests are -// near-identical and share a helper, this feels simpler for now. - -package encoding - -import ( - "bytes" - "fmt" - "testing" - "time" - - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" -) - -func TestLen(t *testing.T) { - chunks := []Chunk{} - for _, encoding := range []Encoding{DoubleDelta, Varbit, Bigchunk, PrometheusXorChunk} { - c, err := NewForEncoding(encoding) - if err != nil { - t.Fatal(err) - } - chunks = append(chunks, c) - } - - for _, c := range chunks { - for i := 0; i <= 10; i++ { - if c.Len() != i { - t.Errorf("chunk type %s should have %d samples, had %d", c.Encoding(), i, c.Len()) - } - - cs, err := c.Add(model.SamplePair{ - Timestamp: model.Time(i), - Value: model.SampleValue(i), - }) - require.NoError(t, err) - require.Nil(t, cs) - } - } -} - -var step = int(15 * time.Second / time.Millisecond) - -func TestChunk(t *testing.T) { - for _, tc := range []struct { - encoding Encoding - maxSamples int - }{ - {DoubleDelta, 989}, - {Varbit, 2048}, - {Bigchunk, 4096}, - {PrometheusXorChunk, 2048}, - } { - for samples := tc.maxSamples / 10; samples < tc.maxSamples; samples += tc.maxSamples / 10 { - - // DoubleDelta doesn't support zero length chunks. 
- if tc.encoding == DoubleDelta && samples == 0 { - continue - } - - t.Run(fmt.Sprintf("testChunkEncoding/%s/%d", tc.encoding.String(), samples), func(t *testing.T) { - testChunkEncoding(t, tc.encoding, samples) - }) - - t.Run(fmt.Sprintf("testChunkSeek/%s/%d", tc.encoding.String(), samples), func(t *testing.T) { - testChunkSeek(t, tc.encoding, samples) - }) - - t.Run(fmt.Sprintf("testChunkSeekForward/%s/%d", tc.encoding.String(), samples), func(t *testing.T) { - testChunkSeekForward(t, tc.encoding, samples) - }) - - t.Run(fmt.Sprintf("testChunkBatch/%s/%d", tc.encoding.String(), samples), func(t *testing.T) { - testChunkBatch(t, tc.encoding, samples) - }) - - if tc.encoding != PrometheusXorChunk { - t.Run(fmt.Sprintf("testChunkRebound/%s/%d", tc.encoding.String(), samples), func(t *testing.T) { - testChunkRebound(t, tc.encoding, samples) - }) - } - } - } -} - -func mkChunk(t *testing.T, encoding Encoding, samples int) Chunk { - chunk, err := NewForEncoding(encoding) - require.NoError(t, err) - - for i := 0; i < samples; i++ { - newChunk, err := chunk.Add(model.SamplePair{ - Timestamp: model.Time(i * step), - Value: model.SampleValue(i), - }) - require.NoError(t, err) - require.Nil(t, newChunk) - } - - return chunk -} - -// testChunkEncoding checks chunks roundtrip and contain all their samples. -func testChunkEncoding(t *testing.T, encoding Encoding, samples int) { - chunk := mkChunk(t, encoding, samples) - - var buf bytes.Buffer - err := chunk.Marshal(&buf) - require.NoError(t, err) - - bs1 := buf.Bytes() - chunk, err = NewForEncoding(encoding) - require.NoError(t, err) - - err = chunk.UnmarshalFromBuf(bs1) - require.NoError(t, err) - - // Check all the samples are in there. - iter := chunk.NewIterator(nil) - for i := 0; i < samples; i++ { - require.True(t, iter.Scan()) - sample := iter.Value() - require.EqualValues(t, model.Time(i*step), sample.Timestamp) - require.EqualValues(t, model.SampleValue(i), sample.Value) - } - require.False(t, iter.Scan()) - require.NoError(t, iter.Err()) - - // Check seek works after unmarshal - iter = chunk.NewIterator(iter) - for i := 0; i < samples; i += samples / 10 { - require.True(t, iter.FindAtOrAfter(model.Time(i*step))) - } - - // Check the byte representation after another Marshal is the same. - buf = bytes.Buffer{} - err = chunk.Marshal(&buf) - require.NoError(t, err) - bs2 := buf.Bytes() - - require.Equal(t, bs1, bs2) -} - -// testChunkSeek checks seek works as expected. -// This version of the test will seek backwards.
-func testChunkSeek(t *testing.T, encoding Encoding, samples int) { - chunk := mkChunk(t, encoding, samples) - - iter := chunk.NewIterator(nil) - for i := 0; i < samples; i += samples / 10 { - if i > 0 { - // Seek one millisecond before the actual time - require.True(t, iter.FindAtOrAfter(model.Time(i*step-1)), "1ms before step %d not found", i) - sample := iter.Value() - require.EqualValues(t, model.Time(i*step), sample.Timestamp) - require.EqualValues(t, model.SampleValue(i), sample.Value) - } - // Now seek to exactly the right time - require.True(t, iter.FindAtOrAfter(model.Time(i*step))) - sample := iter.Value() - require.EqualValues(t, model.Time(i*step), sample.Timestamp) - require.EqualValues(t, model.SampleValue(i), sample.Value) - - j := i + 1 - for ; j < samples; j++ { - require.True(t, iter.Scan()) - sample := iter.Value() - require.EqualValues(t, model.Time(j*step), sample.Timestamp) - require.EqualValues(t, model.SampleValue(j), sample.Value) - } - require.False(t, iter.Scan()) - require.NoError(t, iter.Err()) - } - // Check seek past the end of the chunk returns failure - require.False(t, iter.FindAtOrAfter(model.Time(samples*step+1))) -} - -func testChunkSeekForward(t *testing.T, encoding Encoding, samples int) { - chunk := mkChunk(t, encoding, samples) - - iter := chunk.NewIterator(nil) - for i := 0; i < samples; i += samples / 10 { - require.True(t, iter.FindAtOrAfter(model.Time(i*step))) - sample := iter.Value() - require.EqualValues(t, model.Time(i*step), sample.Timestamp) - require.EqualValues(t, model.SampleValue(i), sample.Value) - - j := i + 1 - for ; j < (i+samples/10) && j < samples; j++ { - require.True(t, iter.Scan()) - sample := iter.Value() - require.EqualValues(t, model.Time(j*step), sample.Timestamp) - require.EqualValues(t, model.SampleValue(j), sample.Value) - } - } - require.False(t, iter.Scan()) - require.NoError(t, iter.Err()) -} - -func testChunkBatch(t *testing.T, encoding Encoding, samples int) { - chunk := mkChunk(t, encoding, samples) - - // Check all the samples are in there. 
- iter := chunk.NewIterator(nil) - for i := 0; i < samples; { - require.True(t, iter.Scan()) - batch := iter.Batch(BatchSize) - for j := 0; j < batch.Length; j++ { - require.EqualValues(t, int64((i+j)*step), batch.Timestamps[j]) - require.EqualValues(t, float64(i+j), batch.Values[j]) - } - i += batch.Length - } - require.False(t, iter.Scan()) - require.NoError(t, iter.Err()) -} - -func testChunkRebound(t *testing.T, encoding Encoding, samples int) { - for _, tc := range []struct { - name string - sliceFrom, sliceTo model.Time - err error - }{ - { - name: "slice first half", - sliceFrom: 0, - sliceTo: model.Time((samples / 2) * step), - }, - { - name: "slice second half", - sliceFrom: model.Time((samples / 2) * step), - sliceTo: model.Time((samples - 1) * step), - }, - { - name: "slice in the middle", - sliceFrom: model.Time(int(float64(samples)*0.25) * step), - sliceTo: model.Time(int(float64(samples)*0.75) * step), - }, - { - name: "slice no data in range", - err: ErrSliceNoDataInRange, - sliceFrom: model.Time((samples + 1) * step), - sliceTo: model.Time(samples * 2 * step), - }, - { - name: "slice interval not aligned with sample intervals", - sliceFrom: model.Time(0 + step/2), - sliceTo: model.Time(samples * step).Add(time.Duration(-step / 2)), - }, - } { - t.Run(tc.name, func(t *testing.T) { - originalChunk := mkChunk(t, encoding, samples) - - newChunk, err := originalChunk.Rebound(tc.sliceFrom, tc.sliceTo) - if tc.err != nil { - require.Equal(t, tc.err, err) - return - } - require.NoError(t, err) - - chunkItr := originalChunk.NewIterator(nil) - chunkItr.FindAtOrAfter(tc.sliceFrom) - - newChunkItr := newChunk.NewIterator(nil) - newChunkItr.Scan() - - for { - require.Equal(t, chunkItr.Value(), newChunkItr.Value()) - - originalChunksHasMoreSamples := chunkItr.Scan() - newChunkHasMoreSamples := newChunkItr.Scan() - - // originalChunk and newChunk both should end at same time or newChunk should end before or at slice end time - if !originalChunksHasMoreSamples || chunkItr.Value().Timestamp > tc.sliceTo { - require.False(t, newChunkHasMoreSamples) - break - } - - require.True(t, newChunkHasMoreSamples) - } - - }) - } -} diff --git a/internal/cortex/chunk/encoding/delta_helpers.go b/internal/cortex/chunk/encoding/delta_helpers.go deleted file mode 100644 index f598af87a0..0000000000 --- a/internal/cortex/chunk/encoding/delta_helpers.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
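The helpers in the file below pick the narrowest delta representation that can still hold a given timestamp or value delta. A minimal, standalone sketch of that width selection, mirroring bytesNeededForUnsignedTimestampDelta (the sample deltas in main are invented for illustration):

package main

import (
	"fmt"
	"math"
)

type deltaBytes byte

const (
	d1 deltaBytes = 1
	d2 deltaBytes = 2
	d4 deltaBytes = 4
	d8 deltaBytes = 8
)

// widthFor picks the smallest unsigned width that can represent deltaT,
// mirroring the unsigned-timestamp helper defined below.
func widthFor(deltaT uint64) deltaBytes {
	switch {
	case deltaT > math.MaxUint32:
		return d8
	case deltaT > math.MaxUint16:
		return d4
	case deltaT > math.MaxUint8:
		return d2
	default:
		return d1
	}
}

func main() {
	fmt.Println(widthFor(200))           // 1: fits in a uint8
	fmt.Println(widthFor(60_000))        // 2: needs a uint16
	fmt.Println(widthFor(70_000))        // 4: exceeds uint16
	fmt.Println(widthFor(5_000_000_000)) // 8: exceeds uint32
}

The signed and integer-value variants that follow use the same pattern, just with signed bounds and an extra d0 case for a constant (zero) value delta.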
- -package encoding - -import ( - "math" - - "github.com/prometheus/common/model" -) - -type deltaBytes byte - -const ( - d0 deltaBytes = 0 - d1 deltaBytes = 1 - d2 deltaBytes = 2 - d4 deltaBytes = 4 - d8 deltaBytes = 8 -) - -func bytesNeededForUnsignedTimestampDelta(deltaT model.Time) deltaBytes { - switch { - case deltaT > math.MaxUint32: - return d8 - case deltaT > math.MaxUint16: - return d4 - case deltaT > math.MaxUint8: - return d2 - default: - return d1 - } -} - -func bytesNeededForSignedTimestampDelta(deltaT model.Time) deltaBytes { - switch { - case deltaT > math.MaxInt32 || deltaT < math.MinInt32: - return d8 - case deltaT > math.MaxInt16 || deltaT < math.MinInt16: - return d4 - case deltaT > math.MaxInt8 || deltaT < math.MinInt8: - return d2 - default: - return d1 - } -} - -func bytesNeededForIntegerSampleValueDelta(deltaV model.SampleValue) deltaBytes { - switch { - case deltaV < math.MinInt32 || deltaV > math.MaxInt32: - return d8 - case deltaV < math.MinInt16 || deltaV > math.MaxInt16: - return d4 - case deltaV < math.MinInt8 || deltaV > math.MaxInt8: - return d2 - case deltaV != 0: - return d1 - default: - return d0 - } -} - -func max(a, b deltaBytes) deltaBytes { - if a > b { - return a - } - return b -} - -// isInt64 returns true if v can be represented as an int64. -func isInt64(v model.SampleValue) bool { - // Note: Using math.Modf is slower than the conversion approach below. - return model.SampleValue(int64(v)) == v -} diff --git a/internal/cortex/chunk/encoding/doubledelta.go b/internal/cortex/chunk/encoding/doubledelta.go deleted file mode 100644 index 597b049ca0..0000000000 --- a/internal/cortex/chunk/encoding/doubledelta.go +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
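The encoding implemented below stores each sample as its deviation from a linear projection (expected = base + index × base delta), writing only the usually tiny double-delta into the payload. A short sketch of that arithmetic with invented timestamps; it is simplified in that the real chunk derives the bases from its first two samples and keeps them in the header:

package main

import "fmt"

func main() {
	baseTime, baseTimeDelta := int64(1000), int64(10) // as read from a chunk header
	times := []int64{1000, 1010, 1021, 1029}          // invented sample timestamps

	for i, t := range times {
		projected := baseTime + int64(i)*baseTimeDelta // where a perfectly regular series would be
		fmt.Printf("idx=%d ddt=%+d\n", i, t-projected) // +0, +0, +1, -1
	}
	// Every double-delta here fits in a single signed byte (d1), so the payload
	// stays at one byte per timestamp until a larger deviation forces a wider width.
}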
- -package encoding - -import ( - "encoding/binary" - "fmt" - "io" - "math" - - "github.com/prometheus/common/model" -) - -// The 37-byte header of a delta-encoded chunk looks like: -// -// - used buf bytes: 2 bytes -// - time double-delta bytes: 1 byte -// - value double-delta bytes: 1 byte -// - is integer: 1 byte -// - base time: 8 bytes -// - base value: 8 bytes -// - base time delta: 8 bytes -// - base value delta: 8 bytes -const ( - doubleDeltaHeaderBytes = 37 - doubleDeltaHeaderMinBytes = 21 // header isn't full for chunk w/ one sample - - doubleDeltaHeaderBufLenOffset = 0 - doubleDeltaHeaderTimeBytesOffset = 2 - doubleDeltaHeaderValueBytesOffset = 3 - doubleDeltaHeaderIsIntOffset = 4 - doubleDeltaHeaderBaseTimeOffset = 5 - doubleDeltaHeaderBaseValueOffset = 13 - doubleDeltaHeaderBaseTimeDeltaOffset = 21 - doubleDeltaHeaderBaseValueDeltaOffset = 29 -) - -// A doubleDeltaEncodedChunk adaptively stores sample timestamps and values with -// a double-delta encoding of various types (int, float) and bit widths. A base -// value and timestamp and a base delta for each are saved in the header. The -// payload consists of double-deltas, i.e. deviations from the values and -// timestamps calculated by applying the base value and time and the base deltas. -// However, once 8 bytes would be needed to encode a double-delta value, a -// fall-back to the absolute numbers happens (so that timestamps are saved -// directly as int64 and values as float64). -// doubleDeltaEncodedChunk implements the chunk interface. -type doubleDeltaEncodedChunk []byte - -// newDoubleDeltaEncodedChunk returns a newly allocated doubleDeltaEncodedChunk. -func newDoubleDeltaEncodedChunk(tb, vb deltaBytes, isInt bool, length int) *doubleDeltaEncodedChunk { - if tb < 1 { - panic("need at least 1 time delta byte") - } - if length < doubleDeltaHeaderBytes+16 { - panic(fmt.Errorf( - "chunk length %d bytes is insufficient, need at least %d", - length, doubleDeltaHeaderBytes+16, - )) - } - c := make(doubleDeltaEncodedChunk, doubleDeltaHeaderIsIntOffset+1, length) - - c[doubleDeltaHeaderTimeBytesOffset] = byte(tb) - c[doubleDeltaHeaderValueBytesOffset] = byte(vb) - if vb < d8 && isInt { // Only use int for fewer than 8 value double-delta bytes. - c[doubleDeltaHeaderIsIntOffset] = 1 - } else { - c[doubleDeltaHeaderIsIntOffset] = 0 - } - return &c -} - -// Add implements chunk. -func (c *doubleDeltaEncodedChunk) Add(s model.SamplePair) (Chunk, error) { - // TODO(beorn7): Since we return &c, this method might cause an unnecessary allocation. - if c.Len() == 0 { - c.addFirstSample(s) - return nil, nil - } - - tb := c.timeBytes() - vb := c.valueBytes() - - if c.Len() == 1 { - err := c.addSecondSample(s, tb, vb) - return nil, err - } - - remainingBytes := cap(*c) - len(*c) - sampleSize := c.sampleSize() - - // Do we generally have space for another sample in this chunk? If not, - // overflow into a new one. - if remainingBytes < sampleSize { - return addToOverflowChunk(s) - } - - projectedTime := c.baseTime() + model.Time(c.Len())*c.baseTimeDelta() - ddt := s.Timestamp - projectedTime - - projectedValue := c.baseValue() + model.SampleValue(c.Len())*c.baseValueDelta() - ddv := s.Value - projectedValue - - ntb, nvb, nInt := tb, vb, c.isInt() - // If the new sample is incompatible with the current encoding, reencode the - // existing chunk data into new chunk(s). - if c.isInt() && !isInt64(ddv) { - // int->float.
- nvb = d4 - nInt = false - } else if !c.isInt() && vb == d4 && projectedValue+model.SampleValue(float32(ddv)) != s.Value { - // float32->float64. - nvb = d8 - } else { - if tb < d8 { - // Maybe more bytes for timestamp. - ntb = max(tb, bytesNeededForSignedTimestampDelta(ddt)) - } - if c.isInt() && vb < d8 { - // Maybe more bytes for sample value. - nvb = max(vb, bytesNeededForIntegerSampleValueDelta(ddv)) - } - } - if tb != ntb || vb != nvb || c.isInt() != nInt { - if len(*c)*2 < cap(*c) { - result, err := transcodeAndAdd(newDoubleDeltaEncodedChunk(ntb, nvb, nInt, cap(*c)), c, s) - if err != nil { - return nil, err - } - // We cannot handle >2 chunks returned as we can only return 1 chunk. - // Ideally there won't be >2 chunks, but if it happens to be >2, - // we fall through to perform `addToOverflowChunk` instead. - if len(result) == 1 { - // Replace the current chunk with the new bigger chunk. - c0 := result[0].(*doubleDeltaEncodedChunk) - *c = *c0 - return nil, nil - } else if len(result) == 2 { - // Replace the current chunk with the new bigger chunk - // and return the additional chunk. - c0 := result[0].(*doubleDeltaEncodedChunk) - c1 := result[1].(*doubleDeltaEncodedChunk) - *c = *c0 - return c1, nil - } - } - - // Chunk is already half full. Better create a new one and save the transcoding efforts. - // We also perform this if `transcodeAndAdd` resulted in >2 chunks. - return addToOverflowChunk(s) - } - - offset := len(*c) - (*c) = (*c)[:offset+sampleSize] - - switch tb { - case d1: - (*c)[offset] = byte(ddt) - case d2: - binary.LittleEndian.PutUint16((*c)[offset:], uint16(ddt)) - case d4: - binary.LittleEndian.PutUint32((*c)[offset:], uint32(ddt)) - case d8: - // Store the absolute value (no delta) in case of d8. - binary.LittleEndian.PutUint64((*c)[offset:], uint64(s.Timestamp)) - default: - return nil, fmt.Errorf("invalid number of bytes for time delta: %d", tb) - } - - offset += int(tb) - - if c.isInt() { - switch vb { - case d0: - // No-op. Constant delta is stored as base value. - case d1: - (*c)[offset] = byte(int8(ddv)) - case d2: - binary.LittleEndian.PutUint16((*c)[offset:], uint16(int16(ddv))) - case d4: - binary.LittleEndian.PutUint32((*c)[offset:], uint32(int32(ddv))) - // d8 must not happen. Those samples are encoded as float64. - default: - return nil, fmt.Errorf("invalid number of bytes for integer delta: %d", vb) - } - } else { - switch vb { - case d4: - binary.LittleEndian.PutUint32((*c)[offset:], math.Float32bits(float32(ddv))) - case d8: - // Store the absolute value (no delta) in case of d8. - binary.LittleEndian.PutUint64((*c)[offset:], math.Float64bits(float64(s.Value))) - default: - return nil, fmt.Errorf("invalid number of bytes for floating point delta: %d", vb) - } - } - return nil, nil -} - -// FirstTime implements chunk. -func (c doubleDeltaEncodedChunk) FirstTime() model.Time { - return c.baseTime() -} - -// NewIterator implements chunk. -func (c *doubleDeltaEncodedChunk) NewIterator(_ Iterator) Iterator { - return newIndexAccessingChunkIterator(c.Len(), &doubleDeltaEncodedIndexAccessor{ - c: *c, - baseT: c.baseTime(), - baseΔT: c.baseTimeDelta(), - baseV: c.baseValue(), - baseΔV: c.baseValueDelta(), - tBytes: c.timeBytes(), - vBytes: c.valueBytes(), - isInt: c.isInt(), - }) -} - -func (c *doubleDeltaEncodedChunk) Slice(_, _ model.Time) Chunk { - return c -} - -func (c *doubleDeltaEncodedChunk) Rebound(start, end model.Time) (Chunk, error) { - return reboundChunk(c, start, end) -} - -// Marshal implements chunk.
-func (c doubleDeltaEncodedChunk) Marshal(w io.Writer) error { - if len(c) > math.MaxUint16 { - panic("chunk buffer length would overflow a 16 bit uint") - } - binary.LittleEndian.PutUint16(c[doubleDeltaHeaderBufLenOffset:], uint16(len(c))) - - n, err := w.Write(c[:cap(c)]) - if err != nil { - return err - } - if n != cap(c) { - return fmt.Errorf("wanted to write %d bytes, wrote %d", cap(c), n) - } - return nil -} - -// MarshalToBuf implements chunk. -func (c doubleDeltaEncodedChunk) MarshalToBuf(buf []byte) error { - if len(c) > math.MaxUint16 { - panic("chunk buffer length would overflow a 16 bit uint") - } - binary.LittleEndian.PutUint16(c[doubleDeltaHeaderBufLenOffset:], uint16(len(c))) - - n := copy(buf, c) - if n != len(c) { - return fmt.Errorf("wanted to copy %d bytes to buffer, copied %d", len(c), n) - } - return nil -} - -// UnmarshalFromBuf implements chunk. -func (c *doubleDeltaEncodedChunk) UnmarshalFromBuf(buf []byte) error { - (*c) = (*c)[:cap((*c))] - copy((*c), buf) - return c.setLen() -} - -// setLen sets the length of the underlying slice and performs some sanity checks. -func (c *doubleDeltaEncodedChunk) setLen() error { - l := binary.LittleEndian.Uint16((*c)[doubleDeltaHeaderBufLenOffset:]) - if int(l) > cap((*c)) { - return fmt.Errorf("doubledelta chunk length exceeded during unmarshalling: %d", l) - } - if int(l) < doubleDeltaHeaderMinBytes { - return fmt.Errorf("doubledelta chunk length less than header size: %d < %d", l, doubleDeltaHeaderMinBytes) - } - switch c.timeBytes() { - case d1, d2, d4, d8: - // Pass. - default: - return fmt.Errorf("invalid number of time bytes in doubledelta chunk: %d", c.timeBytes()) - } - switch c.valueBytes() { - case d0, d1, d2, d4, d8: - // Pass. - default: - return fmt.Errorf("invalid number of value bytes in doubledelta chunk: %d", c.valueBytes()) - } - (*c) = (*c)[:l] - return nil -} - -// Encoding implements chunk. -func (c doubleDeltaEncodedChunk) Encoding() Encoding { return DoubleDelta } - -// Utilization implements chunk. -func (c doubleDeltaEncodedChunk) Utilization() float64 { - return float64(len(c)-doubleDeltaHeaderIsIntOffset-1) / float64(cap(c)) -} - -func (c doubleDeltaEncodedChunk) baseTime() model.Time { - return model.Time( - binary.LittleEndian.Uint64( - c[doubleDeltaHeaderBaseTimeOffset:], - ), - ) -} - -func (c doubleDeltaEncodedChunk) baseValue() model.SampleValue { - return model.SampleValue( - math.Float64frombits( - binary.LittleEndian.Uint64( - c[doubleDeltaHeaderBaseValueOffset:], - ), - ), - ) -} - -func (c doubleDeltaEncodedChunk) baseTimeDelta() model.Time { - if len(c) < doubleDeltaHeaderBaseTimeDeltaOffset+8 { - return 0 - } - return model.Time( - binary.LittleEndian.Uint64( - c[doubleDeltaHeaderBaseTimeDeltaOffset:], - ), - ) -} - -func (c doubleDeltaEncodedChunk) baseValueDelta() model.SampleValue { - if len(c) < doubleDeltaHeaderBaseValueDeltaOffset+8 { - return 0 - } - return model.SampleValue( - math.Float64frombits( - binary.LittleEndian.Uint64( - c[doubleDeltaHeaderBaseValueDeltaOffset:], - ), - ), - ) -} - -func (c doubleDeltaEncodedChunk) timeBytes() deltaBytes { - return deltaBytes(c[doubleDeltaHeaderTimeBytesOffset]) -} - -func (c doubleDeltaEncodedChunk) valueBytes() deltaBytes { - return deltaBytes(c[doubleDeltaHeaderValueBytesOffset]) -} - -func (c doubleDeltaEncodedChunk) sampleSize() int { - return int(c.timeBytes() + c.valueBytes()) -} - -// Len implements Chunk. Runs in constant time. 
-func (c doubleDeltaEncodedChunk) Len() int { - if len(c) <= doubleDeltaHeaderIsIntOffset+1 { - return 0 - } - if len(c) <= doubleDeltaHeaderBaseValueOffset+8 { - return 1 - } - return (len(c)-doubleDeltaHeaderBytes)/c.sampleSize() + 2 -} - -func (c doubleDeltaEncodedChunk) Size() int { - return len(c) -} - -func (c doubleDeltaEncodedChunk) isInt() bool { - return c[doubleDeltaHeaderIsIntOffset] == 1 -} - -// addFirstSample is a helper method only used by c.add(). It adds timestamp and -// value as base time and value. -func (c *doubleDeltaEncodedChunk) addFirstSample(s model.SamplePair) { - (*c) = (*c)[:doubleDeltaHeaderBaseValueOffset+8] - binary.LittleEndian.PutUint64( - (*c)[doubleDeltaHeaderBaseTimeOffset:], - uint64(s.Timestamp), - ) - binary.LittleEndian.PutUint64( - (*c)[doubleDeltaHeaderBaseValueOffset:], - math.Float64bits(float64(s.Value)), - ) -} - -// addSecondSample is a helper method only used by c.add(). It calculates the -// base delta from the provided sample and adds it to the chunk. -func (c *doubleDeltaEncodedChunk) addSecondSample(s model.SamplePair, tb, vb deltaBytes) error { - baseTimeDelta := s.Timestamp - c.baseTime() - if baseTimeDelta < 0 { - return fmt.Errorf("base time delta is less than zero: %v", baseTimeDelta) - } - (*c) = (*c)[:doubleDeltaHeaderBytes] - if tb >= d8 || bytesNeededForUnsignedTimestampDelta(baseTimeDelta) >= d8 { - // If already the base delta needs d8 (or we are at d8 - // already, anyway), we better encode this timestamp - // directly rather than as a delta and switch everything - // to d8. - (*c)[doubleDeltaHeaderTimeBytesOffset] = byte(d8) - binary.LittleEndian.PutUint64( - (*c)[doubleDeltaHeaderBaseTimeDeltaOffset:], - uint64(s.Timestamp), - ) - } else { - binary.LittleEndian.PutUint64( - (*c)[doubleDeltaHeaderBaseTimeDeltaOffset:], - uint64(baseTimeDelta), - ) - } - baseValue := c.baseValue() - baseValueDelta := s.Value - baseValue - if vb >= d8 || baseValue+baseValueDelta != s.Value { - // If we can't reproduce the original sample value (or - // if we are at d8 already, anyway), we better encode - // this value directly rather than as a delta and switch - // everything to d8. - (*c)[doubleDeltaHeaderValueBytesOffset] = byte(d8) - (*c)[doubleDeltaHeaderIsIntOffset] = 0 - binary.LittleEndian.PutUint64( - (*c)[doubleDeltaHeaderBaseValueDeltaOffset:], - math.Float64bits(float64(s.Value)), - ) - } else { - binary.LittleEndian.PutUint64( - (*c)[doubleDeltaHeaderBaseValueDeltaOffset:], - math.Float64bits(float64(baseValueDelta)), - ) - } - return nil -} - -// doubleDeltaEncodedIndexAccessor implements indexAccessor. -type doubleDeltaEncodedIndexAccessor struct { - c doubleDeltaEncodedChunk - baseT, baseΔT model.Time - baseV, baseΔV model.SampleValue - tBytes, vBytes deltaBytes - isInt bool - lastErr error -} - -func (acc *doubleDeltaEncodedIndexAccessor) err() error { - return acc.lastErr -} - -func (acc *doubleDeltaEncodedIndexAccessor) timestampAtIndex(idx int) model.Time { - if idx == 0 { - return acc.baseT - } - if idx == 1 { - // If time bytes are at d8, the time is saved directly rather - // than as a difference. 
- if acc.tBytes == d8 { - return acc.baseΔT - } - return acc.baseT + acc.baseΔT - } - - offset := doubleDeltaHeaderBytes + (idx-2)*int(acc.tBytes+acc.vBytes) - - switch acc.tBytes { - case d1: - return acc.baseT + - model.Time(idx)*acc.baseΔT + - model.Time(int8(acc.c[offset])) - case d2: - return acc.baseT + - model.Time(idx)*acc.baseΔT + - model.Time(int16(binary.LittleEndian.Uint16(acc.c[offset:]))) - case d4: - return acc.baseT + - model.Time(idx)*acc.baseΔT + - model.Time(int32(binary.LittleEndian.Uint32(acc.c[offset:]))) - case d8: - // Take absolute value for d8. - return model.Time(binary.LittleEndian.Uint64(acc.c[offset:])) - default: - acc.lastErr = fmt.Errorf("invalid number of bytes for time delta: %d", acc.tBytes) - return model.Earliest - } -} - -func (acc *doubleDeltaEncodedIndexAccessor) sampleValueAtIndex(idx int) model.SampleValue { - if idx == 0 { - return acc.baseV - } - if idx == 1 { - // If value bytes are at d8, the value is saved directly rather - // than as a difference. - if acc.vBytes == d8 { - return acc.baseΔV - } - return acc.baseV + acc.baseΔV - } - - offset := doubleDeltaHeaderBytes + (idx-2)*int(acc.tBytes+acc.vBytes) + int(acc.tBytes) - - if acc.isInt { - switch acc.vBytes { - case d0: - return acc.baseV + - model.SampleValue(idx)*acc.baseΔV - case d1: - return acc.baseV + - model.SampleValue(idx)*acc.baseΔV + - model.SampleValue(int8(acc.c[offset])) - case d2: - return acc.baseV + - model.SampleValue(idx)*acc.baseΔV + - model.SampleValue(int16(binary.LittleEndian.Uint16(acc.c[offset:]))) - case d4: - return acc.baseV + - model.SampleValue(idx)*acc.baseΔV + - model.SampleValue(int32(binary.LittleEndian.Uint32(acc.c[offset:]))) - // No d8 for ints. - default: - acc.lastErr = fmt.Errorf("invalid number of bytes for integer delta: %d", acc.vBytes) - return 0 - } - } else { - switch acc.vBytes { - case d4: - return acc.baseV + - model.SampleValue(idx)*acc.baseΔV + - model.SampleValue(math.Float32frombits(binary.LittleEndian.Uint32(acc.c[offset:]))) - case d8: - // Take absolute value for d8. - return model.SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(acc.c[offset:]))) - default: - acc.lastErr = fmt.Errorf("invalid number of bytes for floating point delta: %d", acc.vBytes) - return 0 - } - } -} diff --git a/internal/cortex/chunk/encoding/factory.go b/internal/cortex/chunk/encoding/factory.go deleted file mode 100644 index 15e3968553..0000000000 --- a/internal/cortex/chunk/encoding/factory.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package encoding - -import ( - "errors" - "flag" - "fmt" - "strconv" -) - -// Encoding defines which encoding we are using, delta, doubledelta, or varbit -type Encoding byte - -// Config configures the behaviour of chunk encoding -type Config struct{} - -var ( - // DefaultEncoding exported for use in unit tests elsewhere - DefaultEncoding = Bigchunk - bigchunkSizeCapBytes = 0 -) - -// RegisterFlags registers configuration settings. -func (Config) RegisterFlags(f *flag.FlagSet) { - f.Var(&DefaultEncoding, "ingester.chunk-encoding", "Encoding version to use for chunks.") - f.IntVar(&bigchunkSizeCapBytes, "store.bigchunk-size-cap-bytes", bigchunkSizeCapBytes, "When using bigchunk encoding, start a new bigchunk if over this size (0 = unlimited)") -} - -// Validate errors out if the encoding is set to Delta. -func (Config) Validate() error { - if DefaultEncoding == Delta { - // Delta is deprecated. 
- return errors.New("delta encoding is deprecated") - } - return nil -} - -// String implements flag.Value. -func (e Encoding) String() string { - if known, found := encodings[e]; found { - return known.Name - } - return fmt.Sprintf("%d", e) -} - -const ( - // Delta encoding is no longer supported and will be automatically changed to DoubleDelta. - // It still exists here to not change the `ingester.chunk-encoding` flag values. - Delta Encoding = iota - // DoubleDelta encoding - DoubleDelta - // Varbit encoding - Varbit - // Bigchunk encoding - Bigchunk - // PrometheusXorChunk is a wrapper around Prometheus XOR-encoded chunk. - PrometheusXorChunk -) - -type encoding struct { - Name string - New func() Chunk -} - -var encodings = map[Encoding]encoding{ - DoubleDelta: { - Name: "DoubleDelta", - New: func() Chunk { - return newDoubleDeltaEncodedChunk(d1, d0, true, ChunkLen) - }, - }, - Varbit: { - Name: "Varbit", - New: func() Chunk { - return newVarbitChunk(varbitZeroEncoding) - }, - }, - Bigchunk: { - Name: "Bigchunk", - New: func() Chunk { - return newBigchunk() - }, - }, - PrometheusXorChunk: { - Name: "PrometheusXorChunk", - New: func() Chunk { - return newPrometheusXorChunk() - }, - }, -} - -// Set implements flag.Value. -func (e *Encoding) Set(s string) error { - // First see if the name was given - for k, v := range encodings { - if s == v.Name { - *e = k - return nil - } - } - // Otherwise, accept a number - i, err := strconv.Atoi(s) - if err != nil { - return err - } - - _, ok := encodings[Encoding(i)] - if !ok { - return fmt.Errorf("invalid chunk encoding: %s", s) - } - - *e = Encoding(i) - return nil -} - -// New creates a new chunk according to the encoding set by the -// DefaultEncoding flag. -func New() Chunk { - chunk, err := NewForEncoding(DefaultEncoding) - if err != nil { - panic(err) - } - return chunk -} - -// NewForEncoding allows configuring what chunk type you want -func NewForEncoding(encoding Encoding) (Chunk, error) { - enc, ok := encodings[encoding] - if !ok { - return nil, fmt.Errorf("unknown chunk encoding: %v", encoding) - } - - return enc.New(), nil -} - -// MustRegisterEncoding add a new chunk encoding. There is no locking, so this -// must be called in init(). -func MustRegisterEncoding(enc Encoding, name string, f func() Chunk) { - _, ok := encodings[enc] - if ok { - panic("double register encoding") - } - - encodings[enc] = encoding{ - Name: name, - New: f, - } -} diff --git a/internal/cortex/chunk/encoding/instrumentation.go b/internal/cortex/chunk/encoding/instrumentation.go deleted file mode 100644 index 97d96cecad..0000000000 --- a/internal/cortex/chunk/encoding/instrumentation.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package encoding - -import "github.com/prometheus/client_golang/prometheus" - -// Usually, a separate file for instrumentation is frowned upon. Metrics should -// be close to where they are used. However, the metrics below are set all over -// the place, so we go for a separate instrumentation file in this case. -var ( - Ops = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "chunk_ops_total", - Help: "The total number of chunk operations by their type.", - }, - []string{OpTypeLabel}, - ) - DescOps = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "chunkdesc_ops_total", - Help: "The total number of chunk descriptor operations by their type.", - }, - []string{OpTypeLabel}, - ) - NumMemDescs = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "memory_chunkdescs", - Help: "The current number of chunk descriptors in memory.", - }) -) - -const ( - namespace = "prometheus" - subsystem = "local_storage" - - // OpTypeLabel is the label name for chunk operation types. - OpTypeLabel = "type" - - // Op-types for ChunkOps. - - // CreateAndPin is the label value for create-and-pin chunk ops. - CreateAndPin = "create" // A Desc creation with refCount=1. - // PersistAndUnpin is the label value for persist chunk ops. - PersistAndUnpin = "persist" - // Pin is the label value for pin chunk ops (excludes pin on creation). - Pin = "pin" - // Unpin is the label value for unpin chunk ops (excludes the unpin on persisting). - Unpin = "unpin" - // Transcode is the label value for transcode chunk ops. - Transcode = "transcode" - // Drop is the label value for drop chunk ops. - Drop = "drop" - - // Op-types for ChunkOps and ChunkDescOps. - - // Evict is the label value for evict chunk desc ops. - Evict = "evict" - // Load is the label value for load chunk and chunk desc ops. - Load = "load" -) - -func init() { - prometheus.MustRegister(Ops) - prometheus.MustRegister(DescOps) - prometheus.MustRegister(NumMemDescs) -} - -// NumMemChunks is the total number of chunks in memory. This is a global -// counter, also used internally, so not implemented as a metric. Collected in -// MemorySeriesStorage. -// TODO(beorn7): Having this as an exported global variable is really bad. -var NumMemChunks int64 diff --git a/internal/cortex/chunk/encoding/prometheus_chunk.go b/internal/cortex/chunk/encoding/prometheus_chunk.go deleted file mode 100644 index 69ee16cd55..0000000000 --- a/internal/cortex/chunk/encoding/prometheus_chunk.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package encoding - -import ( - "io" - - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/tsdb/chunkenc" -) - -// Wrapper around a Prometheus chunk. -type prometheusXorChunk struct { - chunk chunkenc.Chunk -} - -func newPrometheusXorChunk() *prometheusXorChunk { - return &prometheusXorChunk{} -} - -// Add adds another sample to the chunk. While Add works, it is only implemented -// to make tests work, and should not be used in production. In particular, it appends -// all samples to a single chunk, and uses a new Appender for each Add.
-func (p *prometheusXorChunk) Add(m model.SamplePair) (Chunk, error) { - if p.chunk == nil { - p.chunk = chunkenc.NewXORChunk() - } - - app, err := p.chunk.Appender() - if err != nil { - return nil, err - } - - app.Append(int64(m.Timestamp), float64(m.Value)) - return nil, nil -} - -func (p *prometheusXorChunk) NewIterator(iterator Iterator) Iterator { - if p.chunk == nil { - return errorIterator("Prometheus chunk is not set") - } - - if pit, ok := iterator.(*prometheusChunkIterator); ok { - pit.c = p.chunk - pit.it = p.chunk.Iterator(pit.it) - return pit - } - - return &prometheusChunkIterator{c: p.chunk, it: p.chunk.Iterator(nil)} -} - -func (p *prometheusXorChunk) Marshal(i io.Writer) error { - if p.chunk == nil { - return errors.New("chunk data not set") - } - _, err := i.Write(p.chunk.Bytes()) - return err -} - -func (p *prometheusXorChunk) UnmarshalFromBuf(bytes []byte) error { - c, err := chunkenc.FromData(chunkenc.EncXOR, bytes) - if err != nil { - return errors.Wrap(err, "failed to create Prometheus chunk from bytes") - } - - p.chunk = c - return nil -} - -func (p *prometheusXorChunk) Encoding() Encoding { - return PrometheusXorChunk -} - -func (p *prometheusXorChunk) Utilization() float64 { - // Used for reporting when chunk is used to store new data. - return 0 -} - -func (p *prometheusXorChunk) Slice(_, _ model.Time) Chunk { - return p -} - -func (p *prometheusXorChunk) Rebound(from, to model.Time) (Chunk, error) { - return nil, errors.New("Rebound not supported by PrometheusXorChunk") -} - -func (p *prometheusXorChunk) Len() int { - if p.chunk == nil { - return 0 - } - return p.chunk.NumSamples() -} - -func (p *prometheusXorChunk) Size() int { - if p.chunk == nil { - return 0 - } - return len(p.chunk.Bytes()) -} - -type prometheusChunkIterator struct { - c chunkenc.Chunk // we need chunk, because FindAtOrAfter needs to start with fresh iterator. - it chunkenc.Iterator -} - -func (p *prometheusChunkIterator) Scan() bool { - return p.it.Next() -} - -func (p *prometheusChunkIterator) FindAtOrAfter(time model.Time) bool { - // FindAtOrAfter must return OLDEST value at given time. That means we need to start with a fresh iterator, - // otherwise we cannot guarantee OLDEST. - p.it = p.c.Iterator(p.it) - return p.it.Seek(int64(time)) -} - -func (p *prometheusChunkIterator) Value() model.SamplePair { - ts, val := p.it.At() - return model.SamplePair{ - Timestamp: model.Time(ts), - Value: model.SampleValue(val), - } -} - -func (p *prometheusChunkIterator) Batch(size int) Batch { - var batch Batch - j := 0 - for j < size { - t, v := p.it.At() - batch.Timestamps[j] = t - batch.Values[j] = v - j++ - if j < size && !p.it.Next() { - break - } - } - batch.Index = 0 - batch.Length = j - return batch -} - -func (p *prometheusChunkIterator) Err() error { - return p.it.Err() -} - -type errorIterator string - -func (e errorIterator) Scan() bool { return false } -func (e errorIterator) FindAtOrAfter(time model.Time) bool { return false } -func (e errorIterator) Value() model.SamplePair { panic("no values") } -func (e errorIterator) Batch(size int) Batch { panic("no values") } -func (e errorIterator) Err() error { return errors.New(string(e)) } diff --git a/internal/cortex/chunk/encoding/varbit.go b/internal/cortex/chunk/encoding/varbit.go deleted file mode 100644 index 03faaf9fe2..0000000000 --- a/internal/cortex/chunk/encoding/varbit.go +++ /dev/null @@ -1,1232 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
-
-// This file was taken from Prometheus (https://github.com/prometheus/prometheus).
-// The original license header is included below:
-//
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// nolint //Since this was copied from Prometheus leave it as is
-package encoding
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-	"math"
-
-	"github.com/prometheus/common/model"
-)
-
-// The varbit chunk encoding is broadly similar to the double-delta
-// chunks. However, it uses a number of different bit-widths to save the
-// double-deltas (rather than 1, 2, or 4 bytes). Also, it doesn't use the delta
-// of the first two samples of a chunk as the base delta, but uses a "sliding"
-// delta, i.e. the delta of the two previous samples. Both differences make
-// random access more expensive. Sample values can be encoded with the same
-// double-delta scheme as timestamps, but different value encodings can be
-// chosen adaptively, among them XOR encoding and "zero" encoding for constant
-// sample values. Overall, the varbit encoding results in a much better
-// compression ratio (~1.3 bytes per sample compared to ~3.3 bytes per sample
-// with double-delta encoding, for typical data sets).
-//
-// Major parts of the varbit encoding are inspired by the following paper:
-//   Gorilla: A Fast, Scalable, In-Memory Time Series Database
-//   T. Pelkonen et al., Facebook Inc.
-//   http://www.vldb.org/pvldb/vol8/p1816-teller.pdf
-// Note that there are significant differences, some due to the way Prometheus
-// chunks work, others to optimize for the Prometheus use-case.
-//
-// Layout of a 1024 byte varbit chunk (big endian, wherever it matters):
-// - first time (int64):                   8 bytes   bit 0000-0063
-// - first value (float64):                8 bytes   bit 0064-0127
-// - last time (int64):                    8 bytes   bit 0128-0191
-// - last value (float64):                 8 bytes   bit 0192-0255
-// - first Δt (t1-t0, unsigned):           3 bytes   bit 0256-0279
-// - flags (byte)                          1 byte    bit 0280-0287
-// - bit offset for next sample            2 bytes   bit 0288-0303
-// - first Δv for value encoding 1, otherwise payload
-//                                         4 bytes   bit 0304-0335
-// - payload                             973 bytes   bit 0336-8119
-// The following only exists if the chunk is still open. Otherwise, it might be
-// used by payload.
-// - bit offset for current ΔΔt=0 count    2 bytes   bit 8120-8135
-// - last Δt                               3 bytes   bit 8136-8159
-// - special bytes for value encoding      4 bytes   bit 8160-8191
-//   - for encoding 1: last Δv             4 bytes   bit 8160-8191
-//   - for encoding 2: count of
-//     - last leading zeros (1 byte)       1 byte    bit 8160-8167
-//     - last significant bits (1 byte)    1 byte    bit 8168-8175
-//
-// FLAGS
-//
-// The two least significant bits of the flags byte define the value encoding
-// for the whole chunk, see below. The most significant bit of the flags byte
-// is set if the chunk is closed. No samples can be added anymore to a closed
-// chunk.
-// Furthermore, the last value of a closed chunk is only saved in the
-// header (last time, last value), while in a chunk that is still open, the last
-// sample in the payload is the same sample as saved in the header.
-//
-// The remaining bits in the flags byte are currently unused.
-//
-// TIMESTAMP ENCODING
-//
-// The 1st timestamp is saved directly.
-//
-// The difference to the 2nd timestamp is saved as first Δt. 3 bytes is enough
-// for about 4.5h (2^24 ms ≈ 4.66h). Since we close a chunk after sitting idle
-// for 1h, this limitation has no practical consequences. Should, for whatever
-// reason, a larger delta be required, the chunk would be closed, i.e. the new
-// sample is added as the last sample to the chunk, and the next sample will be
-// added to a new chunk.
-//
-// From the 3rd timestamp on, a double-delta (ΔΔt) is saved:
-//   (t_{n} - t_{n-1}) - (t_{n-1} - t_{n-2})
-// To perform that operation, the last Δt is saved at the end of the chunk for
-// as long as the chunk is not closed yet (see above).
-//
-// Most of the time, ΔΔt is zero, even with the ms-precision of
-// Prometheus. Therefore, we save a ΔΔt of zero as a leading '0' bit followed by
-// 7 bits counting the number of consecutive ΔΔt==0 (the count is offset by -1,
-// so the range of 0 to 127 represents 1 to 128 repetitions).
-//
-// If ΔΔt != 0, we essentially apply the Gorilla encoding scheme (cf. section
-// 4.1.1 in the paper) but with different bit buckets (as Prometheus uses ms
-// rather than s, and the default scrape interval is 1m rather than 4m). In
-// particular:
-//
-// - If ΔΔt is between [-32,31], store '10' followed by a 6 bit value. This is
-//   for minor irregularities in the scrape interval.
-//
-// - If ΔΔt is between [-65536,65535], store '110' followed by a 17 bit
-//   value. This will typically happen if a scrape is missed completely.
-//
-// - If ΔΔt is between [-4194304,4194303], store '111' followed by a 23 bit
-//   value. This spans more than 1h, which is usually enough as we close a
-//   chunk anyway if it doesn't receive any sample in 1h.
-//
-// - Should we nevertheless encounter a larger ΔΔt, we simply close the chunk,
-//   add the new sample as the last of the chunk, and add subsequent samples to
-//   a new chunk.
-//
-// VALUE ENCODING
-//
-// Value encoding can change and is determined by the two least significant bits
-// of the 'flags' byte at bit position 280. The encoding can be changed without
-// transcoding upon adding the 3rd sample. After that, an encoding change
-// results either in transcoding or in closing the chunk.
-//
-// The 1st sample value is always saved directly. The 2nd sample value is saved
-// in the header as the last value. Upon saving the 3rd value, an encoding is
-// chosen, and the chunk is prepared accordingly.
-//
-// The following value encodings exist (with their value in the flags byte):
-//
-// 0: "Zero encoding".
-//
-// In many time series, the value simply stays constant over a long time
-// (e.g. the "up" time series). In that case, all sample values are determined
-// by the 1st value, and no further value encoding is happening at all. The
-// payload consists entirely of timestamps.
-//
-// 1: Integer double-delta encoding.
-//
-// Many Prometheus metrics are integer counters and change in a quite regular
-// fashion, similar to timestamps. Thus, the same double-delta encoding can be
-// applied. This encoding works like the timestamp encoding described above, but
-// with different bit buckets and without counting of repeated ΔΔv=0.
-// The case of ΔΔv=0 is represented by a single '0' bit for each occurrence. The
-// first Δv is saved as an int32 at bit position 288. The most recent Δv is
-// saved as an int32 at the end of the chunk (see above). If Δv cannot be
-// represented as a 32 bit signed integer, no integer double-delta encoding can
-// be applied.
-//
-// Bit buckets (lead-in bits followed by (signed) value bits):
-// - '0': 0 bit
-// - '10': 6 bit
-// - '110': 13 bit
-// - '1110': 20 bit
-// - '1111': 33 bit
-// Since Δv is restricted to 32 bit, 33 bits are always enough for ΔΔv.
-//
-// 2: XOR encoding.
-//
-// This follows almost precisely the Gorilla value encoding (cf. section 4.1.2
-// of the paper). The last count of leading zeros and the last count of
-// meaningful bits in the XOR value are saved at the end of the chunk for as
-// long as the chunk is not closed yet (see above). Note, though, that the
-// number of significant bits is saved as (count-1), i.e. a saved value of 0
-// means 1 significant bit, a saved value of 1 means 2, and so on. Also, we save
-// the numbers of leading zeros and significant bits anew if they drop a
-// lot. Otherwise, you can easily be locked in with a high number of significant
-// bits.
-//
-// 3: Direct encoding.
-//
-// If the sample values are just random, it is most efficient to save sample
-// values directly as float64.
-//
-// ZIPPING TIMESTAMPS AND VALUES TOGETHER
-//
-// Usually, encoded timestamps and encoded values simply alternate. There are
-// two exceptions:
-//
-// (1) With the "zero encoding" for values, the payload only contains
-// timestamps.
-//
-// (2) In a consecutive row of up to 128 ΔΔt=0 repeats, the count of timestamps
-// determines how many sample values will follow directly after another.
-
-const (
-	varbitMinLength = 128
-	varbitMaxLength = 8191
-
-	// Useful byte offsets.
-	varbitFirstTimeOffset           = 0
-	varbitFirstValueOffset          = 8
-	varbitLastTimeOffset            = 16
-	varbitLastValueOffset           = 24
-	varbitFirstTimeDeltaOffset      = 32
-	varbitFlagOffset                = 35
-	varbitNextSampleBitOffsetOffset = 36
-	varbitFirstValueDeltaOffset     = 38
-	// The following are in the "footer" and only usable if the chunk is
-	// still open.
-	varbitCountOffsetBitOffset           = ChunkLen - 9
-	varbitLastTimeDeltaOffset            = ChunkLen - 7
-	varbitLastValueDeltaOffset           = ChunkLen - 4
-	varbitLastLeadingZerosCountOffset    = ChunkLen - 4
-	varbitLastSignificantBitsCountOffset = ChunkLen - 3
-
-	varbitFirstSampleBitOffset  uint16 = 0 // Symbolic, don't really read or write here.
-	varbitSecondSampleBitOffset uint16 = 1 // Symbolic, don't really read or write here.
-	// varbitThirdSampleBitOffset is a bit special. Depending on the encoding, there can
-	// be various things at this offset. It's most of the time symbolic, but in the best
-	// case (zero encoding for values), it will be the real offset for the 3rd sample.
-	varbitThirdSampleBitOffset uint16 = varbitFirstValueDeltaOffset * 8
-
-	// If the bit offset for the next sample is above this threshold, no new
-	// samples can be added to the chunk's payload (because the payload has
-	// already reached the footer). However, one more sample can be saved in
-	// the header as the last sample.
-	varbitNextSampleBitOffsetThreshold = 8 * varbitCountOffsetBitOffset
-
-	varbitMaxTimeDelta = 1 << 24 // What fits into a 3-byte timestamp.
-) - -type varbitValueEncoding byte - -const ( - varbitZeroEncoding varbitValueEncoding = iota - varbitIntDoubleDeltaEncoding - varbitXOREncoding - varbitDirectEncoding -) - -// varbitWorstCaseBitsPerSample provides the worst-case number of bits needed -// per sample with the various value encodings. The counts already include the -// up to 27 bits taken by a timestamp. -var varbitWorstCaseBitsPerSample = map[varbitValueEncoding]int{ - varbitZeroEncoding: 27 + 0, - varbitIntDoubleDeltaEncoding: 27 + 38, - varbitXOREncoding: 27 + 13 + 64, - varbitDirectEncoding: 27 + 64, -} - -// varbitChunk implements the chunk interface. -type varbitChunk []byte - -// newVarbitChunk returns a newly allocated varbitChunk. For simplicity, all -// varbit chunks must have the length as determined by the ChunkLen constant. -func newVarbitChunk(enc varbitValueEncoding) *varbitChunk { - if ChunkLen < varbitMinLength || ChunkLen > varbitMaxLength { - panic(fmt.Errorf( - "invalid chunk length of %d bytes, need at least %d bytes and at most %d bytes", - ChunkLen, varbitMinLength, varbitMaxLength, - )) - } - if enc > varbitDirectEncoding { - panic(fmt.Errorf("unknown varbit value encoding: %v", enc)) - } - c := make(varbitChunk, ChunkLen) - c.setValueEncoding(enc) - return &c -} - -// Add implements chunk. -func (c *varbitChunk) Add(s model.SamplePair) (Chunk, error) { - offset := c.nextSampleOffset() - switch { - case c.closed(): - return addToOverflowChunk(s) - case offset > varbitNextSampleBitOffsetThreshold: - c.addLastSample(s) - return nil, nil - case offset == varbitFirstSampleBitOffset: - c.addFirstSample(s) - return nil, nil - case offset == varbitSecondSampleBitOffset: - err := c.addSecondSample(s) - return nil, err - } - return c.addLaterSample(s, offset) -} - -// NewIterator implements chunk. -func (c varbitChunk) NewIterator(_ Iterator) Iterator { - return newVarbitChunkIterator(c) -} - -func (c *varbitChunk) Slice(_, _ model.Time) Chunk { - return c -} - -func (c *varbitChunk) Rebound(start, end model.Time) (Chunk, error) { - return reboundChunk(c, start, end) -} - -// Marshal implements chunk. -func (c varbitChunk) Marshal(w io.Writer) error { - size := c.Size() - n, err := w.Write(c[:size]) - if err != nil { - return err - } - if n != size { - return fmt.Errorf("wanted to write %d bytes, wrote %d", size, n) - } - return nil -} - -// UnmarshalFromBuf implements chunk. -func (c varbitChunk) UnmarshalFromBuf(buf []byte) error { - if copied := copy(c, buf); copied != cap(c) && copied != c.marshalLen() { - return fmt.Errorf("incorrect byte count copied from buffer during unmarshalling, want %d or %d, got %d", c.marshalLen(), ChunkLen, copied) - } - return nil -} - -// Encoding implements chunk. -func (c varbitChunk) Encoding() Encoding { return Varbit } - -// Utilization implements chunk. -func (c varbitChunk) Utilization() float64 { - // 15 bytes is the length of the chunk footer. - return math.Min(float64(c.nextSampleOffset()/8+15)/float64(cap(c)), 1) -} - -// marshalLen returns the number of bytes that should be marshalled for this chunk -// (if someone has used a version of this code that doesn't just send 1024 every time) -func (c varbitChunk) marshalLen() int { - bits := c.nextSampleOffset() - if bits < varbitThirdSampleBitOffset { - bits = varbitThirdSampleBitOffset - } - bytes := int(bits)/8 + 1 - if bytes > len(c) { - bytes = len(c) - } - return bytes -} - -// Len implements chunk. Runs in O(n). 
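// Note: Len is O(n) because the varbit header stores no sample count; the
// only way to count samples is to decode the whole payload with an iterator,
// as the loop below does.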
-func (c varbitChunk) Len() int {
-	it := c.NewIterator(nil)
-	i := 0
-	for ; it.Scan(); i++ {
-	}
-	return i
-}
-
-func (c varbitChunk) Size() int {
-	return cap(c)
-}
-
-func (c varbitChunk) firstTime() model.Time {
-	return model.Time(
-		binary.BigEndian.Uint64(
-			c[varbitFirstTimeOffset:],
-		),
-	)
-}
-
-func (c varbitChunk) firstValue() model.SampleValue {
-	return model.SampleValue(
-		math.Float64frombits(
-			binary.BigEndian.Uint64(
-				c[varbitFirstValueOffset:],
-			),
-		),
-	)
-}
-
-func (c varbitChunk) lastTime() model.Time {
-	return model.Time(
-		binary.BigEndian.Uint64(
-			c[varbitLastTimeOffset:],
-		),
-	)
-}
-
-func (c varbitChunk) lastValue() model.SampleValue {
-	return model.SampleValue(
-		math.Float64frombits(
-			binary.BigEndian.Uint64(
-				c[varbitLastValueOffset:],
-			),
-		),
-	)
-}
-
-func (c varbitChunk) firstTimeDelta() model.Time {
-	// Only the first 3 bytes are actually the timestamp, so get rid of the
-	// last one by bitshifting.
-	return model.Time(c[varbitFirstTimeDeltaOffset+2]) |
-		model.Time(c[varbitFirstTimeDeltaOffset+1])<<8 |
-		model.Time(c[varbitFirstTimeDeltaOffset])<<16
-}
-
-// firstValueDelta returns an undefined result if the encoding type is not 1.
-func (c varbitChunk) firstValueDelta() int32 {
-	return int32(binary.BigEndian.Uint32(c[varbitFirstValueDeltaOffset:]))
-}
-
-// lastTimeDelta returns an undefined result if the chunk is closed already.
-func (c varbitChunk) lastTimeDelta() model.Time {
-	return model.Time(c[varbitLastTimeDeltaOffset+2]) |
-		model.Time(c[varbitLastTimeDeltaOffset+1])<<8 |
-		model.Time(c[varbitLastTimeDeltaOffset])<<16
-}
-
-// setLastTimeDelta must not be called if the chunk is closed already. It must
-// not be called with a time that doesn't fit into 24 bit, either.
-func (c varbitChunk) setLastTimeDelta(dT model.Time) {
-	if dT > varbitMaxTimeDelta {
-		panic("Δt overflows 24 bit")
-	}
-	c[varbitLastTimeDeltaOffset] = byte(dT >> 16)
-	c[varbitLastTimeDeltaOffset+1] = byte(dT >> 8)
-	c[varbitLastTimeDeltaOffset+2] = byte(dT)
-}
-
-// lastValueDelta returns an undefined result if the chunk is closed already.
-func (c varbitChunk) lastValueDelta() int32 {
-	return int32(binary.BigEndian.Uint32(c[varbitLastValueDeltaOffset:]))
-}
-
-// setLastValueDelta must not be called if the chunk is closed already.
-func (c varbitChunk) setLastValueDelta(dV int32) {
-	binary.BigEndian.PutUint32(c[varbitLastValueDeltaOffset:], uint32(dV))
-}
-
-func (c varbitChunk) nextSampleOffset() uint16 {
-	return binary.BigEndian.Uint16(c[varbitNextSampleBitOffsetOffset:])
-}
-
-func (c varbitChunk) setNextSampleOffset(offset uint16) {
-	binary.BigEndian.PutUint16(c[varbitNextSampleBitOffsetOffset:], offset)
-}
-
-func (c varbitChunk) valueEncoding() varbitValueEncoding {
-	return varbitValueEncoding(c[varbitFlagOffset] & 0x03)
-}
-
-func (c varbitChunk) setValueEncoding(enc varbitValueEncoding) {
-	if enc > varbitDirectEncoding {
-		panic("invalid varbit value encoding")
-	}
-	c[varbitFlagOffset] &^= 0x03     // Clear.
-	c[varbitFlagOffset] |= byte(enc) // Set.
-}
-
-func (c varbitChunk) closed() bool {
-	return c[varbitFlagOffset] > 0x7F // Most significant bit set.
-}
-
-func (c varbitChunk) zeroDDTRepeats() (repeats uint64, offset uint16) {
-	offset = binary.BigEndian.Uint16(c[varbitCountOffsetBitOffset:])
-	if offset == 0 {
-		return 0, 0
-	}
-	return c.readBitPattern(offset, 7) + 1, offset
-}
-
-func (c varbitChunk) setZeroDDTRepeats(repeats uint64, offset uint16) {
-	switch repeats {
-	case 0:
-		// Just clear the offset.
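// Note: repeats == 0 means no ΔΔt=0 run is currently open, so resetting the
// stored counter offset below is all that is needed; the 7-bit counter itself
// only exists while a run is in progress.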
-		binary.BigEndian.PutUint16(c[varbitCountOffsetBitOffset:], 0)
-		return
-	case 1:
-		// First time we set a repeat here, so set the offset. But only
-		// if we haven't reached the footer yet. (If that's the case, we
-		// would overwrite ourselves below, and we don't need the offset
-		// later anyway because no more samples will be added to this
-		// chunk.)
-		if offset+7 <= varbitNextSampleBitOffsetThreshold {
-			binary.BigEndian.PutUint16(c[varbitCountOffsetBitOffset:], offset)
-		}
-	default:
-		// For a change, we are writing somewhere where we have written
-		// before. We need to clear the bits first.
-		posIn1stByte := offset % 8
-		c[offset/8] &^= bitMask[7][posIn1stByte]
-		if posIn1stByte > 1 {
-			c[offset/8+1] &^= bitMask[posIn1stByte-1][0]
-		}
-	}
-	c.addBitPattern(offset, repeats-1, 7)
-}
-
-func (c varbitChunk) setLastSample(s model.SamplePair) {
-	binary.BigEndian.PutUint64(
-		c[varbitLastTimeOffset:],
-		uint64(s.Timestamp),
-	)
-	binary.BigEndian.PutUint64(
-		c[varbitLastValueOffset:],
-		math.Float64bits(float64(s.Value)),
-	)
-}
-
-// addFirstSample is a helper method only used by c.add(). It adds timestamp and
-// value as base time and value.
-func (c *varbitChunk) addFirstSample(s model.SamplePair) {
-	binary.BigEndian.PutUint64(
-		(*c)[varbitFirstTimeOffset:],
-		uint64(s.Timestamp),
-	)
-	binary.BigEndian.PutUint64(
-		(*c)[varbitFirstValueOffset:],
-		math.Float64bits(float64(s.Value)),
-	)
-	c.setLastSample(s) // To simplify handling of single-sample chunks.
-	c.setNextSampleOffset(varbitSecondSampleBitOffset)
-}
-
-// addSecondSample is a helper method only used by c.add(). It calculates the
-// first time delta from the provided sample and adds it to the chunk together
-// with the provided sample as the last sample.
-func (c *varbitChunk) addSecondSample(s model.SamplePair) error {
-	firstTimeDelta := s.Timestamp - c.firstTime()
-	if firstTimeDelta < 0 {
-		return fmt.Errorf("first Δt is less than zero: %v", firstTimeDelta)
-	}
-	if firstTimeDelta > varbitMaxTimeDelta {
-		// A time delta too great. Still, we can add it as a last sample
-		// before overflowing.
-		c.addLastSample(s)
-		return nil
-	}
-	(*c)[varbitFirstTimeDeltaOffset] = byte(firstTimeDelta >> 16)
-	(*c)[varbitFirstTimeDeltaOffset+1] = byte(firstTimeDelta >> 8)
-	(*c)[varbitFirstTimeDeltaOffset+2] = byte(firstTimeDelta)
-
-	// Also set firstTimeDelta as the last time delta to be able to use the
-	// normal methods for adding later samples.
-	c.setLastTimeDelta(firstTimeDelta)
-
-	c.setLastSample(s)
-	c.setNextSampleOffset(varbitThirdSampleBitOffset)
-	return nil
-}
-
-// addLastSample is a helper method only used by c.add() and in other helper
-// methods called by c.add(). It simply sets the given sample as the last sample
-// in the header and declares the chunk closed. In other words, addLastSample
-// adds the very last sample added to this chunk ever, while setLastSample sets
-// the sample most recently added to the chunk so that it can be used for the
-// calculations required to add the next sample.
-func (c *varbitChunk) addLastSample(s model.SamplePair) {
-	c.setLastSample(s)
-	(*c)[varbitFlagOffset] |= 0x80
-	return
-}
-
-// addLaterSample is a helper method only used by c.add(). It adds a third or
-// later sample.
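// Note: the Chunk returned below is non-nil only when the sample could not be
// stored in place and spilled into a new chunk, either via addToOverflowChunk
// or as the second half of a transcoded pair.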
-func (c *varbitChunk) addLaterSample(s model.SamplePair, offset uint16) (Chunk, error) {
-	var (
-		lastTime      = c.lastTime()
-		lastTimeDelta = c.lastTimeDelta()
-		newTimeDelta  = s.Timestamp - lastTime
-		lastValue     = c.lastValue()
-		encoding      = c.valueEncoding()
-	)
-
-	if newTimeDelta < 0 {
-		return nil, fmt.Errorf("Δt is less than zero: %v", newTimeDelta)
-	}
-	if offset == varbitThirdSampleBitOffset {
-		offset, encoding = c.prepForThirdSample(lastValue, s.Value, encoding)
-	}
-	if newTimeDelta > varbitMaxTimeDelta {
-		// A time delta too great. Still, we can add it as a last sample
-		// before overflowing.
-		c.addLastSample(s)
-		return nil, nil
-	}
-
-	// Analyze worst case, does it fit? If not, set new sample as the last.
-	if int(offset)+varbitWorstCaseBitsPerSample[encoding] > ChunkLen*8 {
-		c.addLastSample(s)
-		return nil, nil
-	}
-
-	// Transcoding/overflow decisions first.
-	if encoding == varbitZeroEncoding && s.Value != lastValue {
-		// Cannot go on with zero encoding.
-		if offset <= ChunkLen*4 {
-			var result []Chunk
-			var err error
-			if isInt32(s.Value - lastValue) {
-				// Trying int encoding looks promising.
-				result, err = transcodeAndAdd(newVarbitChunk(varbitIntDoubleDeltaEncoding), c, s)
-			} else {
-				result, err = transcodeAndAdd(newVarbitChunk(varbitXOREncoding), c, s)
-			}
-			if err != nil {
-				return nil, err
-			}
-
-			// We cannot handle >2 chunks returned as we can only return 1 chunk.
-			// Ideally there won't be >2 chunks, but if it happens to be >2,
-			// we fall through to perform `addToOverflowChunk` instead.
-			if len(result) == 1 {
-				// Replace the current chunk with the new bigger chunk.
-				c0 := result[0].(*varbitChunk)
-				*c = *c0
-				return nil, nil
-			} else if len(result) == 2 {
-				// Replace the current chunk with the new bigger chunk
-				// and return the additional chunk.
-				c0 := result[0].(*varbitChunk)
-				c1 := result[1].(*varbitChunk)
-				*c = *c0
-				return c1, nil
-			}
-		}
-
-		// Chunk is already half full. Better create a new one and save the transcoding efforts.
-		// We also perform this if `transcodeAndAdd` resulted in >2 chunks.
-		return addToOverflowChunk(s)
-	}
-	if encoding == varbitIntDoubleDeltaEncoding && !isInt32(s.Value-lastValue) {
-		// Cannot go on with int encoding.
-		if offset <= ChunkLen*4 {
-			result, err := transcodeAndAdd(newVarbitChunk(varbitXOREncoding), c, s)
-			if err != nil {
-				return nil, err
-			}
-			// We cannot handle >2 chunks returned as we can only return 1 chunk.
-			// Ideally there won't be >2 chunks, but if it happens to be >2,
-			// we fall through to perform `addToOverflowChunk` instead.
-			if len(result) == 1 {
-				// Replace the current chunk with the new bigger chunk.
-				c0 := result[0].(*varbitChunk)
-				*c = *c0
-				return nil, nil
-			} else if len(result) == 2 {
-				// Replace the current chunk with the new bigger chunk
-				// and return the additional chunk.
-				c0 := result[0].(*varbitChunk)
-				c1 := result[1].(*varbitChunk)
-				*c = *c0
-				return c1, nil
-			}
-		}
-
-		// Chunk is already half full. Better create a new one and save the transcoding efforts.
-		// We also perform this if `transcodeAndAdd` resulted in >2 chunks.
-		return addToOverflowChunk(s)
-	}
-
-	offset, overflow := c.addDDTime(offset, lastTimeDelta, newTimeDelta)
-	if overflow {
-		c.addLastSample(s)
-		return nil, nil
-	}
-	switch encoding {
-	case varbitZeroEncoding:
-		// Nothing to do.
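// Note: with zero encoding every sample shares the first sample's value, so
// only the timestamp written by addDDTime above is needed.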
- case varbitIntDoubleDeltaEncoding: - offset = c.addDDValue(offset, lastValue, s.Value) - case varbitXOREncoding: - offset = c.addXORValue(offset, lastValue, s.Value) - case varbitDirectEncoding: - offset = c.addBitPattern(offset, math.Float64bits(float64(s.Value)), 64) - default: - return nil, fmt.Errorf("unknown Varbit value encoding: %v", encoding) - } - - c.setNextSampleOffset(offset) - c.setLastSample(s) - return nil, nil -} - -func (c varbitChunk) prepForThirdSample( - lastValue, newValue model.SampleValue, encoding varbitValueEncoding, -) (uint16, varbitValueEncoding) { - var ( - offset = varbitThirdSampleBitOffset - firstValue = c.firstValue() - firstValueDelta = lastValue - firstValue - firstXOR = math.Float64bits(float64(firstValue)) ^ math.Float64bits(float64(lastValue)) - _, firstSignificantBits = countBits(firstXOR) - secondXOR = math.Float64bits(float64(lastValue)) ^ math.Float64bits(float64(newValue)) - _, secondSignificantBits = countBits(secondXOR) - ) - // Now pick an initial encoding and prepare things accordingly. - // However, never pick an encoding "below" the one initially set. - switch { - case encoding == varbitZeroEncoding && lastValue == firstValue && lastValue == newValue: - // Stay at zero encoding. - // No value to be set. - // No offset change required. - case encoding <= varbitIntDoubleDeltaEncoding && isInt32(firstValueDelta): - encoding = varbitIntDoubleDeltaEncoding - binary.BigEndian.PutUint32( - c[varbitFirstValueDeltaOffset:], - uint32(int32(firstValueDelta)), - ) - c.setLastValueDelta(int32(firstValueDelta)) - offset += 32 - case encoding == varbitDirectEncoding || firstSignificantBits+secondSignificantBits > 100: - // Heuristics based on three samples only is a bit weak, - // but if we need 50+13 = 63 bits per sample already - // now, we might be better off going for direct encoding. - encoding = varbitDirectEncoding - // Put bit pattern directly where otherwise the delta would have gone. - binary.BigEndian.PutUint64( - c[varbitFirstValueDeltaOffset:], - math.Float64bits(float64(lastValue)), - ) - offset += 64 - default: - encoding = varbitXOREncoding - offset = c.addXORValue(offset, firstValue, lastValue) - } - c.setValueEncoding(encoding) - c.setNextSampleOffset(offset) - return offset, encoding -} - -// addDDTime requires that lastTimeDelta and newTimeDelta are positive and don't overflow 24bit. -func (c varbitChunk) addDDTime(offset uint16, lastTimeDelta, newTimeDelta model.Time) (newOffset uint16, overflow bool) { - timeDD := newTimeDelta - lastTimeDelta - - if !isSignedIntN(int64(timeDD), 23) { - return offset, true - } - - c.setLastTimeDelta(newTimeDelta) - repeats, repeatsOffset := c.zeroDDTRepeats() - - if timeDD == 0 { - if repeats == 0 || repeats == 128 { - // First zeroDDT, or counter full, prepare new counter. - offset = c.addZeroBit(offset) - repeatsOffset = offset - offset += 7 - repeats = 0 - } - c.setZeroDDTRepeats(repeats+1, repeatsOffset) - return offset, false - } - - // No zero repeat. If we had any before, clear the DDT offset. 
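// Worked example of the bucket scheme below (editorial, not original code):
// with a 60s scrape interval, lastTimeDelta=60000 and a sample arriving 10ms
// late give timeDD = 60010 - 60000 = 10, which fits into 6 signed bits, so we
// emit the control bits '10' followed by the 6-bit value 001010.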
- c.setZeroDDTRepeats(0, repeatsOffset) - - switch { - case isSignedIntN(int64(timeDD), 6): - offset = c.addOneBitsWithTrailingZero(offset, 1) - offset = c.addSignedInt(offset, int64(timeDD), 6) - case isSignedIntN(int64(timeDD), 17): - offset = c.addOneBitsWithTrailingZero(offset, 2) - offset = c.addSignedInt(offset, int64(timeDD), 17) - case isSignedIntN(int64(timeDD), 23): - offset = c.addOneBits(offset, 3) - offset = c.addSignedInt(offset, int64(timeDD), 23) - default: - panic("unexpected required bits for ΔΔt") - } - return offset, false -} - -// addDDValue requires that newValue-lastValue can be represented with an int32. -func (c varbitChunk) addDDValue(offset uint16, lastValue, newValue model.SampleValue) uint16 { - newValueDelta := int64(newValue - lastValue) - lastValueDelta := c.lastValueDelta() - valueDD := newValueDelta - int64(lastValueDelta) - c.setLastValueDelta(int32(newValueDelta)) - - switch { - case valueDD == 0: - return c.addZeroBit(offset) - case isSignedIntN(valueDD, 6): - offset = c.addOneBitsWithTrailingZero(offset, 1) - return c.addSignedInt(offset, valueDD, 6) - case isSignedIntN(valueDD, 13): - offset = c.addOneBitsWithTrailingZero(offset, 2) - return c.addSignedInt(offset, valueDD, 13) - case isSignedIntN(valueDD, 20): - offset = c.addOneBitsWithTrailingZero(offset, 3) - return c.addSignedInt(offset, valueDD, 20) - case isSignedIntN(valueDD, 33): - offset = c.addOneBits(offset, 4) - return c.addSignedInt(offset, valueDD, 33) - default: - panic("unexpected required bits for ΔΔv") - } -} - -func (c varbitChunk) addXORValue(offset uint16, lastValue, newValue model.SampleValue) uint16 { - lastPattern := math.Float64bits(float64(lastValue)) - newPattern := math.Float64bits(float64(newValue)) - xor := lastPattern ^ newPattern - if xor == 0 { - return c.addZeroBit(offset) - } - - lastLeadingBits := c[varbitLastLeadingZerosCountOffset] - lastSignificantBits := c[varbitLastSignificantBitsCountOffset] - newLeadingBits, newSignificantBits := countBits(xor) - - // Short entry if the new significant bits fit into the same box as the - // last significant bits. However, should the new significant bits be - // shorter by 10 or more, go for a long entry instead, as we will - // probably save more (11 bit one-time overhead, potentially more to - // save later). - if newLeadingBits >= lastLeadingBits && - newLeadingBits+newSignificantBits <= lastLeadingBits+lastSignificantBits && - lastSignificantBits-newSignificantBits < 10 { - offset = c.addOneBitsWithTrailingZero(offset, 1) - return c.addBitPattern( - offset, - xor>>(64-lastLeadingBits-lastSignificantBits), - uint16(lastSignificantBits), - ) - } - - // Long entry. - c[varbitLastLeadingZerosCountOffset] = newLeadingBits - c[varbitLastSignificantBitsCountOffset] = newSignificantBits - offset = c.addOneBits(offset, 2) - offset = c.addBitPattern(offset, uint64(newLeadingBits), 5) - offset = c.addBitPattern(offset, uint64(newSignificantBits-1), 6) // Note -1! - return c.addBitPattern( - offset, - xor>>(64-newLeadingBits-newSignificantBits), - uint16(newSignificantBits), - ) -} - -func (c varbitChunk) addZeroBit(offset uint16) uint16 { - if offset < varbitNextSampleBitOffsetThreshold { - // Writing a zero to a never touched area is a no-op. - // Just increase the offset. - return offset + 1 - } - newByte := c[offset/8] &^ bitMask[1][offset%8] - c[offset/8] = newByte - // TODO(beorn7): The two lines above could be written as - // c[offset/8] &^= bitMask[1][offset%8] - // However, that tickles a compiler bug with GOARCH=386. 
- // See https://github.com/prometheus/prometheus/issues/1509 - return offset + 1 -} - -func (c varbitChunk) addOneBits(offset uint16, n uint16) uint16 { - if n > 7 { - panic("unexpected number of control bits") - } - b := 8 - offset%8 - if b > n { - b = n - } - c[offset/8] |= bitMask[b][offset%8] - offset += b - b = n - b - if b > 0 { - c[offset/8] |= bitMask[b][0] - offset += b - } - return offset -} -func (c varbitChunk) addOneBitsWithTrailingZero(offset uint16, n uint16) uint16 { - offset = c.addOneBits(offset, n) - return c.addZeroBit(offset) -} - -// addSignedInt adds i as a signed integer with n bits. It requires i to be -// representable as such. (Check with isSignedIntN first.) -func (c varbitChunk) addSignedInt(offset uint16, i int64, n uint16) uint16 { - if i < 0 && n < 64 { - i += 1 << n - } - return c.addBitPattern(offset, uint64(i), n) -} - -// addBitPattern adds the last n bits of the given pattern. Other bits in the -// pattern must be 0. -func (c varbitChunk) addBitPattern(offset uint16, pattern uint64, n uint16) uint16 { - var ( - byteOffset = offset / 8 - bitsToWrite = 8 - offset%8 - newOffset = offset + n - ) - - // Clean up the parts of the footer we will write into. (But not more as - // we are still using the value related part of the footer when we have - // already overwritten timestamp related parts.) - if newOffset > varbitNextSampleBitOffsetThreshold { - pos := offset - if pos < varbitNextSampleBitOffsetThreshold { - pos = varbitNextSampleBitOffsetThreshold - } - for pos < newOffset { - posInByte := pos % 8 - bitsToClear := newOffset - pos - if bitsToClear > 8-posInByte { - bitsToClear = 8 - posInByte - } - c[pos/8] &^= bitMask[bitsToClear][posInByte] - pos += bitsToClear - } - } - - for n > 0 { - if n <= bitsToWrite { - c[byteOffset] |= byte(pattern << (bitsToWrite - n)) - break - } - c[byteOffset] |= byte(pattern >> (n - bitsToWrite)) - n -= bitsToWrite - bitsToWrite = 8 - byteOffset++ - } - return newOffset -} - -// readBitPattern reads n bits at the given offset and returns them as the last -// n bits in a uint64. -func (c varbitChunk) readBitPattern(offset, n uint16) uint64 { - var ( - result uint64 - byteOffset = offset / 8 - bitOffset = offset % 8 - trailingBits, bitsToRead uint16 - ) - - for n > 0 { - trailingBits = 0 - bitsToRead = 8 - bitOffset - if bitsToRead > n { - trailingBits = bitsToRead - n - bitsToRead = n - } - result <<= bitsToRead - result |= uint64( - (c[byteOffset] & bitMask[bitsToRead][bitOffset]) >> trailingBits, - ) - n -= bitsToRead - byteOffset++ - bitOffset = 0 - } - return result -} - -type varbitChunkIterator struct { - c varbitChunk - // pos is the bit position within the chunk for the next sample to be - // decoded when scan() is called (i.e. it is _not_ the bit position of - // the sample currently returned by value()). The symbolic values - // varbitFirstSampleBitOffset and varbitSecondSampleBitOffset are also - // used for pos. len is the offset of the first bit in the chunk that is - // not part of the payload. If pos==len, then the iterator is positioned - // behind the last sample in the payload. However, the next call of - // scan() still has to check if the chunk is closed, in which case there - // is one more sample, saved in the header. To mark the iterator as - // having scanned that last sample, too, pos is set to len+1. - pos, len uint16 - t, dT model.Time - repeats byte // Repeats of ΔΔt=0. - v model.SampleValue - dV int64 // Only used for int value encoding. 
- leading, significant uint16 - enc varbitValueEncoding - lastError error - rewound bool - nextT model.Time // Only for rewound state. - nextV model.SampleValue // Only for rewound state. -} - -func newVarbitChunkIterator(c varbitChunk) *varbitChunkIterator { - return &varbitChunkIterator{ - c: c, - len: c.nextSampleOffset(), - t: model.Earliest, - enc: c.valueEncoding(), - significant: 1, - } -} - -// scan implements Iterator. -func (it *varbitChunkIterator) Scan() bool { - if it.lastError != nil { - return false - } - if it.rewound { - it.t = it.nextT - it.v = it.nextV - it.rewound = false - return true - } - if it.pos > it.len { - return false - } - if it.pos == it.len && it.repeats == 0 { - it.pos = it.len + 1 - if !it.c.closed() { - return false - } - it.t = it.c.lastTime() - it.v = it.c.lastValue() - return it.lastError == nil - } - if it.pos == varbitFirstSampleBitOffset { - it.t = it.c.firstTime() - it.v = it.c.firstValue() - it.pos = varbitSecondSampleBitOffset - return it.lastError == nil - } - if it.pos == varbitSecondSampleBitOffset { - if it.len == varbitThirdSampleBitOffset && !it.c.closed() { - // Special case: Chunk has only two samples. - it.t = it.c.lastTime() - it.v = it.c.lastValue() - it.pos = it.len + 1 - return it.lastError == nil - } - it.dT = it.c.firstTimeDelta() - it.t += it.dT - // Value depends on encoding. - switch it.enc { - case varbitZeroEncoding: - it.pos = varbitThirdSampleBitOffset - case varbitIntDoubleDeltaEncoding: - it.dV = int64(it.c.firstValueDelta()) - it.v += model.SampleValue(it.dV) - it.pos = varbitThirdSampleBitOffset + 32 - case varbitXOREncoding: - it.pos = varbitThirdSampleBitOffset - it.readXOR() - case varbitDirectEncoding: - it.v = model.SampleValue(math.Float64frombits( - binary.BigEndian.Uint64(it.c[varbitThirdSampleBitOffset/8:]), - )) - it.pos = varbitThirdSampleBitOffset + 64 - default: - it.lastError = fmt.Errorf("unknown varbit value encoding: %v", it.enc) - } - return it.lastError == nil - } - // 3rd sample or later does not have special cases anymore. - it.readDDT() - switch it.enc { - case varbitZeroEncoding: - // Do nothing. - case varbitIntDoubleDeltaEncoding: - it.readDDV() - case varbitXOREncoding: - it.readXOR() - case varbitDirectEncoding: - it.v = model.SampleValue(math.Float64frombits(it.readBitPattern(64))) - return it.lastError == nil - default: - it.lastError = fmt.Errorf("unknown varbit value encoding: %v", it.enc) - return false - } - return it.lastError == nil -} - -// findAtOrAfter implements Iterator. -func (it *varbitChunkIterator) FindAtOrAfter(t model.Time) bool { - if it.len == 0 || t.After(it.c.lastTime()) { - return false - } - first := it.c.firstTime() - if !t.After(first) { - it.reset() - return it.Scan() - } - if t == it.t { - return it.lastError == nil - } - if t.Before(it.t) { - it.reset() - } - for it.Scan() && t.After(it.t) { - // TODO(beorn7): If we are in a repeat, we could iterate forward - // much faster. - } - return it.lastError == nil -} - -// value implements Iterator. -func (it *varbitChunkIterator) Value() model.SamplePair { - return model.SamplePair{ - Timestamp: it.t, - Value: it.v, - } -} - -func (it *varbitChunkIterator) Batch(size int) Batch { - var batch Batch - j := 0 - for j < size { - batch.Timestamps[j] = int64(it.t) - batch.Values[j] = float64(it.v) - j++ - if j < size && !it.Scan() { - break - } - } - batch.Index = 0 - batch.Length = j - return batch -} - -// err implements Iterator. 
-func (it *varbitChunkIterator) Err() error { - return it.lastError -} - -func (it *varbitChunkIterator) readDDT() { - if it.repeats > 0 { - it.repeats-- - } else { - switch it.readControlBits(3) { - case 0: - it.repeats = byte(it.readBitPattern(7)) - case 1: - it.dT += model.Time(it.readSignedInt(6)) - case 2: - it.dT += model.Time(it.readSignedInt(17)) - case 3: - it.dT += model.Time(it.readSignedInt(23)) - default: - panic("unexpected number of control bits") - } - } - it.t += it.dT -} - -func (it *varbitChunkIterator) readDDV() { - switch it.readControlBits(4) { - case 0: - // Do nothing. - case 1: - it.dV += it.readSignedInt(6) - case 2: - it.dV += it.readSignedInt(13) - case 3: - it.dV += it.readSignedInt(20) - case 4: - it.dV += it.readSignedInt(33) - default: - panic("unexpected number of control bits") - } - it.v += model.SampleValue(it.dV) -} - -func (it *varbitChunkIterator) readXOR() { - switch it.readControlBits(2) { - case 0: - return - case 1: - // Do nothing right now. All done below. - case 2: - it.leading = uint16(it.readBitPattern(5)) - it.significant = uint16(it.readBitPattern(6)) + 1 - default: - panic("unexpected number of control bits") - } - pattern := math.Float64bits(float64(it.v)) - pattern ^= it.readBitPattern(it.significant) << (64 - it.significant - it.leading) - it.v = model.SampleValue(math.Float64frombits(pattern)) -} - -// readControlBits reads successive 1-bits and stops after reading the first -// 0-bit. It also stops once it has read max bits. It returns the number of read -// 1-bits. -func (it *varbitChunkIterator) readControlBits(max uint16) uint16 { - var count uint16 - for count < max && int(it.pos/8) < len(it.c) { - b := it.c[it.pos/8] & bitMask[1][it.pos%8] - it.pos++ - if b == 0 { - return count - } - count++ - } - if int(it.pos/8) >= len(it.c) { - it.lastError = errChunkBoundsExceeded - } - return count -} - -func (it *varbitChunkIterator) readBitPattern(n uint16) uint64 { - if len(it.c)*8 < int(it.pos)+int(n) { - it.lastError = errChunkBoundsExceeded - return 0 - } - u := it.c.readBitPattern(it.pos, n) - it.pos += n - return u -} - -func (it *varbitChunkIterator) readSignedInt(n uint16) int64 { - u := it.readBitPattern(n) - if n < 64 && u >= 1<<(n-1) { - u -= 1 << n - } - return int64(u) -} - -// reset puts the chunk iterator into the state it had upon creation. -func (it *varbitChunkIterator) reset() { - it.pos = 0 - it.t = model.Earliest - it.dT = 0 - it.repeats = 0 - it.v = 0 - it.dV = 0 - it.leading = 0 - it.significant = 1 - it.rewound = false -} - -// rewind "rewinds" the chunk iterator by one step. Since one cannot simply -// rewind a Varbit chunk, the old values have to be provided by the -// caller. Rewinding an already rewound chunk panics. After a call of scan or -// reset, a chunk can be rewound again. -func (it *varbitChunkIterator) rewind(t model.Time, v model.SampleValue) { - if it.rewound { - panic("cannot rewind varbit chunk twice") - } - it.rewound = true - it.nextT = it.t - it.nextV = it.v - it.t = t - it.v = v -} diff --git a/internal/cortex/chunk/encoding/varbit_helpers.go b/internal/cortex/chunk/encoding/varbit_helpers.go deleted file mode 100644 index 73013d84d7..0000000000 --- a/internal/cortex/chunk/encoding/varbit_helpers.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). 
-// The original license header is included below:
-//
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// nolint //Since this was copied from Prometheus leave it as is
-package encoding
-
-import "github.com/prometheus/common/model"
-
-var (
-	// bit masks for consecutive bits in a byte at various offsets.
-	bitMask = [][]byte{
-		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // 0 bit
-		{0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01}, // 1 bit
-		{0xC0, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x01}, // 2 bit
-		{0xE0, 0x70, 0x38, 0x1C, 0x0E, 0x07, 0x03, 0x01}, // 3 bit
-		{0xF0, 0x78, 0x3C, 0x1E, 0x0F, 0x07, 0x03, 0x01}, // 4 bit
-		{0xF8, 0x7C, 0x3E, 0x1F, 0x0F, 0x07, 0x03, 0x01}, // 5 bit
-		{0xFC, 0x7E, 0x3F, 0x1F, 0x0F, 0x07, 0x03, 0x01}, // 6 bit
-		{0xFE, 0x7F, 0x3F, 0x1F, 0x0F, 0x07, 0x03, 0x01}, // 7 bit
-		{0xFF, 0x7F, 0x3F, 0x1F, 0x0F, 0x07, 0x03, 0x01}, // 8 bit
-	}
-)
-
-// isInt32 returns true if v can be represented as an int32.
-func isInt32(v model.SampleValue) bool {
-	return model.SampleValue(int32(v)) == v
-}
-
-// countBits returns the number of leading zero bits and the number of
-// significant bits after that in the given bit pattern. The maximum number of
-// leading zeros is 31 (so that it can be represented by a 5-bit number). Leading
-// zeros beyond that are considered part of the significant bits.
-func countBits(pattern uint64) (leading, significant byte) {
-	// TODO(beorn7): This would probably be faster with ugly endless switch
-	// statements.
-	if pattern == 0 {
-		return
-	}
-	for pattern < 1<<63 {
-		leading++
-		pattern <<= 1
-	}
-	for pattern > 0 {
-		significant++
-		pattern <<= 1
-	}
-	if leading > 31 { // 5 bit limit.
-		significant += leading - 31
-		leading = 31
-	}
-	return
-}
-
-// isSignedIntN returns whether i can be represented as a signed integer with
-// the given bit length n.
-func isSignedIntN(i int64, n byte) bool {
-	upper := int64(1) << (n - 1)
-	if i >= upper {
-		return false
-	}
-	lower := upper - (1 << n)
-	if i < lower {
-		return false
-	}
-	return true
-}
diff --git a/internal/cortex/chunk/encoding/varbit_test.go b/internal/cortex/chunk/encoding/varbit_test.go
deleted file mode 100644
index b15cf63910..0000000000
--- a/internal/cortex/chunk/encoding/varbit_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) The Cortex Authors.
-// Licensed under the Apache License 2.0.
-
-// This file was taken from Prometheus (https://github.com/prometheus/prometheus).
-// The original license header is included below:
-//
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package encoding - -import "testing" - -func TestCountBits(t *testing.T) { - for i := byte(0); i < 56; i++ { - for j := byte(0); j <= 8; j++ { - for k := byte(0); k < 8; k++ { - p := uint64(bitMask[j][k]) << i - gotLeading, gotSignificant := countBits(p) - wantLeading := 56 - i + k - wantSignificant := j - if j+k > 8 { - wantSignificant -= j + k - 8 - } - if wantLeading > 31 { - wantSignificant += wantLeading - 31 - wantLeading = 31 - } - if p == 0 { - wantLeading = 0 - wantSignificant = 0 - } - if wantLeading != gotLeading { - t.Errorf( - "unexpected leading bit count for i=%d, j=%d, k=%d; want %d, got %d", - i, j, k, wantLeading, gotLeading, - ) - } - if wantSignificant != gotSignificant { - t.Errorf( - "unexpected significant bit count for i=%d, j=%d, k=%d; want %d, got %d", - i, j, k, wantSignificant, gotSignificant, - ) - } - } - } - } -} diff --git a/internal/cortex/chunk/fixtures.go b/internal/cortex/chunk/fixtures.go deleted file mode 100644 index bfbe270075..0000000000 --- a/internal/cortex/chunk/fixtures.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -// Chunk functions used only in tests - -import ( - "context" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/thanos-io/thanos/internal/cortex/util" -) - -// BenchmarkLabels is a real example from Kubernetes' embedded cAdvisor metrics, lightly obfuscated -var BenchmarkLabels = labels.Labels{ - {Name: model.MetricNameLabel, Value: "container_cpu_usage_seconds_total"}, - {Name: "beta_kubernetes_io_arch", Value: "amd64"}, - {Name: "beta_kubernetes_io_instance_type", Value: "c3.somesize"}, - {Name: "beta_kubernetes_io_os", Value: "linux"}, - {Name: "container_name", Value: "some-name"}, - {Name: "cpu", Value: "cpu01"}, - {Name: "failure_domain_beta_kubernetes_io_region", Value: "somewhere-1"}, - {Name: "failure_domain_beta_kubernetes_io_zone", Value: "somewhere-1b"}, - {Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"}, - {Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"}, - {Name: "instance", Value: "ip-111-11-1-11.ec2.internal"}, - {Name: "job", Value: "kubernetes-cadvisor"}, - {Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"}, - {Name: "monitor", Value: "prod"}, - {Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"}, - {Name: "namespace", Value: "kube-system"}, - {Name: "pod_name", Value: "some-other-name-5j8s8"}, -} - -// DefaultSchemaConfig creates a simple schema config for testing -func DefaultSchemaConfig(store, schema string, from model.Time) SchemaConfig { - s := SchemaConfig{ - Configs: []PeriodConfig{{ - IndexType: store, - Schema: schema, - From: DayTime{from}, - ChunkTables: PeriodicTableConfig{ - Prefix: "cortex", - Period: 7 * 24 * time.Hour, - }, - IndexTables: PeriodicTableConfig{ - Prefix: "cortex_chunks", - Period: 7 * 24 * time.Hour, - }, - }}, - } - if err := s.Validate(); err != nil { - panic(err) - } - return s -} - -// ChunksToMatrix converts a set of chunks to a model.Matrix. 
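// Note: chunks of the same series are grouped by fingerprint, and their
// sample sets are merged and deduplicated by util.MergeNSampleSets in the
// loop below.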
-func ChunksToMatrix(ctx context.Context, chunks []Chunk, from, through model.Time) (model.Matrix, error) { - // Group chunks by series, sort and dedupe samples. - metrics := map[model.Fingerprint]model.Metric{} - samplesBySeries := map[model.Fingerprint][][]model.SamplePair{} - for _, c := range chunks { - ss, err := c.Samples(from, through) - if err != nil { - return nil, err - } - - metrics[c.Fingerprint] = util.LabelsToMetric(c.Metric) - samplesBySeries[c.Fingerprint] = append(samplesBySeries[c.Fingerprint], ss) - } - - matrix := make(model.Matrix, 0, len(samplesBySeries)) - for fp, ss := range samplesBySeries { - matrix = append(matrix, &model.SampleStream{ - Metric: metrics[fp], - Values: util.MergeNSampleSets(ss...), - }) - } - - return matrix, nil -} diff --git a/internal/cortex/chunk/index_reader.go b/internal/cortex/chunk/index_reader.go deleted file mode 100644 index 882b6a62e6..0000000000 --- a/internal/cortex/chunk/index_reader.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "context" -) - -// IndexEntryProcessor receives index entries from a table. -type IndexEntryProcessor interface { - ProcessIndexEntry(indexEntry IndexEntry) error - - // Will this user be accepted by the processor? - AcceptUser(user string) bool - - // Called at the end of reading of index entries. - Flush() error -} - -// IndexReader parses index entries and passes them to the IndexEntryProcessor. -type IndexReader interface { - IndexTableNames(ctx context.Context) ([]string, error) - - // Reads a single table from index, and passes individual index entries to the processors. - // - // All entries with the same TableName, HashValue and RangeValue are passed to the same processor, - // and all such entries (with different Values) are passed before index entries with different - // values of HashValue and RangeValue are passed to the same processor. - // - // This allows IndexEntryProcessor to find when values for given Hash and Range finish: - // as soon as new Hash and Range differ from last IndexEntry. - // - // Index entries passed to the same processor arrive sorted by HashValue and RangeValue. - ReadIndexEntries(ctx context.Context, table string, processors []IndexEntryProcessor) error -} diff --git a/internal/cortex/chunk/inmemory_storage_client.go b/internal/cortex/chunk/inmemory_storage_client.go deleted file mode 100644 index 68b1313524..0000000000 --- a/internal/cortex/chunk/inmemory_storage_client.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "sort" - "strings" - "sync" - - "github.com/go-kit/log/level" - - "github.com/thanos-io/thanos/internal/cortex/util/log" -) - -type MockStorageMode int - -var errPermissionDenied = errors.New("permission denied") - -const ( - MockStorageModeReadWrite = 0 - MockStorageModeReadOnly = 1 - MockStorageModeWriteOnly = 2 -) - -// MockStorage is a fake in-memory StorageClient. -type MockStorage struct { - mtx sync.RWMutex - tables map[string]*mockTable - objects map[string][]byte - - numIndexWrites int - numChunkWrites int - mode MockStorageMode -} - -type mockTable struct { - items map[string][]mockItem - write, read int64 -} - -type mockItem struct { - rangeValue []byte - value []byte -} - -// NewMockStorage creates a new MockStorage. 
-func NewMockStorage() *MockStorage { - return &MockStorage{ - tables: map[string]*mockTable{}, - objects: map[string][]byte{}, - } -} - -func (m *MockStorage) GetSortedObjectKeys() []string { - m.mtx.RLock() - defer m.mtx.RUnlock() - - keys := make([]string, 0, len(m.objects)) - for k := range m.objects { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -func (m *MockStorage) GetObjectCount() int { - m.mtx.RLock() - defer m.mtx.RUnlock() - - return len(m.objects) -} - -// Stop doesn't do anything. -func (*MockStorage) Stop() { -} - -func (m *MockStorage) SetMode(mode MockStorageMode) { - m.mode = mode -} - -// ListTables implements StorageClient. -func (m *MockStorage) ListTables(_ context.Context) ([]string, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - var tableNames []string - for tableName := range m.tables { - func(tableName string) { - tableNames = append(tableNames, tableName) - }(tableName) - } - return tableNames, nil -} - -// CreateTable implements StorageClient. -func (m *MockStorage) CreateTable(_ context.Context, desc TableDesc) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - if _, ok := m.tables[desc.Name]; ok { - return fmt.Errorf("table already exists") - } - - m.tables[desc.Name] = &mockTable{ - items: map[string][]mockItem{}, - write: desc.ProvisionedWrite, - read: desc.ProvisionedRead, - } - - return nil -} - -// DeleteTable implements StorageClient. -func (m *MockStorage) DeleteTable(_ context.Context, name string) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - if _, ok := m.tables[name]; !ok { - return fmt.Errorf("table does not exist") - } - - delete(m.tables, name) - - return nil -} - -// DescribeTable implements StorageClient. -func (m *MockStorage) DescribeTable(_ context.Context, name string) (desc TableDesc, isActive bool, err error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - table, ok := m.tables[name] - if !ok { - return TableDesc{}, false, fmt.Errorf("not found") - } - - return TableDesc{ - Name: name, - ProvisionedRead: table.read, - ProvisionedWrite: table.write, - }, true, nil -} - -// UpdateTable implements StorageClient. -func (m *MockStorage) UpdateTable(_ context.Context, _, desc TableDesc) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - table, ok := m.tables[desc.Name] - if !ok { - return fmt.Errorf("not found") - } - - table.read = desc.ProvisionedRead - table.write = desc.ProvisionedWrite - - return nil -} - -// NewWriteBatch implements StorageClient. -func (m *MockStorage) NewWriteBatch() WriteBatch { - return &mockWriteBatch{} -} - -// BatchWrite implements StorageClient. 
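// Note: inserts keep each hash's item slice ordered by range value via
// sort.Search, and a duplicate (table, hash, range) write within a single
// batch is rejected outright.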
-func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.mode == MockStorageModeReadOnly { - return errPermissionDenied - } - - mockBatch := *batch.(*mockWriteBatch) - seenWrites := map[string]bool{} - - m.numIndexWrites += len(mockBatch.inserts) - - for _, req := range mockBatch.inserts { - table, ok := m.tables[req.tableName] - if !ok { - return fmt.Errorf("table not found") - } - - // Check for duplicate writes by RangeKey in same batch - key := fmt.Sprintf("%s:%s:%x", req.tableName, req.hashValue, req.rangeValue) - if _, ok := seenWrites[key]; ok { - return fmt.Errorf("Dupe write in batch") - } - seenWrites[key] = true - - level.Debug(log.WithContext(ctx, log.Logger)).Log("msg", "write", "hash", req.hashValue, "range", req.rangeValue) - - items := table.items[req.hashValue] - - // insert in order - i := sort.Search(len(items), func(i int) bool { - return bytes.Compare(items[i].rangeValue, req.rangeValue) >= 0 - }) - if i >= len(items) || !bytes.Equal(items[i].rangeValue, req.rangeValue) { - items = append(items, mockItem{}) - copy(items[i+1:], items[i:]) - } else { - // if duplicate write then just update the value - items[i].value = req.value - continue - } - items[i] = mockItem{ - rangeValue: req.rangeValue, - value: req.value, - } - - table.items[req.hashValue] = items - } - - for _, req := range mockBatch.deletes { - table, ok := m.tables[req.tableName] - if !ok { - return fmt.Errorf("table not found") - } - - items := table.items[req.hashValue] - - i := sort.Search(len(items), func(i int) bool { - return bytes.Compare(items[i].rangeValue, req.rangeValue) >= 0 - }) - - if i >= len(items) || !bytes.Equal(items[i].rangeValue, req.rangeValue) { - continue - } - - if len(items) == 1 { - items = nil - } else { - items = items[:i+copy(items[i:], items[i+1:])] - } - - table.items[req.hashValue] = items - } - return nil -} - -// QueryPages implements StorageClient. 
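// BatchWrite above keeps each hash's items sorted by range value, inserting
// with sort.Search and overwriting the value on a duplicate write. The same
// idiom in isolation, assuming nothing from the deleted package; sortedInsert
// and item are illustrative names.
package main

import (
	"bytes"
	"fmt"
	"sort"
)

type item struct{ rangeValue, value []byte }

// sortedInsert keeps items ordered by rangeValue; an existing entry with the
// same rangeValue is updated in place instead of being duplicated.
func sortedInsert(items []item, it item) []item {
	i := sort.Search(len(items), func(i int) bool {
		return bytes.Compare(items[i].rangeValue, it.rangeValue) >= 0
	})
	if i < len(items) && bytes.Equal(items[i].rangeValue, it.rangeValue) {
		items[i].value = it.value
		return items
	}
	items = append(items, item{}) // grow by one
	copy(items[i+1:], items[i:])  // shift the tail right
	items[i] = it
	return items
}

func main() {
	var items []item
	for _, k := range []string{"b", "a", "c", "a"} {
		items = sortedInsert(items, item{[]byte(k), []byte("v-" + k)})
	}
	for _, it := range items {
		fmt.Printf("%s=%s ", it.rangeValue, it.value) // a=v-a b=v-b c=v-c
	}
	fmt.Println()
}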
-func (m *MockStorage) QueryPages(ctx context.Context, queries []IndexQuery, callback func(IndexQuery, ReadBatch) (shouldContinue bool)) error { - m.mtx.RLock() - defer m.mtx.RUnlock() - - if m.mode == MockStorageModeWriteOnly { - return errPermissionDenied - } - - for _, query := range queries { - err := m.query(ctx, query, func(b ReadBatch) bool { - return callback(query, b) - }) - if err != nil { - return err - } - } - - return nil -} - -func (m *MockStorage) query(ctx context.Context, query IndexQuery, callback func(ReadBatch) (shouldContinue bool)) error { - logger := log.WithContext(ctx, log.Logger) - level.Debug(logger).Log("msg", "QueryPages", "query", query.HashValue) - - table, ok := m.tables[query.TableName] - if !ok { - return fmt.Errorf("table not found") - } - - items, ok := table.items[query.HashValue] - if !ok { - level.Debug(logger).Log("msg", "not found") - return nil - } - - if query.RangeValuePrefix != nil { - level.Debug(logger).Log("msg", "lookup prefix", "hash", query.HashValue, "range_prefix", query.RangeValuePrefix, "num_items", len(items)) - - // the smallest index i in [0, n) at which f(i) is true - i := sort.Search(len(items), func(i int) bool { - if bytes.Compare(items[i].rangeValue, query.RangeValuePrefix) > 0 { - return true - } - return bytes.HasPrefix(items[i].rangeValue, query.RangeValuePrefix) - }) - j := sort.Search(len(items)-i, func(j int) bool { - if bytes.Compare(items[i+j].rangeValue, query.RangeValuePrefix) < 0 { - return false - } - return !bytes.HasPrefix(items[i+j].rangeValue, query.RangeValuePrefix) - }) - - level.Debug(logger).Log("msg", "found range", "from_inclusive", i, "to_exclusive", i+j) - if i > len(items) || j == 0 { - return nil - } - items = items[i : i+j] - - } else if query.RangeValueStart != nil { - level.Debug(logger).Log("msg", "lookup range", "hash", query.HashValue, "range_start", query.RangeValueStart, "num_items", len(items)) - - // the smallest index i in [0, n) at which f(i) is true - i := sort.Search(len(items), func(i int) bool { - return bytes.Compare(items[i].rangeValue, query.RangeValueStart) >= 0 - }) - - level.Debug(logger).Log("msg", "found range [%d)", "index", i) - if i > len(items) { - return nil - } - items = items[i:] - - } else { - level.Debug(logger).Log("msg", "lookup", "hash", query.HashValue, "num_items", len(items)) - } - - // Filters - if query.ValueEqual != nil { - level.Debug(logger).Log("msg", "filter by equality", "value_equal", query.ValueEqual) - - filtered := make([]mockItem, 0) - for _, v := range items { - if bytes.Equal(v.value, query.ValueEqual) { - filtered = append(filtered, v) - } - } - items = filtered - } - - result := mockReadBatch{} - result.items = append(result.items, items...) - - callback(&result) - return nil -} - -// PutChunks implements StorageClient. -func (m *MockStorage) PutChunks(_ context.Context, chunks []Chunk) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.mode == MockStorageModeReadOnly { - return errPermissionDenied - } - - m.numChunkWrites += len(chunks) - - for i := range chunks { - buf, err := chunks[i].Encoded() - if err != nil { - return err - } - m.objects[chunks[i].ExternalKey()] = buf - } - return nil -} - -// GetChunks implements StorageClient. 
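// The query method above narrows a sorted item list to the half-open range
// of entries sharing a prefix using two sort.Search passes. The same
// technique on plain strings; prefixRange is an illustrative helper, not
// part of the deleted code.
package main

import (
	"fmt"
	"sort"
	"strings"
)

// prefixRange returns [lo, hi) such that keys[lo:hi] are exactly the sorted
// keys that start with prefix.
func prefixRange(keys []string, prefix string) (lo, hi int) {
	// First match: smallest index whose key is >= the prefix itself.
	lo = sort.SearchStrings(keys, prefix)
	// Past-the-end: first index after lo whose key no longer has the prefix.
	hi = lo + sort.Search(len(keys)-lo, func(j int) bool {
		return !strings.HasPrefix(keys[lo+j], prefix)
	})
	return lo, hi
}

func main() {
	keys := []string{"aa", "ab", "abc", "abd", "ac"}
	lo, hi := prefixRange(keys, "ab")
	fmt.Println(keys[lo:hi]) // [ab abc abd]
}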
-func (m *MockStorage) GetChunks(ctx context.Context, chunkSet []Chunk) ([]Chunk, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - if m.mode == MockStorageModeWriteOnly { - return nil, errPermissionDenied - } - - decodeContext := NewDecodeContext() - result := []Chunk{} - for _, chunk := range chunkSet { - key := chunk.ExternalKey() - buf, ok := m.objects[key] - if !ok { - return nil, ErrStorageObjectNotFound - } - if err := chunk.Decode(decodeContext, buf); err != nil { - return nil, err - } - result = append(result, chunk) - } - return result, nil -} - -// DeleteChunk implements StorageClient. -func (m *MockStorage) DeleteChunk(ctx context.Context, userID, chunkID string) error { - if m.mode == MockStorageModeReadOnly { - return errPermissionDenied - } - - return m.DeleteObject(ctx, chunkID) -} - -func (m *MockStorage) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - if m.mode == MockStorageModeWriteOnly { - return nil, errPermissionDenied - } - - buf, ok := m.objects[objectKey] - if !ok { - return nil, ErrStorageObjectNotFound - } - - return ioutil.NopCloser(bytes.NewReader(buf)), nil -} - -func (m *MockStorage) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { - buf, err := ioutil.ReadAll(object) - if err != nil { - return err - } - - if m.mode == MockStorageModeReadOnly { - return errPermissionDenied - } - - m.mtx.Lock() - defer m.mtx.Unlock() - - m.objects[objectKey] = buf - return nil -} - -func (m *MockStorage) DeleteObject(ctx context.Context, objectKey string) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.mode == MockStorageModeReadOnly { - return errPermissionDenied - } - - if _, ok := m.objects[objectKey]; !ok { - return ErrStorageObjectNotFound - } - - delete(m.objects, objectKey) - return nil -} - -// List implements chunk.ObjectClient. -func (m *MockStorage) List(ctx context.Context, prefix, delimiter string) ([]StorageObject, []StorageCommonPrefix, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - if m.mode == MockStorageModeWriteOnly { - return nil, nil, errPermissionDenied - } - - prefixes := map[string]struct{}{} - - storageObjects := make([]StorageObject, 0, len(m.objects)) - for key := range m.objects { - if !strings.HasPrefix(key, prefix) { - continue - } - - // ToDo: Store mtime when we have mtime based use-cases for storage objects - if delimiter == "" { - storageObjects = append(storageObjects, StorageObject{Key: key}) - continue - } - - ix := strings.Index(key[len(prefix):], delimiter) - if ix < 0 { - storageObjects = append(storageObjects, StorageObject{Key: key}) - continue - } - - commonPrefix := key[:len(prefix)+ix+len(delimiter)] // Include delimeter in the common prefix. - prefixes[commonPrefix] = struct{}{} - } - - var commonPrefixes = []StorageCommonPrefix(nil) - for p := range prefixes { - commonPrefixes = append(commonPrefixes, StorageCommonPrefix(p)) - } - - // Object stores return results in sorted order. 
- sort.Slice(storageObjects, func(i, j int) bool { - return storageObjects[i].Key < storageObjects[j].Key - }) - sort.Slice(commonPrefixes, func(i, j int) bool { - return commonPrefixes[i] < commonPrefixes[j] - }) - - return storageObjects, commonPrefixes, nil -} - -type mockWriteBatch struct { - inserts []struct { - tableName, hashValue string - rangeValue []byte - value []byte - } - deletes []struct { - tableName, hashValue string - rangeValue []byte - } -} - -func (b *mockWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { - b.deletes = append(b.deletes, struct { - tableName, hashValue string - rangeValue []byte - }{tableName: tableName, hashValue: hashValue, rangeValue: rangeValue}) -} - -func (b *mockWriteBatch) Add(tableName, hashValue string, rangeValue []byte, value []byte) { - b.inserts = append(b.inserts, struct { - tableName, hashValue string - rangeValue []byte - value []byte - }{tableName, hashValue, rangeValue, value}) -} - -type mockReadBatch struct { - items []mockItem -} - -func (b *mockReadBatch) Iterator() ReadBatchIterator { - return &mockReadBatchIter{ - index: -1, - mockReadBatch: b, - } -} - -type mockReadBatchIter struct { - index int - *mockReadBatch -} - -func (b *mockReadBatchIter) Next() bool { - b.index++ - return b.index < len(b.items) -} - -func (b *mockReadBatchIter) RangeValue() []byte { - return b.items[b.index].rangeValue -} - -func (b *mockReadBatchIter) Value() []byte { - return b.items[b.index].value -} diff --git a/internal/cortex/chunk/json_helpers.go b/internal/cortex/chunk/json_helpers.go deleted file mode 100644 index db9d65ee84..0000000000 --- a/internal/cortex/chunk/json_helpers.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
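// List in the deleted inmemory_storage_client.go above emulates S3-style
// delimiter listing: any key containing the delimiter after the prefix is
// collapsed into a single common-prefix entry. A self-contained sketch of
// that grouping; listKeys is an illustrative helper, not part of the code.
package main

import (
	"fmt"
	"sort"
	"strings"
)

// listKeys returns the objects directly under prefix plus the collapsed
// common prefixes, mirroring the deleted List semantics.
func listKeys(keys []string, prefix, delimiter string) (objects, commonPrefixes []string) {
	seen := map[string]struct{}{}
	for _, key := range keys {
		if !strings.HasPrefix(key, prefix) {
			continue
		}
		ix := strings.Index(key[len(prefix):], delimiter)
		if delimiter == "" || ix < 0 {
			objects = append(objects, key)
			continue
		}
		// Keep the delimiter in the common prefix, as the original does.
		seen[key[:len(prefix)+ix+len(delimiter)]] = struct{}{}
	}
	for p := range seen {
		commonPrefixes = append(commonPrefixes, p)
	}
	sort.Strings(objects)
	sort.Strings(commonPrefixes)
	return objects, commonPrefixes
}

func main() {
	keys := []string{"a/1", "a/b/2", "a/b/3", "a/c/4", "b/5"}
	objs, prefixes := listKeys(keys, "a/", "/")
	fmt.Println(objs)     // [a/1]
	fmt.Println(prefixes) // [a/b/ a/c/]
}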
- -package chunk - -import ( - "sort" - "unsafe" - - jsoniter "github.com/json-iterator/go" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" -) - -func init() { - jsoniter.RegisterTypeDecoderFunc("labels.Labels", decodeLabels) - jsoniter.RegisterTypeEncoderFunc("labels.Labels", encodeLabels, labelsIsEmpty) - jsoniter.RegisterTypeDecoderFunc("model.Time", decodeModelTime) - jsoniter.RegisterTypeEncoderFunc("model.Time", encodeModelTime, modelTimeIsEmpty) -} - -// Override Prometheus' labels.Labels decoder which goes via a map -func decodeLabels(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - labelsPtr := (*labels.Labels)(ptr) - *labelsPtr = make(labels.Labels, 0, 10) - iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool { - value := iter.ReadString() - *labelsPtr = append(*labelsPtr, labels.Label{Name: key, Value: value}) - return true - }) - // Labels are always sorted, but earlier Cortex using a map would - // output in any order so we have to sort on read in - sort.Sort(*labelsPtr) -} - -// Override Prometheus' labels.Labels encoder which goes via a map -func encodeLabels(ptr unsafe.Pointer, stream *jsoniter.Stream) { - labelsPtr := (*labels.Labels)(ptr) - stream.WriteObjectStart() - for i, v := range *labelsPtr { - if i != 0 { - stream.WriteMore() - } - stream.WriteString(v.Name) - stream.WriteRaw(`:`) - stream.WriteString(v.Value) - } - stream.WriteObjectEnd() -} - -func labelsIsEmpty(ptr unsafe.Pointer) bool { - labelsPtr := (*labels.Labels)(ptr) - return len(*labelsPtr) == 0 -} - -// Decode via jsoniter's float64 routine is faster than getting the string data and decoding as two integers -func decodeModelTime(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - pt := (*model.Time)(ptr) - f := iter.ReadFloat64() - *pt = model.Time(int64(f * 1000)) -} - -// Write out the timestamp as an int divided by 1000. This is ~3x faster than converting to a float. -// Adapted from https://github.com/prometheus/prometheus/blob/cc39021b2bb6f829c7a626e4bdce2f338d1b76db/web/api/v1/api.go#L829 -func encodeModelTime(ptr unsafe.Pointer, stream *jsoniter.Stream) { - pt := (*model.Time)(ptr) - t := int64(*pt) - if t < 0 { - stream.WriteRaw(`-`) - t = -t - } - stream.WriteInt64(t / 1000) - fraction := t % 1000 - if fraction != 0 { - stream.WriteRaw(`.`) - if fraction < 100 { - stream.WriteRaw(`0`) - } - if fraction < 10 { - stream.WriteRaw(`0`) - } - stream.WriteInt64(fraction) - } -} - -func modelTimeIsEmpty(ptr unsafe.Pointer) bool { - return false -} diff --git a/internal/cortex/chunk/opts.go b/internal/cortex/chunk/opts.go deleted file mode 100644 index e948d98ca5..0000000000 --- a/internal/cortex/chunk/opts.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "strings" - "unicode/utf8" -) - -// Bitmap used by func isRegexMetaCharacter to check whether a character needs to be escaped. -var regexMetaCharacterBytes [16]byte - -// isRegexMetaCharacter reports whether byte b needs to be escaped. -func isRegexMetaCharacter(b byte) bool { - return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0 -} - -func init() { - for _, b := range []byte(`.+*?()|[]{}^$`) { - regexMetaCharacterBytes[b%16] |= 1 << (b / 16) - } -} - -// FindSetMatches returns list of values that can be equality matched on. -// copied from Prometheus querier.go, removed check for Prometheus wrapper. 
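// encodeModelTime in the json_helpers.go above renders a millisecond
// timestamp as seconds with up to three fractional digits using only
// integer operations. The same logic as a plain function, assuming nothing
// beyond the standard library; formatModelTime is an illustrative name.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatModelTime renders t (milliseconds) the way the deleted encoder does:
// integer seconds, then a fractional part only when it is non-zero, padded
// so that e.g. 7 ms becomes ".007" rather than ".7".
func formatModelTime(t int64) string {
	var b strings.Builder
	if t < 0 {
		b.WriteByte('-')
		t = -t
	}
	b.WriteString(strconv.FormatInt(t/1000, 10))
	if fraction := t % 1000; fraction != 0 {
		b.WriteByte('.')
		if fraction < 100 {
			b.WriteByte('0')
		}
		if fraction < 10 {
			b.WriteByte('0')
		}
		b.WriteString(strconv.FormatInt(fraction, 10))
	}
	return b.String()
}

func main() {
	fmt.Println(formatModelTime(1625097601234)) // 1625097601.234
	fmt.Println(formatModelTime(9007))          // 9.007
	fmt.Println(formatModelTime(-1500))         // -1.500
}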
-func FindSetMatches(pattern string) []string { - escaped := false - sets := []*strings.Builder{{}} - for i := 0; i < len(pattern); i++ { - if escaped { - switch { - case isRegexMetaCharacter(pattern[i]): - sets[len(sets)-1].WriteByte(pattern[i]) - case pattern[i] == '\\': - sets[len(sets)-1].WriteByte('\\') - default: - return nil - } - escaped = false - } else { - switch { - case isRegexMetaCharacter(pattern[i]): - if pattern[i] == '|' { - sets = append(sets, &strings.Builder{}) - } else { - return nil - } - case pattern[i] == '\\': - escaped = true - default: - sets[len(sets)-1].WriteByte(pattern[i]) - } - } - } - matches := make([]string, 0, len(sets)) - for _, s := range sets { - if s.Len() > 0 { - matches = append(matches, s.String()) - } - } - return matches -} diff --git a/internal/cortex/chunk/opts_test.go b/internal/cortex/chunk/opts_test.go deleted file mode 100644 index 0d5fee4fb1..0000000000 --- a/internal/cortex/chunk/opts_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -// Refer to https://github.com/prometheus/prometheus/issues/2651. -func TestFindSetMatches(t *testing.T) { - cases := []struct { - pattern string - exp []string - }{ - // Simple sets. - { - pattern: "foo|bar|baz", - exp: []string{ - "foo", - "bar", - "baz", - }, - }, - // Simple sets containing escaped characters. - { - pattern: "fo\\.o|bar\\?|\\^baz", - exp: []string{ - "fo.o", - "bar?", - "^baz", - }, - }, - // Simple sets containing special characters without escaping. - { - pattern: "fo.o|bar?|^baz", - exp: nil, - }, - { - pattern: "foo\\|bar\\|baz", - exp: []string{ - "foo|bar|baz", - }, - }, - } - - for _, c := range cases { - matches := FindSetMatches(c.pattern) - require.Equal(t, c.exp, matches) - } -} diff --git a/internal/cortex/chunk/purger/delete_plan.pb.go b/internal/cortex/chunk/purger/delete_plan.pb.go deleted file mode 100644 index 6790c55c44..0000000000 --- a/internal/cortex/chunk/purger/delete_plan.pb.go +++ /dev/null @@ -1,1353 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: delete_plan.proto - -package purger - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/thanos-io/thanos/internal/cortex/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/thanos-io/thanos/internal/cortex/cortexpb" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// DeletePlan holds all the chunks that are supposed to be deleted within an interval(usually a day) -// This Proto file is used just for storing Delete Plans in proto format. 
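// isRegexMetaCharacter in the opts.go removed above packs the 13 regex
// metacharacters into a 16-byte bitmap: byte b sets bit (b/16) in slot b%16,
// so membership testing is one index plus one mask. The bitmap code below is
// reproduced verbatim from the deleted file; only main is added to show it.
package main

import (
	"fmt"
	"unicode/utf8"
)

var regexMetaCharacterBytes [16]byte

func init() {
	for _, b := range []byte(`.+*?()|[]{}^$`) {
		regexMetaCharacterBytes[b%16] |= 1 << (b / 16)
	}
}

func isRegexMetaCharacter(b byte) bool {
	return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0
}

func main() {
	for _, b := range []byte(`a.b|c`) {
		fmt.Printf("%q meta=%v\n", b, isRegexMetaCharacter(b))
	}
	// '.' and '|' report true; 'a', 'b', 'c' report false. Backslash is not
	// in the bitmap: FindSetMatches handles escaping separately.
}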
-type DeletePlan struct { - PlanInterval *Interval `protobuf:"bytes,1,opt,name=plan_interval,json=planInterval,proto3" json:"plan_interval,omitempty"` - ChunksGroup []ChunksGroup `protobuf:"bytes,2,rep,name=chunks_group,json=chunksGroup,proto3" json:"chunks_group"` -} - -func (m *DeletePlan) Reset() { *m = DeletePlan{} } -func (*DeletePlan) ProtoMessage() {} -func (*DeletePlan) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{0} -} -func (m *DeletePlan) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeletePlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeletePlan.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeletePlan) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeletePlan.Merge(m, src) -} -func (m *DeletePlan) XXX_Size() int { - return m.Size() -} -func (m *DeletePlan) XXX_DiscardUnknown() { - xxx_messageInfo_DeletePlan.DiscardUnknown(m) -} - -var xxx_messageInfo_DeletePlan proto.InternalMessageInfo - -func (m *DeletePlan) GetPlanInterval() *Interval { - if m != nil { - return m.PlanInterval - } - return nil -} - -func (m *DeletePlan) GetChunksGroup() []ChunksGroup { - if m != nil { - return m.ChunksGroup - } - return nil -} - -// ChunksGroup holds ChunkDetails and Labels for a group of chunks which have same series ID -type ChunksGroup struct { - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/internal/cortex/cortexpb.LabelAdapter" json:"labels"` - Chunks []ChunkDetails `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` -} - -func (m *ChunksGroup) Reset() { *m = ChunksGroup{} } -func (*ChunksGroup) ProtoMessage() {} -func (*ChunksGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{1} -} -func (m *ChunksGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChunksGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChunksGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChunksGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChunksGroup.Merge(m, src) -} -func (m *ChunksGroup) XXX_Size() int { - return m.Size() -} -func (m *ChunksGroup) XXX_DiscardUnknown() { - xxx_messageInfo_ChunksGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_ChunksGroup proto.InternalMessageInfo - -func (m *ChunksGroup) GetChunks() []ChunkDetails { - if m != nil { - return m.Chunks - } - return nil -} - -type ChunkDetails struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - PartiallyDeletedInterval *Interval `protobuf:"bytes,2,opt,name=partially_deleted_interval,json=partiallyDeletedInterval,proto3" json:"partially_deleted_interval,omitempty"` -} - -func (m *ChunkDetails) Reset() { *m = ChunkDetails{} } -func (*ChunkDetails) ProtoMessage() {} -func (*ChunkDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{2} -} -func (m *ChunkDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChunkDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChunkDetails.Marshal(b, m, 
deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChunkDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChunkDetails.Merge(m, src) -} -func (m *ChunkDetails) XXX_Size() int { - return m.Size() -} -func (m *ChunkDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ChunkDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_ChunkDetails proto.InternalMessageInfo - -func (m *ChunkDetails) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *ChunkDetails) GetPartiallyDeletedInterval() *Interval { - if m != nil { - return m.PartiallyDeletedInterval - } - return nil -} - -type Interval struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` -} - -func (m *Interval) Reset() { *m = Interval{} } -func (*Interval) ProtoMessage() {} -func (*Interval) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{3} -} -func (m *Interval) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Interval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Interval.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Interval) XXX_Merge(src proto.Message) { - xxx_messageInfo_Interval.Merge(m, src) -} -func (m *Interval) XXX_Size() int { - return m.Size() -} -func (m *Interval) XXX_DiscardUnknown() { - xxx_messageInfo_Interval.DiscardUnknown(m) -} - -var xxx_messageInfo_Interval proto.InternalMessageInfo - -func (m *Interval) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *Interval) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func init() { - proto.RegisterType((*DeletePlan)(nil), "purgeplan.DeletePlan") - proto.RegisterType((*ChunksGroup)(nil), "purgeplan.ChunksGroup") - proto.RegisterType((*ChunkDetails)(nil), "purgeplan.ChunkDetails") - proto.RegisterType((*Interval)(nil), "purgeplan.Interval") -} - -func init() { proto.RegisterFile("delete_plan.proto", fileDescriptor_c38868cf63b27372) } - -var fileDescriptor_c38868cf63b27372 = []byte{ - // 446 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x8b, 0xd4, 0x30, - 0x18, 0x6d, 0xba, 0x52, 0xdc, 0x74, 0x5c, 0xd6, 0x2c, 0x68, 0x99, 0x43, 0x76, 0xe9, 0x69, 0x0e, - 0xda, 0x81, 0x15, 0x41, 0x41, 0x90, 0x1d, 0x0b, 0x32, 0xa0, 0xb0, 0x16, 0x4f, 0x5e, 0x4a, 0xda, - 0xc6, 0x6e, 0xdd, 0xb4, 0x89, 0x69, 0x2a, 0x7a, 0xf3, 0xe6, 0xd5, 0x9f, 0xe1, 0x0f, 0xf0, 0x47, - 0xec, 0x71, 0x8e, 0x8b, 0x87, 0xc1, 0xe9, 0x5c, 0x3c, 0xce, 0x4f, 0x90, 0xa6, 0xed, 0x4c, 0x15, - 0x3c, 0x78, 0xcb, 0xfb, 0xde, 0x7b, 0xc9, 0xcb, 0x4b, 0xe0, 0xed, 0x84, 0x32, 0xaa, 0x68, 0x28, - 0x18, 0x29, 0x3c, 0x21, 0xb9, 0xe2, 0x68, 0x5f, 0x54, 0x32, 0xa5, 0xcd, 0x60, 0x7c, 0x3f, 0xcd, - 0xd4, 0x45, 0x15, 0x79, 0x31, 0xcf, 0xa7, 0x29, 0x4f, 0xf9, 0x54, 0x2b, 0xa2, 0xea, 0xad, 0x46, - 0x1a, 0xe8, 0x55, 0xeb, 0x1c, 0x3f, 0x1e, 0xc8, 0x63, 0x2e, 0x15, 0xfd, 0x28, 0x24, 0x7f, 0x47, - 0x63, 0xd5, 0xa1, 0xa9, 0xb8, 0x4c, 0x7b, 0x22, 0xea, 0x16, 0xad, 0xd5, 0xfd, 0x02, 0x20, 0xf4, - 
0x75, 0x94, 0x73, 0x46, 0x0a, 0xf4, 0x08, 0xde, 0x6a, 0x02, 0x84, 0x59, 0xa1, 0xa8, 0xfc, 0x40, - 0x98, 0x03, 0x4e, 0xc0, 0xc4, 0x3e, 0x3d, 0xf2, 0xb6, 0xd9, 0xbc, 0x79, 0x47, 0x05, 0xa3, 0x06, - 0xf6, 0x08, 0x3d, 0x85, 0xa3, 0xf8, 0xa2, 0x2a, 0x2e, 0xcb, 0x30, 0x95, 0xbc, 0x12, 0x8e, 0x79, - 0xb2, 0x37, 0xb1, 0x4f, 0xef, 0x0c, 0x8c, 0xcf, 0x34, 0xfd, 0xbc, 0x61, 0x67, 0x37, 0xae, 0x96, - 0xc7, 0x46, 0x60, 0xc7, 0xbb, 0x91, 0xfb, 0x1d, 0x40, 0x7b, 0x20, 0x41, 0x05, 0xb4, 0x18, 0x89, - 0x28, 0x2b, 0x1d, 0xa0, 0xb7, 0x3a, 0xf2, 0xfa, 0x1b, 0x78, 0x2f, 0x9a, 0xf9, 0x39, 0xc9, 0xe4, - 0xec, 0xac, 0xd9, 0xe7, 0xc7, 0xf2, 0xf8, 0xbf, 0x1a, 0x68, 0xfd, 0x67, 0x09, 0x11, 0x8a, 0xca, - 0xa0, 0x3b, 0x05, 0x3d, 0x84, 0x56, 0x1b, 0xa7, 0x8b, 0x7e, 0xf7, 0xef, 0xe8, 0x3e, 0x55, 0x24, - 0x63, 0x65, 0x97, 0xbd, 0x13, 0xbb, 0xef, 0xe1, 0x68, 0xc8, 0xa2, 0x03, 0x68, 0xce, 0x7d, 0x5d, - 0xdb, 0x7e, 0x60, 0xce, 0x7d, 0xf4, 0x0a, 0x8e, 0x05, 0x91, 0x2a, 0x23, 0x8c, 0x7d, 0x0a, 0xdb, - 0x47, 0x4f, 0x76, 0xf5, 0x9a, 0xff, 0xae, 0xd7, 0xd9, 0xda, 0xda, 0xf7, 0x49, 0x7a, 0xc6, 0x8d, - 0xe0, 0xcd, 0x6d, 0xed, 0xf7, 0x20, 0x2a, 0x15, 0x91, 0x2a, 0x54, 0x59, 0x4e, 0x4b, 0x45, 0x72, - 0x11, 0xe6, 0xa5, 0x3e, 0x7e, 0x2f, 0x38, 0xd4, 0xcc, 0xeb, 0x9e, 0x78, 0x59, 0xa2, 0x09, 0x3c, - 0xa4, 0x45, 0xf2, 0xa7, 0xd6, 0xd4, 0xda, 0x03, 0x5a, 0x24, 0x03, 0xe5, 0xec, 0xc9, 0x62, 0x85, - 0x8d, 0xeb, 0x15, 0x36, 0x36, 0x2b, 0x0c, 0x3e, 0xd7, 0x18, 0x7c, 0xab, 0x31, 0xb8, 0xaa, 0x31, - 0x58, 0xd4, 0x18, 0xfc, 0xac, 0x31, 0xf8, 0x55, 0x63, 0x63, 0x53, 0x63, 0xf0, 0x75, 0x8d, 0x8d, - 0xc5, 0x1a, 0x1b, 0xd7, 0x6b, 0x6c, 0xbc, 0xb1, 0xf4, 0x3d, 0x64, 0x64, 0xe9, 0xcf, 0xf5, 0xe0, - 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x46, 0x96, 0xf6, 0xe6, 0x02, 0x00, 0x00, -} - -func (this *DeletePlan) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DeletePlan) - if !ok { - that2, ok := that.(DeletePlan) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.PlanInterval.Equal(that1.PlanInterval) { - return false - } - if len(this.ChunksGroup) != len(that1.ChunksGroup) { - return false - } - for i := range this.ChunksGroup { - if !this.ChunksGroup[i].Equal(&that1.ChunksGroup[i]) { - return false - } - } - return true -} -func (this *ChunksGroup) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChunksGroup) - if !ok { - that2, ok := that.(ChunksGroup) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(&that1.Chunks[i]) { - return false - } - } - return true -} -func (this *ChunkDetails) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChunkDetails) - if !ok { - that2, ok := that.(ChunkDetails) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ID != that1.ID { - return false - } - if !this.PartiallyDeletedInterval.Equal(that1.PartiallyDeletedInterval) { - return false - } - return true -} -func (this *Interval) 
Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Interval) - if !ok { - that2, ok := that.(Interval) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - return true -} -func (this *DeletePlan) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.DeletePlan{") - if this.PlanInterval != nil { - s = append(s, "PlanInterval: "+fmt.Sprintf("%#v", this.PlanInterval)+",\n") - } - if this.ChunksGroup != nil { - vs := make([]*ChunksGroup, len(this.ChunksGroup)) - for i := range vs { - vs[i] = &this.ChunksGroup[i] - } - s = append(s, "ChunksGroup: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ChunksGroup) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.ChunksGroup{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Chunks != nil { - vs := make([]*ChunkDetails, len(this.Chunks)) - for i := range vs { - vs[i] = &this.Chunks[i] - } - s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ChunkDetails) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.ChunkDetails{") - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - if this.PartiallyDeletedInterval != nil { - s = append(s, "PartiallyDeletedInterval: "+fmt.Sprintf("%#v", this.PartiallyDeletedInterval)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Interval) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.Interval{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringDeletePlan(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *DeletePlan) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeletePlan) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeletePlan) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChunksGroup) > 0 { - for iNdEx := len(m.ChunksGroup) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChunksGroup[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.PlanInterval != nil { - { - size, err := m.PlanInterval.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ChunksGroup) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChunksGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChunksGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChunkDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChunkDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChunkDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PartiallyDeletedInterval != nil { - { - size, err := m.PartiallyDeletedInterval.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintDeletePlan(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Interval) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Interval) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Interval) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.EndTimestampMs != 0 { - i = encodeVarintDeletePlan(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintDeletePlan(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintDeletePlan(dAtA []byte, offset int, v uint64) int { - offset -= sovDeletePlan(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DeletePlan) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PlanInterval != nil { - l = m.PlanInterval.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - if len(m.ChunksGroup) > 0 { - for _, e := range m.ChunksGroup { - l = e.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - } - return n -} - -func (m *ChunksGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + 
sovDeletePlan(uint64(l)) - } - } - return n -} - -func (m *ChunkDetails) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovDeletePlan(uint64(l)) - } - if m.PartiallyDeletedInterval != nil { - l = m.PartiallyDeletedInterval.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - return n -} - -func (m *Interval) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovDeletePlan(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovDeletePlan(uint64(m.EndTimestampMs)) - } - return n -} - -func sovDeletePlan(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDeletePlan(x uint64) (n int) { - return sovDeletePlan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *DeletePlan) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunksGroup := "[]ChunksGroup{" - for _, f := range this.ChunksGroup { - repeatedStringForChunksGroup += strings.Replace(strings.Replace(f.String(), "ChunksGroup", "ChunksGroup", 1), `&`, ``, 1) + "," - } - repeatedStringForChunksGroup += "}" - s := strings.Join([]string{`&DeletePlan{`, - `PlanInterval:` + strings.Replace(this.PlanInterval.String(), "Interval", "Interval", 1) + `,`, - `ChunksGroup:` + repeatedStringForChunksGroup + `,`, - `}`, - }, "") - return s -} -func (this *ChunksGroup) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunks := "[]ChunkDetails{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "ChunkDetails", "ChunkDetails", 1), `&`, ``, 1) + "," - } - repeatedStringForChunks += "}" - s := strings.Join([]string{`&ChunksGroup{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + repeatedStringForChunks + `,`, - `}`, - }, "") - return s -} -func (this *ChunkDetails) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ChunkDetails{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `PartiallyDeletedInterval:` + strings.Replace(this.PartiallyDeletedInterval.String(), "Interval", "Interval", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Interval) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Interval{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `}`, - }, "") - return s -} -func valueToStringDeletePlan(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *DeletePlan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeletePlan: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeletePlan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanInterval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PlanInterval == nil { - m.PlanInterval = &Interval{} - } - if err := m.PlanInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChunksGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChunksGroup = append(m.ChunksGroup, ChunksGroup{}) - if err := m.ChunksGroup[len(m.ChunksGroup)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChunksGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChunksGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChunksGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 
{ - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, ChunkDetails{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChunkDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChunkDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChunkDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartiallyDeletedInterval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PartiallyDeletedInterval == nil { - m.PartiallyDeletedInterval = &Interval{} - } - if err := m.PartiallyDeletedInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Interval) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Interval: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Interval: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDeletePlan(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDeletePlan - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthDeletePlan - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipDeletePlan(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthDeletePlan - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - 
panic("unreachable") -} - -var ( - ErrInvalidLengthDeletePlan = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDeletePlan = fmt.Errorf("proto: integer overflow") -) diff --git a/internal/cortex/chunk/purger/delete_plan.proto b/internal/cortex/chunk/purger/delete_plan.proto deleted file mode 100644 index 8848531a69..0000000000 --- a/internal/cortex/chunk/purger/delete_plan.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -syntax = "proto3"; - -package purgeplan; - -option go_package = "purger"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/thanos-io/thanos/internal/cortex/cortexpb/cortex.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// DeletePlan holds all the chunks that are supposed to be deleted within an interval(usually a day) -// This Proto file is used just for storing Delete Plans in proto format. -message DeletePlan { - Interval plan_interval = 1; - repeated ChunksGroup chunks_group = 2 [(gogoproto.nullable) = false]; -} - -// ChunksGroup holds ChunkDetails and Labels for a group of chunks which have same series ID -message ChunksGroup { - repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/internal/cortex/cortexpb.LabelAdapter"]; - repeated ChunkDetails chunks = 2 [(gogoproto.nullable) = false]; -} - -message ChunkDetails { - string ID = 1; - Interval partially_deleted_interval = 2; -} - -message Interval { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; -} diff --git a/internal/cortex/chunk/purger/delete_requests_store.go b/internal/cortex/chunk/purger/delete_requests_store.go deleted file mode 100644 index 4113ad2a71..0000000000 --- a/internal/cortex/chunk/purger/delete_requests_store.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package purger - -import ( - "context" - "encoding/binary" - "encoding/hex" - "errors" - "flag" - "fmt" - "hash/fnv" - "strconv" - "strings" - "time" - - "github.com/thanos-io/thanos/internal/cortex/chunk" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" -) - -type ( - DeleteRequestStatus string - CacheKind string - indexType string -) - -const ( - StatusReceived DeleteRequestStatus = "received" - StatusBuildingPlan DeleteRequestStatus = "buildingPlan" - StatusDeleting DeleteRequestStatus = "deleting" - StatusProcessed DeleteRequestStatus = "processed" - - separator = "\000" // separator for series selectors in delete requests - - // CacheKindStore is for cache gen number for store cache - CacheKindStore CacheKind = "store" - // CacheKindResults is for cache gen number for results cache - CacheKindResults CacheKind = "results" - - deleteRequestID indexType = "1" - deleteRequestDetails indexType = "2" - cacheGenNum indexType = "3" -) - -var ( - pendingDeleteRequestStatuses = []DeleteRequestStatus{StatusReceived, StatusBuildingPlan, StatusDeleting} - - ErrDeleteRequestNotFound = errors.New("could not find matching delete request") -) - -// DeleteRequest holds all the details about a delete request. 
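// A hypothetical round trip through the generated purger types deleted
// above, showing how a day's DeletePlan was assembled and serialized. The
// concrete values are illustrative, and since the package lives under
// internal/ this could only be built from inside the Thanos module before
// the removal.
package main

import (
	"fmt"

	"github.com/thanos-io/thanos/internal/cortex/chunk/purger"
)

func main() {
	plan := purger.DeletePlan{
		// One plan usually covers a single day.
		PlanInterval: &purger.Interval{StartTimestampMs: 0, EndTimestampMs: 86400000},
		ChunksGroup: []purger.ChunksGroup{{
			Chunks: []purger.ChunkDetails{{
				ID: "chunk-1",
				// Sub-range of the chunk affected when it is only partially covered.
				PartiallyDeletedInterval: &purger.Interval{StartTimestampMs: 1000, EndTimestampMs: 2000},
			}},
		}},
	}

	buf, err := plan.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded purger.DeletePlan
	if err := decoded.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Equal(&plan)) // true
}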
-type DeleteRequest struct { - RequestID string `json:"request_id"` - UserID string `json:"-"` - StartTime model.Time `json:"start_time"` - EndTime model.Time `json:"end_time"` - Selectors []string `json:"selectors"` - Status DeleteRequestStatus `json:"status"` - Matchers [][]*labels.Matcher `json:"-"` - CreatedAt model.Time `json:"created_at"` -} - -// cacheGenNumbers holds store and results cache gen numbers for a user. -type cacheGenNumbers struct { - store, results string -} - -// DeleteStore provides all the methods required to manage lifecycle of delete request and things related to it. -type DeleteStore struct { - cfg DeleteStoreConfig - indexClient chunk.IndexClient -} - -// DeleteStoreConfig holds configuration for delete store. -type DeleteStoreConfig struct { - Store string `yaml:"store"` - RequestsTableName string `yaml:"requests_table_name"` - ProvisionConfig TableProvisioningConfig `yaml:"table_provisioning"` -} - -// RegisterFlags adds the flags required to configure this flag set. -func (cfg *DeleteStoreConfig) RegisterFlags(f *flag.FlagSet) { - cfg.ProvisionConfig.RegisterFlags("deletes.table", f) - f.StringVar(&cfg.Store, "deletes.store", "", "Store for keeping delete request") - f.StringVar(&cfg.RequestsTableName, "deletes.requests-table-name", "delete_requests", "Name of the table which stores delete requests") -} - -// NewDeleteStore creates a store for managing delete requests. -func NewDeleteStore(cfg DeleteStoreConfig, indexClient chunk.IndexClient) (*DeleteStore, error) { - ds := DeleteStore{ - cfg: cfg, - indexClient: indexClient, - } - - return &ds, nil -} - -// Add creates entries for a new delete request. -func (ds *DeleteStore) AddDeleteRequest(ctx context.Context, userID string, startTime, endTime model.Time, selectors []string) error { - return ds.addDeleteRequest(ctx, userID, model.Now(), startTime, endTime, selectors) - -} - -// addDeleteRequest is also used for tests to create delete requests with different createdAt time. -func (ds *DeleteStore) addDeleteRequest(ctx context.Context, userID string, createdAt, startTime, endTime model.Time, selectors []string) error { - requestID := generateUniqueID(userID, selectors) - - for { - _, err := ds.GetDeleteRequest(ctx, userID, string(requestID)) - if err != nil { - if err == ErrDeleteRequestNotFound { - break - } - return err - } - - // we have a collision here, lets recreate a new requestID and check for collision - time.Sleep(time.Millisecond) - requestID = generateUniqueID(userID, selectors) - } - - // userID, requestID - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - // Add an entry with userID, requestID as range key and status as value to make it easy to manage and lookup status - // We don't want to set anything in hash key here since we would want to find delete requests by just status - writeBatch := ds.indexClient.NewWriteBatch() - writeBatch.Add(ds.cfg.RequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID), []byte(StatusReceived)) - - // Add another entry with additional details like creation time, time range of delete request and selectors in value - rangeValue := fmt.Sprintf("%x:%x:%x", int64(createdAt), int64(startTime), int64(endTime)) - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s", deleteRequestDetails, userIDAndRequestID), - []byte(rangeValue), []byte(strings.Join(selectors, separator))) - - // we update only cache gen number because only query responses are changing at this stage. 
- // we still have to query data from store for doing query time filtering and we don't want to invalidate its results now. - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, CacheKindResults), - []byte{}, []byte(strconv.FormatInt(time.Now().Unix(), 10))) - - return ds.indexClient.BatchWrite(ctx, writeBatch) -} - -// GetDeleteRequestsByStatus returns all delete requests for given status. -func (ds *DeleteStore) GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - ValueEqual: []byte(status), - }) -} - -// GetDeleteRequestsForUserByStatus returns all delete requests for a user with given status. -func (ds *DeleteStore) GetDeleteRequestsForUserByStatus(ctx context.Context, userID string, status DeleteRequestStatus) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - RangeValuePrefix: []byte(userID), - ValueEqual: []byte(status), - }) -} - -// GetAllDeleteRequestsForUser returns all delete requests for a user. -func (ds *DeleteStore) GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - RangeValuePrefix: []byte(userID), - }) -} - -// UpdateStatus updates status of a delete request. -func (ds *DeleteStore) UpdateStatus(ctx context.Context, userID, requestID string, newStatus DeleteRequestStatus) error { - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - writeBatch := ds.indexClient.NewWriteBatch() - writeBatch.Add(ds.cfg.RequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID), []byte(newStatus)) - - if newStatus == StatusProcessed { - // we have deleted data from store so invalidate cache only for store since we don't have to do runtime filtering anymore. - // we don't have to change cache gen number because we were anyways doing runtime filtering - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, CacheKindStore), []byte{}, []byte(strconv.FormatInt(time.Now().Unix(), 10))) - } - - return ds.indexClient.BatchWrite(ctx, writeBatch) -} - -// GetDeleteRequest returns delete request with given requestID. -func (ds *DeleteStore) GetDeleteRequest(ctx context.Context, userID, requestID string) (*DeleteRequest, error) { - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - deleteRequests, err := ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - RangeValuePrefix: []byte(userIDAndRequestID), - }) - - if err != nil { - return nil, err - } - - if len(deleteRequests) == 0 { - return nil, ErrDeleteRequestNotFound - } - - return &deleteRequests[0], nil -} - -// GetPendingDeleteRequestsForUser returns all delete requests for a user which are not processed. 
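// The delete store above encodes a request's timestamps into a single range
// value as three colon-separated hex integers, "createdAt:start:end", which
// parseDeleteRequestTimestamps (further below) reverses. The round trip in
// isolation; encodeTimestamps and decodeTimestamps are illustrative names,
// not part of the deleted code.
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

func encodeTimestamps(createdAt, start, end int64) string {
	return fmt.Sprintf("%x:%x:%x", createdAt, start, end)
}

func decodeTimestamps(rangeValue string) (createdAt, start, end int64, err error) {
	parts := strings.Split(rangeValue, ":")
	if len(parts) != 3 {
		return 0, 0, 0, errors.New("invalid delete request range value")
	}
	if createdAt, err = strconv.ParseInt(parts[0], 16, 64); err != nil {
		return
	}
	if start, err = strconv.ParseInt(parts[1], 16, 64); err != nil {
		return
	}
	end, err = strconv.ParseInt(parts[2], 16, 64)
	return
}

func main() {
	rv := encodeTimestamps(1700000000000, 1600000000000, 1600086400000)
	fmt.Println(rv) // three colon-separated hex ints
	c, s, e, _ := decodeTimestamps(rv)
	fmt.Println(c == 1700000000000 && s == 1600000000000 && e == 1600086400000) // true
}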
-func (ds *DeleteStore) GetPendingDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { - pendingDeleteRequests := []DeleteRequest{} - for _, status := range pendingDeleteRequestStatuses { - deleteRequests, err := ds.GetDeleteRequestsForUserByStatus(ctx, userID, status) - if err != nil { - return nil, err - } - - pendingDeleteRequests = append(pendingDeleteRequests, deleteRequests...) - } - - return pendingDeleteRequests, nil -} - -func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery chunk.IndexQuery) ([]DeleteRequest, error) { - deleteRequests := []DeleteRequest{} - // No need to lock inside the callback since we run a single index query. - err := ds.indexClient.QueryPages(ctx, []chunk.IndexQuery{deleteQuery}, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - for itr.Next() { - userID, requestID := splitUserIDAndRequestID(string(itr.RangeValue())) - - deleteRequests = append(deleteRequests, DeleteRequest{ - UserID: userID, - RequestID: requestID, - Status: DeleteRequestStatus(itr.Value()), - }) - } - return true - }) - if err != nil { - return nil, err - } - - for i, deleteRequest := range deleteRequests { - deleteRequestQuery := []chunk.IndexQuery{ - { - TableName: ds.cfg.RequestsTableName, - HashValue: fmt.Sprintf("%s:%s:%s", deleteRequestDetails, deleteRequest.UserID, deleteRequest.RequestID), - }, - } - - var parseError error - err := ds.indexClient.QueryPages(ctx, deleteRequestQuery, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - itr.Next() - - deleteRequest, err = parseDeleteRequestTimestamps(itr.RangeValue(), deleteRequest) - if err != nil { - parseError = err - return false - } - - deleteRequest.Selectors = strings.Split(string(itr.Value()), separator) - deleteRequests[i] = deleteRequest - - return true - }) - - if err != nil { - return nil, err - } - - if parseError != nil { - return nil, parseError - } - } - - return deleteRequests, nil -} - -// getCacheGenerationNumbers returns cache gen numbers for a user. 
-func (ds *DeleteStore) getCacheGenerationNumbers(ctx context.Context, userID string) (*cacheGenNumbers, error) { - storeCacheGen, err := ds.queryCacheGenerationNumber(ctx, userID, CacheKindStore) - if err != nil { - return nil, err - } - - resultsCacheGen, err := ds.queryCacheGenerationNumber(ctx, userID, CacheKindResults) - if err != nil { - return nil, err - } - - return &cacheGenNumbers{storeCacheGen, resultsCacheGen}, nil -} - -func (ds *DeleteStore) queryCacheGenerationNumber(ctx context.Context, userID string, kind CacheKind) (string, error) { - query := chunk.IndexQuery{TableName: ds.cfg.RequestsTableName, HashValue: fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, kind)} - - genNumber := "" - err := ds.indexClient.QueryPages(ctx, []chunk.IndexQuery{query}, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - for itr.Next() { - genNumber = string(itr.Value()) - break - } - return false - }) - - if err != nil { - return "", err - } - - return genNumber, nil -} - -// RemoveDeleteRequest removes a delete request and increments the results cache gen number. -func (ds *DeleteStore) RemoveDeleteRequest(ctx context.Context, userID, requestID string, createdAt, startTime, endTime model.Time) error { - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - writeBatch := ds.indexClient.NewWriteBatch() - writeBatch.Delete(ds.cfg.RequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID)) - - // Delete the other entry which holds additional details like creation time, time range of the delete request and selectors - rangeValue := fmt.Sprintf("%x:%x:%x", int64(createdAt), int64(startTime), int64(endTime)) - writeBatch.Delete(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s", deleteRequestDetails, userIDAndRequestID), - []byte(rangeValue)) - - // we need to invalidate the results cache since removal of a delete request would cause query results to change - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, CacheKindResults), - []byte{}, []byte(strconv.FormatInt(time.Now().Unix(), 10))) - - return ds.indexClient.BatchWrite(ctx, writeBatch) -} - -func parseDeleteRequestTimestamps(rangeValue []byte, deleteRequest DeleteRequest) (DeleteRequest, error) { - hexParts := strings.Split(string(rangeValue), ":") - if len(hexParts) != 3 { - return deleteRequest, errors.New("invalid key in parsing delete request lookup response") - } - - createdAt, err := strconv.ParseInt(hexParts[0], 16, 64) - if err != nil { - return deleteRequest, err - } - - from, err := strconv.ParseInt(hexParts[1], 16, 64) - if err != nil { - return deleteRequest, err - } - - through, err := strconv.ParseInt(hexParts[2], 16, 64) - if err != nil { - return deleteRequest, err - } - - deleteRequest.CreatedAt = model.Time(createdAt) - deleteRequest.StartTime = model.Time(from) - deleteRequest.EndTime = model.Time(through) - - return deleteRequest, nil -} - -// generateUniqueID generates an ID which is useful in managing delete requests -func generateUniqueID(orgID string, selectors []string) []byte { - uniqueID := fnv.New32() - _, _ = uniqueID.Write([]byte(orgID)) - - timeNow := make([]byte, 8) - binary.LittleEndian.PutUint64(timeNow, uint64(time.Now().UnixNano())) - _, _ = uniqueID.Write(timeNow) - - for _, selector := range selectors { - _, _ = uniqueID.Write([]byte(selector)) - } - - return encodeUniqueID(uniqueID.Sum32()) -} - -func encodeUniqueID(t uint32) []byte { - throughBytes := make([]byte, 4) - binary.BigEndian.PutUint32(throughBytes, t) - encodedThroughBytes := make([]byte, 8) -
hex.Encode(encodedThroughBytes, throughBytes) - return encodedThroughBytes -} - -func splitUserIDAndRequestID(rangeValue string) (userID, requestID string) { - lastIndex := strings.LastIndex(rangeValue, ":") - - userID = rangeValue[:lastIndex] - requestID = rangeValue[lastIndex+1:] - - return -} diff --git a/internal/cortex/chunk/purger/purger.go b/internal/cortex/chunk/purger/purger.go deleted file mode 100644 index fd58e6e011..0000000000 --- a/internal/cortex/chunk/purger/purger.go +++ /dev/null @@ -1,831 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package purger - -import ( - "bytes" - "context" - "flag" - "fmt" - "io/ioutil" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" - "github.com/weaveworks/common/user" - - "github.com/thanos-io/thanos/internal/cortex/chunk" - "github.com/thanos-io/thanos/internal/cortex/cortexpb" - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" - "github.com/thanos-io/thanos/internal/cortex/util/services" -) - -const ( - millisecondPerDay = int64(24 * time.Hour / time.Millisecond) - statusSuccess = "success" - statusFail = "fail" - loadRequestsInterval = time.Hour - retryFailedRequestsInterval = 15 * time.Minute -) - -type purgerMetrics struct { - deleteRequestsProcessedTotal *prometheus.CounterVec - deleteRequestsChunksSelectedTotal *prometheus.CounterVec - deleteRequestsProcessingFailures *prometheus.CounterVec - loadPendingRequestsAttempsTotal *prometheus.CounterVec - oldestPendingDeleteRequestAgeSeconds prometheus.Gauge - pendingDeleteRequestsCount prometheus.Gauge -} - -func newPurgerMetrics(r prometheus.Registerer) *purgerMetrics { - m := purgerMetrics{} - - m.deleteRequestsProcessedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_delete_requests_processed_total", - Help: "Number of delete requests processed per user", - }, []string{"user"}) - m.deleteRequestsChunksSelectedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_delete_requests_chunks_selected_total", - Help: "Number of chunks selected while building delete plans per user", - }, []string{"user"}) - m.deleteRequestsProcessingFailures = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_delete_requests_processing_failures_total", - Help: "Number of delete requests processing failures per user", - }, []string{"user"}) - m.loadPendingRequestsAttempsTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_load_pending_requests_attempts_total", - Help: "Number of attempts that were made to load pending requests with status", - }, []string{"status"}) - m.oldestPendingDeleteRequestAgeSeconds = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "purger_oldest_pending_delete_request_age_seconds", - Help: "Age of oldest pending delete request in seconds, since they are over their cancellation period", - }) - m.pendingDeleteRequestsCount = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "purger_pending_delete_requests_count", - Help: "Count of delete requests which 
are over their cancellation period and have not finished processing yet", - }) - - return &m -} - -type deleteRequestWithLogger struct { - DeleteRequest - logger log.Logger // logger is initialized with userID and requestID to add context to every log generated using this -} - -// Config holds config for the chunks Purger -type Config struct { - Enable bool `yaml:"enable"` - NumWorkers int `yaml:"num_workers"` - ObjectStoreType string `yaml:"object_store_type"` - DeleteRequestCancelPeriod time.Duration `yaml:"delete_request_cancel_period"` -} - -// RegisterFlags registers CLI flags for Config -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.BoolVar(&cfg.Enable, "purger.enable", false, "Enable purger to allow deletion of series. Be aware that the delete series feature is still experimental") - f.IntVar(&cfg.NumWorkers, "purger.num-workers", 2, "Number of workers executing delete plans in parallel") - f.StringVar(&cfg.ObjectStoreType, "purger.object-store-type", "", "Name of the object store to use for storing delete plans") - f.DurationVar(&cfg.DeleteRequestCancelPeriod, "purger.delete-request-cancel-period", 24*time.Hour, "Allow cancellation of a delete request until this duration after it is created. Data is deleted only after delete requests are older than this duration. Ideally this should be set to at least 24h.") -} - -type workerJob struct { - planNo int - userID string - deleteRequestID string - logger log.Logger -} - -// Purger purges the data which is requested to be deleted. Purger only works for chunks. -type Purger struct { - services.Service - - cfg Config - deleteStore *DeleteStore - chunkStore chunk.Store - objectClient chunk.ObjectClient - metrics *purgerMetrics - - executePlansChan chan deleteRequestWithLogger - workerJobChan chan workerJob - - // we only allow processing of a single delete request at a time since delete requests touching the same chunks could change the chunk IDs of partially deleted chunks - // and break the purge plan for other requests - inProcessRequests *inProcessRequestsCollection - - // We do not want to limit pulling new delete requests to a fixed interval which otherwise would limit the number of delete requests we process per user. - // While loading delete requests, if we find more requests from a user pending to be processed, we just set their ID in usersWithPendingRequests and - // when a user's delete request gets processed we just check this map to see whether we want to load more requests without waiting for the next ticker to load a new batch.
- usersWithPendingRequests map[string]struct{} - usersWithPendingRequestsMtx sync.Mutex - pullNewRequestsChan chan struct{} - - pendingPlansCount map[string]int // per request pending plan count - pendingPlansCountMtx sync.Mutex - - wg sync.WaitGroup -} - -// NewPurger creates a new Purger -func NewPurger(cfg Config, deleteStore *DeleteStore, chunkStore chunk.Store, storageClient chunk.ObjectClient, registerer prometheus.Registerer) (*Purger, error) { - util_log.WarnExperimentalUse("Delete series API") - - purger := Purger{ - cfg: cfg, - deleteStore: deleteStore, - chunkStore: chunkStore, - objectClient: storageClient, - metrics: newPurgerMetrics(registerer), - pullNewRequestsChan: make(chan struct{}, 1), - executePlansChan: make(chan deleteRequestWithLogger, 50), - workerJobChan: make(chan workerJob, 50), - inProcessRequests: newInProcessRequestsCollection(), - usersWithPendingRequests: map[string]struct{}{}, - pendingPlansCount: map[string]int{}, - } - - purger.Service = services.NewBasicService(purger.init, purger.loop, purger.stop) - return &purger, nil -} - -// init starts workers, scheduler and then loads in process delete requests -func (p *Purger) init(ctx context.Context) error { - for i := 0; i < p.cfg.NumWorkers; i++ { - p.wg.Add(1) - go p.worker() - } - - p.wg.Add(1) - go p.jobScheduler(ctx) - - return p.loadInprocessDeleteRequests() -} - -func (p *Purger) loop(ctx context.Context) error { - loadRequests := func() { - status := statusSuccess - - err := p.pullDeleteRequestsToPlanDeletes() - if err != nil { - status = statusFail - level.Error(util_log.Logger).Log("msg", "error pulling delete requests for building plans", "err", err) - } - - p.metrics.loadPendingRequestsAttempsTotal.WithLabelValues(status).Inc() - } - - // load requests on startup instead of waiting for first ticker - loadRequests() - - loadRequestsTicker := time.NewTicker(loadRequestsInterval) - defer loadRequestsTicker.Stop() - - retryFailedRequestsTicker := time.NewTicker(retryFailedRequestsInterval) - defer retryFailedRequestsTicker.Stop() - - for { - select { - case <-loadRequestsTicker.C: - loadRequests() - case <-p.pullNewRequestsChan: - loadRequests() - case <-retryFailedRequestsTicker.C: - p.retryFailedRequests() - case <-ctx.Done(): - return nil - } - } -} - -// Stop waits until all background tasks stop. 
-func (p *Purger) stop(_ error) error { - p.wg.Wait() - return nil -} - -func (p *Purger) retryFailedRequests() { - userIDsWithFailedRequest := p.inProcessRequests.listUsersWithFailedRequest() - - for _, userID := range userIDsWithFailedRequest { - deleteRequest := p.inProcessRequests.get(userID) - if deleteRequest == nil { - level.Error(util_log.Logger).Log("msg", "expected an in-process delete request", "user", userID) - continue - } - - p.inProcessRequests.unsetFailedRequestForUser(userID) - err := p.resumeStalledRequest(*deleteRequest) - if err != nil { - reqWithLogger := makeDeleteRequestWithLogger(*deleteRequest, util_log.Logger) - level.Error(reqWithLogger.logger).Log("msg", "failed to resume failed request", "err", err) - } - } -} - -func (p *Purger) workerJobCleanup(job workerJob) { - err := p.removeDeletePlan(context.Background(), job.userID, job.deleteRequestID, job.planNo) - if err != nil { - level.Error(job.logger).Log("msg", "error removing delete plan", - "plan_no", job.planNo, "err", err) - return - } - - p.pendingPlansCountMtx.Lock() - p.pendingPlansCount[job.deleteRequestID]-- - - if p.pendingPlansCount[job.deleteRequestID] == 0 { - level.Info(job.logger).Log("msg", "finished execution of all plans, cleaning up and updating status of request") - - err := p.deleteStore.UpdateStatus(context.Background(), job.userID, job.deleteRequestID, StatusProcessed) - if err != nil { - level.Error(job.logger).Log("msg", "error updating delete request status to processed", "err", err) - } - - p.metrics.deleteRequestsProcessedTotal.WithLabelValues(job.userID).Inc() - delete(p.pendingPlansCount, job.deleteRequestID) - p.pendingPlansCountMtx.Unlock() - - p.inProcessRequests.remove(job.userID) - - // request loading of more delete requests if - // - the user has more pending requests and - // - we do not have a pending request to load more requests - p.usersWithPendingRequestsMtx.Lock() - defer p.usersWithPendingRequestsMtx.Unlock() - if _, ok := p.usersWithPendingRequests[job.userID]; ok { - delete(p.usersWithPendingRequests, job.userID) - select { - case p.pullNewRequestsChan <- struct{}{}: - // sent - default: - // already sent - } - } else if len(p.usersWithPendingRequests) == 0 { - // there are no pending requests from any of the users, set the oldest pending request age and number of pending requests to 0 - p.metrics.oldestPendingDeleteRequestAgeSeconds.Set(0) - p.metrics.pendingDeleteRequestsCount.Set(0) - } - } else { - p.pendingPlansCountMtx.Unlock() - } -} - -// we send all the delete plans to workerJobChan -func (p *Purger) jobScheduler(ctx context.Context) { - defer p.wg.Done() - - for { - select { - case req := <-p.executePlansChan: - numPlans := numPlans(req.StartTime, req.EndTime) - level.Info(req.logger).Log("msg", "sending jobs to workers for purging data", "num_jobs", numPlans) - - p.pendingPlansCountMtx.Lock() - p.pendingPlansCount[req.RequestID] = numPlans - p.pendingPlansCountMtx.Unlock() - - for i := 0; i < numPlans; i++ { - p.workerJobChan <- workerJob{planNo: i, userID: req.UserID, - deleteRequestID: req.RequestID, logger: req.logger} - } - case <-ctx.Done(): - close(p.workerJobChan) - return - } - } -} - -func (p *Purger) worker() { - defer p.wg.Done() - - for job := range p.workerJobChan { - err := p.executePlan(job.userID, job.deleteRequestID, job.planNo, job.logger) - if err != nil { - p.metrics.deleteRequestsProcessingFailures.WithLabelValues(job.userID).Inc() - level.Error(job.logger).Log("msg", "error executing delete plan", - "plan_no", job.planNo, "err", err) - continue
- } - - p.workerJobCleanup(job) - } -} - -func (p *Purger) executePlan(userID, requestID string, planNo int, logger log.Logger) (err error) { - logger = log.With(logger, "plan_no", planNo) - - defer func() { - if err != nil { - p.inProcessRequests.setFailedRequestForUser(userID) - } - }() - - plan, err := p.getDeletePlan(context.Background(), userID, requestID, planNo) - if err != nil { - if err == chunk.ErrStorageObjectNotFound { - level.Info(logger).Log("msg", "plan not found, must have been executed already") - // this means plan was already executed and got removed. Do nothing. - return nil - } - return err - } - - level.Info(logger).Log("msg", "executing plan") - - ctx := user.InjectOrgID(context.Background(), userID) - - for i := range plan.ChunksGroup { - level.Debug(logger).Log("msg", "deleting chunks", "labels", plan.ChunksGroup[i].Labels) - - for _, chunkDetails := range plan.ChunksGroup[i].Chunks { - chunkRef, err := chunk.ParseExternalKey(userID, chunkDetails.ID) - if err != nil { - return err - } - - var partiallyDeletedInterval *model.Interval = nil - if chunkDetails.PartiallyDeletedInterval != nil { - partiallyDeletedInterval = &model.Interval{ - Start: model.Time(chunkDetails.PartiallyDeletedInterval.StartTimestampMs), - End: model.Time(chunkDetails.PartiallyDeletedInterval.EndTimestampMs), - } - } - - err = p.chunkStore.DeleteChunk(ctx, chunkRef.From, chunkRef.Through, chunkRef.UserID, - chunkDetails.ID, cortexpb.FromLabelAdaptersToLabels(plan.ChunksGroup[i].Labels), partiallyDeletedInterval) - if err != nil { - if isMissingChunkErr(err) { - level.Error(logger).Log("msg", "chunk not found for deletion. We may have already deleted it", - "chunk_id", chunkDetails.ID) - continue - } - return err - } - } - - level.Debug(logger).Log("msg", "deleting series", "labels", plan.ChunksGroup[i].Labels) - - // this is mostly required to clean up series ids from series store - err := p.chunkStore.DeleteSeriesIDs(ctx, model.Time(plan.PlanInterval.StartTimestampMs), model.Time(plan.PlanInterval.EndTimestampMs), - userID, cortexpb.FromLabelAdaptersToLabels(plan.ChunksGroup[i].Labels)) - if err != nil { - return err - } - } - - level.Info(logger).Log("msg", "finished execution of plan") - - return -} - -// we need to load all in process delete requests on startup to finish them first -func (p *Purger) loadInprocessDeleteRequests() error { - inprocessRequests, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusBuildingPlan) - if err != nil { - return err - } - - requestsWithDeletingStatus, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusDeleting) - if err != nil { - return err - } - - inprocessRequests = append(inprocessRequests, requestsWithDeletingStatus...) 
- - for i := range inprocessRequests { - deleteRequest := inprocessRequests[i] - p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) - req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - - level.Info(req.logger).Log("msg", "resuming in-process delete request", "status", deleteRequest.Status) - err = p.resumeStalledRequest(deleteRequest) - if err != nil { - level.Error(req.logger).Log("msg", "failed to resume stalled request", "err", err) - } - } - - return nil -} - -func (p *Purger) resumeStalledRequest(deleteRequest DeleteRequest) error { - req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - - if deleteRequest.Status == StatusBuildingPlan { - err := p.buildDeletePlan(req) - if err != nil { - p.metrics.deleteRequestsProcessingFailures.WithLabelValues(deleteRequest.UserID).Inc() - return errors.Wrap(err, "failed to build delete plan") - } - - deleteRequest.Status = StatusDeleting - } - - if deleteRequest.Status == StatusDeleting { - level.Info(req.logger).Log("msg", "sending delete request for execution") - p.executePlansChan <- req - } - - return nil -} - -// pullDeleteRequestsToPlanDeletes pulls delete requests which do not have their delete plans built yet and sends them for building delete plans. -// After pulling delete requests for building plans, it updates their status to StatusBuildingPlan to avoid picking them up again next time. -func (p *Purger) pullDeleteRequestsToPlanDeletes() error { - deleteRequests, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived) - if err != nil { - return err - } - - pendingDeleteRequestsCount := p.inProcessRequests.len() - now := model.Now() - oldestPendingRequestCreatedAt := model.Time(0) - - // requests which are still being processed are also considered pending - if pendingDeleteRequestsCount != 0 { - oldestInProcessRequest := p.inProcessRequests.getOldest() - if oldestInProcessRequest != nil { - oldestPendingRequestCreatedAt = oldestInProcessRequest.CreatedAt - } - } - - for i := range deleteRequests { - deleteRequest := deleteRequests[i] - - // adding an extra minute here to avoid a race between cancellation of a request and picking of the request for processing - if deleteRequest.CreatedAt.Add(p.cfg.DeleteRequestCancelPeriod).Add(time.Minute).After(model.Now()) { - continue - } - - pendingDeleteRequestsCount++ - if oldestPendingRequestCreatedAt == 0 || deleteRequest.CreatedAt.Before(oldestPendingRequestCreatedAt) { - oldestPendingRequestCreatedAt = deleteRequest.CreatedAt - } - - if inprocessDeleteRequest := p.inProcessRequests.get(deleteRequest.UserID); inprocessDeleteRequest != nil { - p.usersWithPendingRequestsMtx.Lock() - p.usersWithPendingRequests[deleteRequest.UserID] = struct{}{} - p.usersWithPendingRequestsMtx.Unlock() - - level.Debug(util_log.Logger).Log("msg", "skipping delete request processing for now since another request from the same user is already in process", - "inprocess_request_id", inprocessDeleteRequest.RequestID, - "skipped_request_id", deleteRequest.RequestID, "user_id", deleteRequest.UserID) - continue - } - - err = p.deleteStore.UpdateStatus(context.Background(), deleteRequest.UserID, deleteRequest.RequestID, StatusBuildingPlan) - if err != nil { - return err - } - - deleteRequest.Status = StatusBuildingPlan - p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) - req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - - level.Info(req.logger).Log("msg", "building plan for a new delete request") - - err :=
p.buildDeletePlan(req) - if err != nil { - p.metrics.deleteRequestsProcessingFailures.WithLabelValues(deleteRequest.UserID).Inc() - - // We intentionally do not remove this delete request from inProcessRequests to make sure - // we do not start processing multiple delete requests from the same user at once. - // None of the other delete requests from the user will be considered for processing until then. - level.Error(req.logger).Log("msg", "error building delete plan", "err", err) - return err - } - - level.Info(req.logger).Log("msg", "sending delete request for execution") - p.executePlansChan <- req - } - - // track the age of the oldest delete request since it went past its cancellation period - oldestPendingRequestAge := time.Duration(0) - if oldestPendingRequestCreatedAt != 0 { - oldestPendingRequestAge = now.Sub(oldestPendingRequestCreatedAt.Add(p.cfg.DeleteRequestCancelPeriod)) - } - p.metrics.oldestPendingDeleteRequestAgeSeconds.Set(float64(oldestPendingRequestAge / time.Second)) - p.metrics.pendingDeleteRequestsCount.Set(float64(pendingDeleteRequestsCount)) - - return nil -} - -// buildDeletePlan builds a per-day delete plan for the given delete request. -// A day's plan will include chunk IDs and labels of all the chunks which are supposed to be deleted. -// Chunks are grouped together by labels to avoid storing labels repetitively. -// After building the delete plans it updates the status of the delete request to StatusDeleting and sends it for execution. -func (p *Purger) buildDeletePlan(req deleteRequestWithLogger) (err error) { - ctx := context.Background() - ctx = user.InjectOrgID(ctx, req.UserID) - - defer func() { - if err != nil { - p.inProcessRequests.setFailedRequestForUser(req.UserID) - } else { - req.Status = StatusDeleting - p.inProcessRequests.set(req.UserID, &req.DeleteRequest) - } - }() - - perDayTimeRange := splitByDay(req.StartTime, req.EndTime) - level.Info(req.logger).Log("msg", "building delete plan", "num_plans", len(perDayTimeRange)) - - plans := make([][]byte, len(perDayTimeRange)) - includedChunkIDs := map[string]struct{}{} - - for i, planRange := range perDayTimeRange { - chunksGroups := []ChunksGroup{} - - for _, selector := range req.Selectors { - matchers, err := parser.ParseMetricSelector(selector) - if err != nil { - return err - } - - chunks, err := p.chunkStore.Get(ctx, req.UserID, planRange.Start, planRange.End, matchers...) - if err != nil { - return err - } - - var cg []ChunksGroup - cg, includedChunkIDs = groupChunks(chunks, req.StartTime, req.EndTime, includedChunkIDs) - - if len(cg) != 0 { - chunksGroups = append(chunksGroups, cg...)
- } - } - - plan := DeletePlan{ - PlanInterval: &Interval{ - StartTimestampMs: int64(planRange.Start), - EndTimestampMs: int64(planRange.End), - }, - ChunksGroup: chunksGroups, - } - - pb, err := proto.Marshal(&plan) - if err != nil { - return err - } - - plans[i] = pb - } - - err = p.putDeletePlans(ctx, req.UserID, req.RequestID, plans) - if err != nil { - return - } - - err = p.deleteStore.UpdateStatus(ctx, req.UserID, req.RequestID, StatusDeleting) - if err != nil { - return - } - - p.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(req.UserID).Add(float64(len(includedChunkIDs))) - - level.Info(req.logger).Log("msg", "built delete plans", "num_plans", len(perDayTimeRange)) - - return -} - -func (p *Purger) putDeletePlans(ctx context.Context, userID, requestID string, plans [][]byte) error { - for i, plan := range plans { - objectKey := buildObjectKeyForPlan(userID, requestID, i) - - err := p.objectClient.PutObject(ctx, objectKey, bytes.NewReader(plan)) - if err != nil { - return err - } - } - - return nil -} - -func (p *Purger) getDeletePlan(ctx context.Context, userID, requestID string, planNo int) (*DeletePlan, error) { - objectKey := buildObjectKeyForPlan(userID, requestID, planNo) - - readCloser, err := p.objectClient.GetObject(ctx, objectKey) - if err != nil { - return nil, err - } - - defer readCloser.Close() - - buf, err := ioutil.ReadAll(readCloser) - if err != nil { - return nil, err - } - - var plan DeletePlan - err = proto.Unmarshal(buf, &plan) - if err != nil { - return nil, err - } - - return &plan, nil -} - -func (p *Purger) removeDeletePlan(ctx context.Context, userID, requestID string, planNo int) error { - objectKey := buildObjectKeyForPlan(userID, requestID, planNo) - return p.objectClient.DeleteObject(ctx, objectKey) -} - -// returns interval per plan -func splitByDay(start, end model.Time) []model.Interval { - numOfDays := numPlans(start, end) - - perDayTimeRange := make([]model.Interval, numOfDays) - startOfNextDay := model.Time(((int64(start) / millisecondPerDay) + 1) * millisecondPerDay) - perDayTimeRange[0] = model.Interval{Start: start, End: startOfNextDay - 1} - - for i := 1; i < numOfDays; i++ { - interval := model.Interval{Start: startOfNextDay} - startOfNextDay += model.Time(millisecondPerDay) - interval.End = startOfNextDay - 1 - perDayTimeRange[i] = interval - } - - perDayTimeRange[numOfDays-1].End = end - - return perDayTimeRange -} - -func numPlans(start, end model.Time) int { - // rounding down start to start of the day - if start%model.Time(millisecondPerDay) != 0 { - start = model.Time((int64(start) / millisecondPerDay) * millisecondPerDay) - } - - // rounding up end to end of the day - if end%model.Time(millisecondPerDay) != 0 { - end = model.Time((int64(end)/millisecondPerDay)*millisecondPerDay + millisecondPerDay) - } - - return int(int64(end-start) / millisecondPerDay) -} - -// groups chunks together by unique label sets i.e all the chunks with same labels would be stored in a group -// chunk details are stored in groups for each unique label set to avoid storing them repetitively for each chunk -func groupChunks(chunks []chunk.Chunk, deleteFrom, deleteThrough model.Time, includedChunkIDs map[string]struct{}) ([]ChunksGroup, map[string]struct{}) { - metricToChunks := make(map[string]ChunksGroup) - - for _, chk := range chunks { - chunkID := chk.ExternalKey() - - if _, ok := includedChunkIDs[chunkID]; ok { - continue - } - // chunk.Metric are assumed to be sorted which should give same value from String() for same series. 
- // If they stop being sorted then in the worst case we would lose the benefit of grouping chunks to avoid storing labels repetitively. - metricString := chk.Metric.String() - group, ok := metricToChunks[metricString] - if !ok { - group = ChunksGroup{Labels: cortexpb.FromLabelsToLabelAdapters(chk.Metric)} - } - - chunkDetails := ChunkDetails{ID: chunkID} - - if deleteFrom > chk.From || deleteThrough < chk.Through { - partiallyDeletedInterval := Interval{StartTimestampMs: int64(chk.From), EndTimestampMs: int64(chk.Through)} - - if deleteFrom > chk.From { - partiallyDeletedInterval.StartTimestampMs = int64(deleteFrom) - } - - if deleteThrough < chk.Through { - partiallyDeletedInterval.EndTimestampMs = int64(deleteThrough) - } - chunkDetails.PartiallyDeletedInterval = &partiallyDeletedInterval - } - - group.Chunks = append(group.Chunks, chunkDetails) - includedChunkIDs[chunkID] = struct{}{} - metricToChunks[metricString] = group - } - - chunksGroups := make([]ChunksGroup, 0, len(metricToChunks)) - - for _, group := range metricToChunks { - chunksGroups = append(chunksGroups, group) - } - - return chunksGroups, includedChunkIDs -} - -func isMissingChunkErr(err error) bool { - if err == chunk.ErrStorageObjectNotFound { - return true - } - if promqlStorageErr, ok := err.(promql.ErrStorage); ok && promqlStorageErr.Err == chunk.ErrStorageObjectNotFound { - return true - } - - return false -} - -func buildObjectKeyForPlan(userID, requestID string, planNo int) string { - return fmt.Sprintf("%s:%s/%d", userID, requestID, planNo) -} - -func makeDeleteRequestWithLogger(deleteRequest DeleteRequest, l log.Logger) deleteRequestWithLogger { - logger := log.With(l, "user_id", deleteRequest.UserID, "request_id", deleteRequest.RequestID) - return deleteRequestWithLogger{deleteRequest, logger} -} - -// inProcessRequestsCollection stores DeleteRequests which are in process by each user. -// Currently we only allow processing of one delete request per user so it stores single DeleteRequest per user. 
-type inProcessRequestsCollection struct { - requests map[string]*DeleteRequest - usersWithFailedRequests map[string]struct{} - mtx sync.RWMutex -} - -func newInProcessRequestsCollection() *inProcessRequestsCollection { - return &inProcessRequestsCollection{ - requests: map[string]*DeleteRequest{}, - usersWithFailedRequests: map[string]struct{}{}, - } -} - -func (i *inProcessRequestsCollection) set(userID string, request *DeleteRequest) { - i.mtx.Lock() - defer i.mtx.Unlock() - - i.requests[userID] = request -} - -func (i *inProcessRequestsCollection) get(userID string) *DeleteRequest { - i.mtx.RLock() - defer i.mtx.RUnlock() - - return i.requests[userID] -} - -func (i *inProcessRequestsCollection) remove(userID string) { - i.mtx.Lock() - defer i.mtx.Unlock() - - delete(i.requests, userID) -} - -func (i *inProcessRequestsCollection) len() int { - i.mtx.RLock() - defer i.mtx.RUnlock() - - return len(i.requests) -} - -func (i *inProcessRequestsCollection) getOldest() *DeleteRequest { - i.mtx.RLock() - defer i.mtx.RUnlock() - - var oldestRequest *DeleteRequest - for _, request := range i.requests { - if oldestRequest == nil || request.CreatedAt.Before(oldestRequest.CreatedAt) { - oldestRequest = request - } - } - - return oldestRequest -} - -func (i *inProcessRequestsCollection) setFailedRequestForUser(userID string) { - i.mtx.Lock() - defer i.mtx.Unlock() - - i.usersWithFailedRequests[userID] = struct{}{} -} - -func (i *inProcessRequestsCollection) unsetFailedRequestForUser(userID string) { - i.mtx.Lock() - defer i.mtx.Unlock() - - delete(i.usersWithFailedRequests, userID) -} - -func (i *inProcessRequestsCollection) listUsersWithFailedRequest() []string { - i.mtx.RLock() - defer i.mtx.RUnlock() - - userIDs := make([]string, 0, len(i.usersWithFailedRequests)) - for userID := range i.usersWithFailedRequests { - userIDs = append(userIDs, userID) - } - - return userIDs -} diff --git a/internal/cortex/chunk/purger/purger_test.go b/internal/cortex/chunk/purger/purger_test.go deleted file mode 100644 index b71736a5a6..0000000000 --- a/internal/cortex/chunk/purger/purger_test.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
- -package purger - -import ( - "context" - "fmt" - "sort" - "strings" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql/parser" - "github.com/stretchr/testify/require" - - "github.com/thanos-io/thanos/internal/cortex/chunk" - "github.com/thanos-io/thanos/internal/cortex/chunk/testutils" - "github.com/thanos-io/thanos/internal/cortex/util/flagext" - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" - "github.com/thanos-io/thanos/internal/cortex/util/services" - "github.com/thanos-io/thanos/internal/cortex/util/test" -) - -const ( - userID = "userID" - modelTimeDay = model.Time(millisecondPerDay) - modelTimeHour = model.Time(time.Hour / time.Millisecond) -) - -func setupTestDeleteStore(t *testing.T) *DeleteStore { - var ( - deleteStoreConfig DeleteStoreConfig - tbmConfig chunk.TableManagerConfig - schemaCfg = chunk.DefaultSchemaConfig("", "v10", 0) - ) - flagext.DefaultValues(&deleteStoreConfig) - flagext.DefaultValues(&tbmConfig) - - mockStorage := chunk.NewMockStorage() - - extraTables := []chunk.ExtraTables{{TableClient: mockStorage, Tables: deleteStoreConfig.GetTables()}} - tableManager, err := chunk.NewTableManager(tbmConfig, schemaCfg, 12*time.Hour, mockStorage, nil, extraTables, nil) - require.NoError(t, err) - - require.NoError(t, tableManager.SyncTables(context.Background())) - - deleteStore, err := NewDeleteStore(deleteStoreConfig, mockStorage) - require.NoError(t, err) - - return deleteStore -} - -func setupStoresAndPurger(t *testing.T) (*DeleteStore, chunk.Store, chunk.ObjectClient, *Purger, *prometheus.Registry) { - deleteStore := setupTestDeleteStore(t) - - chunkStore, err := testutils.SetupTestChunkStore() - require.NoError(t, err) - - storageClient, err := testutils.SetupTestObjectStore() - require.NoError(t, err) - - purger, registry := setupPurger(t, deleteStore, chunkStore, storageClient) - - return deleteStore, chunkStore, storageClient, purger, registry -} - -func setupPurger(t *testing.T, deleteStore *DeleteStore, chunkStore chunk.Store, storageClient chunk.ObjectClient) (*Purger, *prometheus.Registry) { - registry := prometheus.NewRegistry() - - var cfg Config - flagext.DefaultValues(&cfg) - - purger, err := NewPurger(cfg, deleteStore, chunkStore, storageClient, registry) - require.NoError(t, err) - - return purger, registry -} - -func buildChunks(from, through model.Time, batchSize int) ([]chunk.Chunk, error) { - var chunks []chunk.Chunk - for ; from < through; from = from.Add(time.Hour) { - // creating batchSize chunks per hour - _, testChunks, err := testutils.CreateChunks(0, batchSize, from, from.Add(time.Hour)) - if err != nil { - return nil, err - } - - chunks = append(chunks, testChunks...)
- } - - return chunks, nil -} - -var purgePlanTestCases = []struct { - name string - chunkStoreDataInterval model.Interval - deleteRequestInterval model.Interval - expectedNumberOfPlans int - numChunksToDelete int - firstChunkPartialDeletionInterval *Interval - lastChunkPartialDeletionInterval *Interval - batchSize int -}{ - { - name: "deleting whole hour from a one hour data", - chunkStoreDataInterval: model.Interval{End: modelTimeHour}, - deleteRequestInterval: model.Interval{End: modelTimeHour}, - expectedNumberOfPlans: 1, - numChunksToDelete: 1, - }, - { - name: "deleting half a day from a days data", - chunkStoreDataInterval: model.Interval{End: modelTimeDay}, - deleteRequestInterval: model.Interval{End: model.Time(millisecondPerDay / 2)}, - expectedNumberOfPlans: 1, - numChunksToDelete: 12 + 1, // one chunk for each hour + end time touches chunk at boundary - lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(millisecondPerDay / 2), - EndTimestampMs: int64(millisecondPerDay / 2)}, - }, - { - name: "deleting a full day from 2 days data", - chunkStoreDataInterval: model.Interval{End: modelTimeDay * 2}, - deleteRequestInterval: model.Interval{End: modelTimeDay}, - expectedNumberOfPlans: 1, - numChunksToDelete: 24 + 1, // one chunk for each hour + end time touches chunk at boundary - lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: millisecondPerDay, - EndTimestampMs: millisecondPerDay}, - }, - { - name: "deleting 2 days partially from 2 days data", - chunkStoreDataInterval: model.Interval{End: modelTimeDay * 2}, - deleteRequestInterval: model.Interval{Start: model.Time(millisecondPerDay / 2), - End: model.Time(millisecondPerDay + millisecondPerDay/2)}, - expectedNumberOfPlans: 2, - numChunksToDelete: 24 + 2, // one chunk for each hour + start and end time touches chunk at boundary - firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(millisecondPerDay / 2), - EndTimestampMs: int64(millisecondPerDay / 2)}, - lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: millisecondPerDay + millisecondPerDay/2, - EndTimestampMs: millisecondPerDay + millisecondPerDay/2}, - }, - { - name: "deleting 2 days partially, not aligned with hour, from 2 days data", - chunkStoreDataInterval: model.Interval{End: modelTimeDay * 2}, - deleteRequestInterval: model.Interval{Start: model.Time(millisecondPerDay / 2).Add(time.Minute), - End: model.Time(millisecondPerDay + millisecondPerDay/2).Add(-time.Minute)}, - expectedNumberOfPlans: 2, - numChunksToDelete: 24, // one chunk for each hour, no chunks touched at boundary - firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(model.Time(millisecondPerDay / 2).Add(time.Minute)), - EndTimestampMs: int64(model.Time(millisecondPerDay / 2).Add(time.Hour))}, - lastChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(model.Time(millisecondPerDay + millisecondPerDay/2).Add(-time.Hour)), - EndTimestampMs: int64(model.Time(millisecondPerDay + millisecondPerDay/2).Add(-time.Minute))}, - }, - { - name: "deleting data outside of period of existing data", - chunkStoreDataInterval: model.Interval{End: modelTimeDay}, - deleteRequestInterval: model.Interval{Start: model.Time(millisecondPerDay * 2), End: model.Time(millisecondPerDay * 3)}, - expectedNumberOfPlans: 1, - numChunksToDelete: 0, - }, - { - name: "building multi-day chunk and deleting part of it from first day", - chunkStoreDataInterval: model.Interval{Start: modelTimeDay.Add(-30 * time.Minute), End: modelTimeDay.Add(30 * time.Minute)}, - 
deleteRequestInterval: model.Interval{Start: modelTimeDay.Add(-30 * time.Minute), End: modelTimeDay.Add(-15 * time.Minute)}, - expectedNumberOfPlans: 1, - numChunksToDelete: 1, - firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(modelTimeDay.Add(-30 * time.Minute)), - EndTimestampMs: int64(modelTimeDay.Add(-15 * time.Minute))}, - }, - { - name: "building multi-day chunk and deleting part of it for each day", - chunkStoreDataInterval: model.Interval{Start: modelTimeDay.Add(-30 * time.Minute), End: modelTimeDay.Add(30 * time.Minute)}, - deleteRequestInterval: model.Interval{Start: modelTimeDay.Add(-15 * time.Minute), End: modelTimeDay.Add(15 * time.Minute)}, - expectedNumberOfPlans: 2, - numChunksToDelete: 1, - firstChunkPartialDeletionInterval: &Interval{StartTimestampMs: int64(modelTimeDay.Add(-15 * time.Minute)), - EndTimestampMs: int64(modelTimeDay.Add(15 * time.Minute))}, - }, -} - -func TestPurger_BuildPlan(t *testing.T) { - for _, tc := range purgePlanTestCases { - for batchSize := 1; batchSize <= 5; batchSize++ { - t.Run(fmt.Sprintf("%s/batch-size=%d", tc.name, batchSize), func(t *testing.T) { - deleteStore, chunkStore, storageClient, purger, _ := setupStoresAndPurger(t) - defer func() { - purger.StopAsync() - chunkStore.Stop() - }() - - chunks, err := buildChunks(tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, batchSize) - require.NoError(t, err) - - require.NoError(t, chunkStore.Put(context.Background(), chunks)) - - err = deleteStore.AddDeleteRequest(context.Background(), userID, tc.deleteRequestInterval.Start, - tc.deleteRequestInterval.End, []string{"foo"}) - require.NoError(t, err) - - deleteRequests, err := deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID) - require.NoError(t, err) - - deleteRequest := deleteRequests[0] - requestWithLogger := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - - err = purger.buildDeletePlan(requestWithLogger) - require.NoError(t, err) - planPath := fmt.Sprintf("%s:%s/", userID, deleteRequest.RequestID) - - plans, _, err := storageClient.List(context.Background(), planPath, "/") - require.NoError(t, err) - require.Equal(t, tc.expectedNumberOfPlans, len(plans)) - - numPlans := tc.expectedNumberOfPlans - var nilPurgePlanInterval *Interval - numChunks := 0 - - chunkIDs := map[string]struct{}{} - - for i := range plans { - deletePlan, err := purger.getDeletePlan(context.Background(), userID, deleteRequest.RequestID, i) - require.NoError(t, err) - for _, chunksGroup := range deletePlan.ChunksGroup { - numChunksInGroup := len(chunksGroup.Chunks) - chunks := chunksGroup.Chunks - numChunks += numChunksInGroup - - sort.Slice(chunks, func(i, j int) bool { - chunkI, err := chunk.ParseExternalKey(userID, chunks[i].ID) - require.NoError(t, err) - - chunkJ, err := chunk.ParseExternalKey(userID, chunks[j].ID) - require.NoError(t, err) - - return chunkI.From < chunkJ.From - }) - - for j, chunkDetails := range chunksGroup.Chunks { - chunkIDs[chunkDetails.ID] = struct{}{} - if i == 0 && j == 0 && tc.firstChunkPartialDeletionInterval != nil { - require.Equal(t, *tc.firstChunkPartialDeletionInterval, *chunkDetails.PartiallyDeletedInterval) - } else if i == numPlans-1 && j == numChunksInGroup-1 && tc.lastChunkPartialDeletionInterval != nil { - require.Equal(t, *tc.lastChunkPartialDeletionInterval, *chunkDetails.PartiallyDeletedInterval) - } else { - require.Equal(t, nilPurgePlanInterval, chunkDetails.PartiallyDeletedInterval) - } - } - } - } - - require.Equal(t, tc.numChunksToDelete*batchSize, 
len(chunkIDs)) - require.Equal(t, float64(tc.numChunksToDelete*batchSize), testutil.ToFloat64(purger.metrics.deleteRequestsChunksSelectedTotal)) - }) - } - } -} - -func TestPurger_ExecutePlan(t *testing.T) { - fooMetricNameMatcher, err := parser.ParseMetricSelector(`foo`) - if err != nil { - t.Fatal(err) - } - - for _, tc := range purgePlanTestCases { - for batchSize := 1; batchSize <= 5; batchSize++ { - t.Run(fmt.Sprintf("%s/batch-size=%d", tc.name, batchSize), func(t *testing.T) { - deleteStore, chunkStore, _, purger, _ := setupStoresAndPurger(t) - defer func() { - purger.StopAsync() - chunkStore.Stop() - }() - - chunks, err := buildChunks(tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, batchSize) - require.NoError(t, err) - - require.NoError(t, chunkStore.Put(context.Background(), chunks)) - - // calculate the expected number of chunks that should be there in store before deletion - chunkStoreDataIntervalTotal := tc.chunkStoreDataInterval.End - tc.chunkStoreDataInterval.Start - numChunksExpected := int(chunkStoreDataIntervalTotal / model.Time(time.Hour/time.Millisecond)) - - // see if store actually has expected number of chunks - chunks, err = chunkStore.Get(context.Background(), userID, tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, fooMetricNameMatcher...) - require.NoError(t, err) - require.Equal(t, numChunksExpected*batchSize, len(chunks)) - - // delete chunks - err = deleteStore.AddDeleteRequest(context.Background(), userID, tc.deleteRequestInterval.Start, - tc.deleteRequestInterval.End, []string{"foo"}) - require.NoError(t, err) - - // get the delete request - deleteRequests, err := deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID) - require.NoError(t, err) - - deleteRequest := deleteRequests[0] - requestWithLogger := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - err = purger.buildDeletePlan(requestWithLogger) - require.NoError(t, err) - - // execute all the plans - for i := 0; i < tc.expectedNumberOfPlans; i++ { - err := purger.executePlan(userID, deleteRequest.RequestID, i, requestWithLogger.logger) - require.NoError(t, err) - } - - // calculate the expected number of chunks that should be there in store after deletion - numChunksExpectedAfterDeletion := 0 - for chunkStart := tc.chunkStoreDataInterval.Start; chunkStart < tc.chunkStoreDataInterval.End; chunkStart += modelTimeHour { - numChunksExpectedAfterDeletion += len(getNonDeletedIntervals(model.Interval{Start: chunkStart, End: chunkStart + modelTimeHour}, tc.deleteRequestInterval)) - } - - // see if store actually has expected number of chunks - chunks, err = chunkStore.Get(context.Background(), userID, tc.chunkStoreDataInterval.Start, tc.chunkStoreDataInterval.End, fooMetricNameMatcher...) 
- require.NoError(t, err) - require.Equal(t, numChunksExpectedAfterDeletion*batchSize, len(chunks)) - }) - } - } -} - -func TestPurger_Restarts(t *testing.T) { - fooMetricNameMatcher, err := parser.ParseMetricSelector(`foo`) - if err != nil { - t.Fatal(err) - } - - deleteStore, chunkStore, storageClient, purger, _ := setupStoresAndPurger(t) - defer func() { - chunkStore.Stop() - }() - - chunks, err := buildChunks(0, model.Time(0).Add(10*24*time.Hour), 1) - require.NoError(t, err) - - require.NoError(t, chunkStore.Put(context.Background(), chunks)) - - // delete chunks - err = deleteStore.AddDeleteRequest(context.Background(), userID, model.Time(0).Add(24*time.Hour), - model.Time(0).Add(8*24*time.Hour), []string{"foo"}) - require.NoError(t, err) - - // get the delete request - deleteRequests, err := deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID) - require.NoError(t, err) - - deleteRequest := deleteRequests[0] - requestWithLogger := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - err = purger.buildDeletePlan(requestWithLogger) - require.NoError(t, err) - - // stop the existing purger - require.NoError(t, services.StopAndAwaitTerminated(context.Background(), purger)) - - // create a new purger to check whether it picks up in-process delete requests - newPurger, _ := setupPurger(t, deleteStore, chunkStore, storageClient) - - // load in-process delete requests by calling Run - require.NoError(t, services.StartAndAwaitRunning(context.Background(), newPurger)) - - defer newPurger.StopAsync() - - test.Poll(t, time.Minute, 0, func() interface{} { - return newPurger.inProcessRequests.len() - }) - - // check whether data got deleted from the store since the delete request has been processed - chunks, err = chunkStore.Get(context.Background(), userID, 0, model.Time(0).Add(10*24*time.Hour), fooMetricNameMatcher...) - require.NoError(t, err) - - // we are deleting 7 days out of 10 so there should be 3 days of data left in the store, which means 72 chunks - require.Equal(t, 72, len(chunks)) - - deleteRequests, err = deleteStore.GetAllDeleteRequestsForUser(context.Background(), userID) - require.NoError(t, err) - require.Equal(t, StatusProcessed, deleteRequests[0].Status) - - require.Equal(t, float64(1), testutil.ToFloat64(newPurger.metrics.deleteRequestsProcessedTotal)) - require.PanicsWithError(t, "collected 0 metrics instead of exactly 1", func() { - testutil.ToFloat64(newPurger.metrics.deleteRequestsProcessingFailures) - }) -} - -func TestPurger_Metrics(t *testing.T) { - deleteStore, chunkStore, storageClient, purger, registry := setupStoresAndPurger(t) - defer func() { - purger.StopAsync() - chunkStore.Stop() - }() - - // add delete requests without starting purger loops to load and process delete requests.
- // add delete request whose createdAt is now - err := deleteStore.AddDeleteRequest(context.Background(), userID, model.Time(0).Add(24*time.Hour), - model.Time(0).Add(2*24*time.Hour), []string{"foo"}) - require.NoError(t, err) - - // add delete request whose createdAt is 2 days back - err = deleteStore.addDeleteRequest(context.Background(), userID, model.Now().Add(-2*24*time.Hour), model.Time(0).Add(24*time.Hour), - model.Time(0).Add(2*24*time.Hour), []string{"foo"}) - require.NoError(t, err) - - // add delete request whose createdAt is 3 days back - err = deleteStore.addDeleteRequest(context.Background(), userID, model.Now().Add(-3*24*time.Hour), model.Time(0).Add(24*time.Hour), - model.Time(0).Add(8*24*time.Hour), []string{"foo"}) - require.NoError(t, err) - - // load new delete requests for processing - require.NoError(t, purger.pullDeleteRequestsToPlanDeletes()) - - // there must be 2 pending delete requests, oldest being 2 days old since its cancellation time is over - require.InDelta(t, float64(2*86400), testutil.ToFloat64(purger.metrics.oldestPendingDeleteRequestAgeSeconds), 1) - require.Equal(t, float64(2), testutil.ToFloat64(purger.metrics.pendingDeleteRequestsCount)) - - // stop the existing purger - require.NoError(t, services.StopAndAwaitTerminated(context.Background(), purger)) - - // create a new purger - purger, registry = setupPurger(t, deleteStore, chunkStore, storageClient) - - // load in process delete requests by starting the service - require.NoError(t, services.StartAndAwaitRunning(context.Background(), purger)) - - defer purger.StopAsync() - - // wait until purger_delete_requests_processed_total starts to show up. - test.Poll(t, 2*time.Second, 1, func() interface{} { - count, err := testutil.GatherAndCount(registry, "cortex_purger_delete_requests_processed_total") - require.NoError(t, err) - return count - }) - - // wait until both the pending delete requests are processed. 
- test.Poll(t, 2*time.Second, float64(2), func() interface{} { - return testutil.ToFloat64(purger.metrics.deleteRequestsProcessedTotal) - }) - - // wait until oldest pending request age becomes 0 - test.Poll(t, 2*time.Second, float64(0), func() interface{} { - return testutil.ToFloat64(purger.metrics.oldestPendingDeleteRequestAgeSeconds) - }) - - // wait until pending delete requests count becomes 0 - test.Poll(t, 2*time.Second, float64(0), func() interface{} { - return testutil.ToFloat64(purger.metrics.pendingDeleteRequestsCount) - }) -} - -func TestPurger_retryFailedRequests(t *testing.T) { - // setup chunks store - indexMockStorage := chunk.NewMockStorage() - chunksMockStorage := chunk.NewMockStorage() - - deleteStore := setupTestDeleteStore(t) - chunkStore, err := testutils.SetupTestChunkStoreWithClients(indexMockStorage, chunksMockStorage, indexMockStorage) - require.NoError(t, err) - - // create a purger instance - purgerMockStorage := chunk.NewMockStorage() - purger, _ := setupPurger(t, deleteStore, chunkStore, purgerMockStorage) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), purger)) - - defer func() { - require.NoError(t, services.StopAndAwaitTerminated(context.Background(), purger)) - }() - - // add some chunks - chunks, err := buildChunks(0, model.Time(0).Add(3*24*time.Hour), 1) - require.NoError(t, err) - - require.NoError(t, chunkStore.Put(context.Background(), chunks)) - - // add a request to delete some chunks - err = deleteStore.addDeleteRequest(context.Background(), userID, model.Now().Add(-25*time.Hour), model.Time(0).Add(24*time.Hour), - model.Time(0).Add(2*24*time.Hour), []string{"foo"}) - require.NoError(t, err) - - // change purgerMockStorage to allow only reads. This would fail putting plans to the storage and hence fail build plans operation. - purgerMockStorage.SetMode(chunk.MockStorageModeReadOnly) - - // pull requests to process and ensure that it has failed. - err = purger.pullDeleteRequestsToPlanDeletes() - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "permission denied")) - - // there must be 1 delete request in process and the userID must be in failed requests list. - require.NotNil(t, purger.inProcessRequests.get(userID)) - require.Len(t, purger.inProcessRequests.listUsersWithFailedRequest(), 1) - - // now allow writes to purgerMockStorage to allow building plans to succeed. - purgerMockStorage.SetMode(chunk.MockStorageModeReadWrite) - - // but change mode of chunksMockStorage to read only which would deny permission to delete any chunks and in turn - // fail to execute delete plans. - chunksMockStorage.SetMode(chunk.MockStorageModeReadOnly) - - // retry processing of failed requests - purger.retryFailedRequests() - - // the delete request status should now change to StatusDeleting since the building of plan should have succeeded. - test.Poll(t, time.Second, StatusDeleting, func() interface{} { - return purger.inProcessRequests.get(userID).Status - }) - // the request should have failed again since we did not give permission to delete chunks. - test.Poll(t, time.Second, 1, func() interface{} { - return len(purger.inProcessRequests.listUsersWithFailedRequest()) - }) - - // now allow writes to chunksMockStorage so the requests do not fail anymore. - chunksMockStorage.SetMode(chunk.MockStorageModeReadWrite) - - // retry processing of failed requests. - purger.retryFailedRequests() - // there must be no in process requests anymore. 
- test.Poll(t, time.Second, true, func() interface{} { - return purger.inProcessRequests.get(userID) == nil - }) - // there must be no users having failed requests. - require.Len(t, purger.inProcessRequests.listUsersWithFailedRequest(), 0) -} - -func getNonDeletedIntervals(originalInterval, deletedInterval model.Interval) []model.Interval { - nonDeletedIntervals := []model.Interval{} - if deletedInterval.Start > originalInterval.Start { - nonDeletedIntervals = append(nonDeletedIntervals, model.Interval{Start: originalInterval.Start, End: deletedInterval.Start - 1}) - } - - if deletedInterval.End < originalInterval.End { - nonDeletedIntervals = append(nonDeletedIntervals, model.Interval{Start: deletedInterval.End + 1, End: originalInterval.End}) - } - - return nonDeletedIntervals -} diff --git a/internal/cortex/chunk/purger/request_handler.go b/internal/cortex/chunk/purger/request_handler.go deleted file mode 100644 index 073d66b84e..0000000000 --- a/internal/cortex/chunk/purger/request_handler.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package purger - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/go-kit/log/level" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql/parser" - - "github.com/thanos-io/thanos/internal/cortex/tenant" - "github.com/thanos-io/thanos/internal/cortex/util" - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" -) - -type deleteRequestHandlerMetrics struct { - deleteRequestsReceivedTotal *prometheus.CounterVec -} - -func newDeleteRequestHandlerMetrics(r prometheus.Registerer) *deleteRequestHandlerMetrics { - m := deleteRequestHandlerMetrics{} - - m.deleteRequestsReceivedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_delete_requests_received_total", - Help: "Number of delete requests received per user", - }, []string{"user"}) - - return &m -} - -// DeleteRequestHandler provides handlers for delete requests -type DeleteRequestHandler struct { - deleteStore *DeleteStore - metrics *deleteRequestHandlerMetrics - deleteRequestCancelPeriod time.Duration -} - -// NewDeleteRequestHandler creates a DeleteRequestHandler -func NewDeleteRequestHandler(deleteStore *DeleteStore, deleteRequestCancelPeriod time.Duration, registerer prometheus.Registerer) *DeleteRequestHandler { - deleteMgr := DeleteRequestHandler{ - deleteStore: deleteStore, - deleteRequestCancelPeriod: deleteRequestCancelPeriod, - metrics: newDeleteRequestHandlerMetrics(registerer), - } - - return &deleteMgr -} - -// AddDeleteRequestHandler handles addition of new delete request -func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - params := r.URL.Query() - match := params["match[]"] - if len(match) == 0 { - http.Error(w, "selectors not set", http.StatusBadRequest) - return - } - - for i := range match { - _, err := parser.ParseMetricSelector(match[i]) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - } - - startParam := params.Get("start") - startTime := int64(0) - if startParam != "" { - startTime, err = util.ParseTime(startParam) - if err != nil { - http.Error(w, err.Error(), 
http.StatusBadRequest) - return - } - } - - endParam := params.Get("end") - endTime := int64(model.Now()) - - if endParam != "" { - endTime, err = util.ParseTime(endParam) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - if endTime > int64(model.Now()) { - http.Error(w, "deletes in future not allowed", http.StatusBadRequest) - return - } - } - - if startTime > endTime { - http.Error(w, "start time can't be greater than end time", http.StatusBadRequest) - return - } - - if err := dm.deleteStore.AddDeleteRequest(ctx, userID, model.Time(startTime), model.Time(endTime), match); err != nil { - level.Error(util_log.Logger).Log("msg", "error adding delete request to the store", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - dm.metrics.deleteRequestsReceivedTotal.WithLabelValues(userID).Inc() - w.WriteHeader(http.StatusNoContent) -} - -// GetAllDeleteRequestsHandler handles get all delete requests -func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - deleteRequests, err := dm.deleteStore.GetAllDeleteRequestsForUser(ctx, userID) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error getting delete requests from the store", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if err := json.NewEncoder(w).Encode(deleteRequests); err != nil { - level.Error(util_log.Logger).Log("msg", "error marshalling response", "err", err) - http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError) - } -} - -// CancelDeleteRequestHandler handles delete request cancellation -func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - params := r.URL.Query() - requestID := params.Get("request_id") - - deleteRequest, err := dm.deleteStore.GetDeleteRequest(ctx, userID, requestID) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error getting delete request from the store", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if deleteRequest == nil { - http.Error(w, "could not find delete request with given id", http.StatusBadRequest) - return - } - - if deleteRequest.Status != StatusReceived { - http.Error(w, "deletion of request which is in process or already processed is not allowed", http.StatusBadRequest) - return - } - - if deleteRequest.CreatedAt.Add(dm.deleteRequestCancelPeriod).Before(model.Now()) { - http.Error(w, fmt.Sprintf("deletion of request past the deadline of %s since its creation is not allowed", dm.deleteRequestCancelPeriod.String()), http.StatusBadRequest) - return - } - - if err := dm.deleteStore.RemoveDeleteRequest(ctx, userID, requestID, deleteRequest.CreatedAt, deleteRequest.StartTime, deleteRequest.EndTime); err != nil { - level.Error(util_log.Logger).Log("msg", "error cancelling the delete request", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusNoContent) -} diff --git a/internal/cortex/chunk/purger/table_provisioning.go b/internal/cortex/chunk/purger/table_provisioning.go deleted file mode 100644 index aeb9aef16b..0000000000 --- 
a/internal/cortex/chunk/purger/table_provisioning.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package purger - -import ( - "flag" - - "github.com/thanos-io/thanos/internal/cortex/chunk" -) - -// TableProvisioningConfig holds config for table throughput and autoscaling. Currently only used by DynamoDB. -type TableProvisioningConfig struct { - chunk.ActiveTableProvisionConfig `yaml:",inline"` - TableTags chunk.Tags `yaml:"tags"` -} - -// RegisterFlags adds the flags required to configure this to the given FlagSet. -// Adding a separate RegisterFlags here instead of using it from embedded chunk.ActiveTableProvisionConfig to be able to manage defaults separately. -// Defaults for WriteScale and ReadScale are shared for now to avoid adding further complexity since autoscaling is disabled anyway by default. -func (cfg *TableProvisioningConfig) RegisterFlags(argPrefix string, f *flag.FlagSet) { - // default values ActiveTableProvisionConfig - cfg.ProvisionedWriteThroughput = 1 - cfg.ProvisionedReadThroughput = 300 - cfg.ProvisionedThroughputOnDemandMode = false - - cfg.ActiveTableProvisionConfig.RegisterFlags(argPrefix, f) - f.Var(&cfg.TableTags, argPrefix+".tags", "Tag (of the form key=value) to be added to the tables. Supported by DynamoDB") -} - -func (cfg DeleteStoreConfig) GetTables() []chunk.TableDesc { - return []chunk.TableDesc{cfg.ProvisionConfig.BuildTableDesc(cfg.RequestsTableName, cfg.ProvisionConfig.TableTags)} -} diff --git a/internal/cortex/chunk/purger/tenant_deletion_api.go b/internal/cortex/chunk/purger/tenant_deletion_api.go deleted file mode 100644 index c19244665d..0000000000 --- a/internal/cortex/chunk/purger/tenant_deletion_api.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package purger - -import ( - "context" - "net/http" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/objstore" - - "github.com/thanos-io/thanos/internal/cortex/storage/bucket" - cortex_tsdb "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" - "github.com/thanos-io/thanos/internal/cortex/tenant" - "github.com/thanos-io/thanos/internal/cortex/util" -) - -type TenantDeletionAPI struct { - bucketClient objstore.Bucket - logger log.Logger - cfgProvider bucket.TenantConfigProvider -} - -func NewTenantDeletionAPI(storageCfg cortex_tsdb.BlocksStorageConfig, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) (*TenantDeletionAPI, error) { - bucketClient, err := createBucketClient(storageCfg, logger, reg) - if err != nil { - return nil, err - } - - return newTenantDeletionAPI(bucketClient, cfgProvider, logger), nil -} - -func newTenantDeletionAPI(bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *TenantDeletionAPI { - return &TenantDeletionAPI{ - bucketClient: bkt, - cfgProvider: cfgProvider, - logger: logger, - } -} - -func (api *TenantDeletionAPI) DeleteTenant(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - // When Cortex is running, it uses Auth Middleware for checking X-Scope-OrgID and injecting tenant into context. - // Auth Middleware sends http.StatusUnauthorized if X-Scope-OrgID is missing, so we do too here, for consistency.
- http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID, api.cfgProvider, cortex_tsdb.NewTenantDeletionMark(time.Now())) - if err != nil { - level.Error(api.logger).Log("msg", "failed to write tenant deletion mark", "user", userID, "err", err) - - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - level.Info(api.logger).Log("msg", "tenant deletion mark in blocks storage created", "user", userID) - - w.WriteHeader(http.StatusOK) -} - -type DeleteTenantStatusResponse struct { - TenantID string `json:"tenant_id"` - BlocksDeleted bool `json:"blocks_deleted"` -} - -func (api *TenantDeletionAPI) DeleteTenantStatus(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - result := DeleteTenantStatusResponse{} - result.TenantID = userID - result.BlocksDeleted, err = api.isBlocksForUserDeleted(ctx, userID) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - util.WriteJSONResponse(w, result) -} - -func (api *TenantDeletionAPI) isBlocksForUserDeleted(ctx context.Context, userID string) (bool, error) { - var errBlockFound = errors.New("block found") - - userBucket := bucket.NewUserBucketClient(userID, api.bucketClient, api.cfgProvider) - err := userBucket.Iter(ctx, "", func(s string) error { - s = strings.TrimSuffix(s, "/") - - _, err := ulid.Parse(s) - if err != nil { - // not block, keep looking - return nil - } - - // Used as shortcut to stop iteration. - return errBlockFound - }) - - if errors.Is(err, errBlockFound) { - return false, nil - } - - if err != nil { - return false, err - } - - // No blocks found, all good. - return true, nil -} - -func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { - bucketClient, err := bucket.NewClient(context.Background(), cfg.Bucket, "purger", logger, reg) - if err != nil { - return nil, errors.Wrap(err, "create bucket client") - } - - return bucketClient, nil -} diff --git a/internal/cortex/chunk/purger/tenant_deletion_api_test.go b/internal/cortex/chunk/purger/tenant_deletion_api_test.go deleted file mode 100644 index 1bca835a34..0000000000 --- a/internal/cortex/chunk/purger/tenant_deletion_api_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
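// Aside: the early-exit pattern used by isBlocksForUserDeleted above, restated
// as a self-contained sketch. All names below are illustrative, not from the
// codebase: a sentinel error aborts the iteration early, and the caller
// translates it back into a boolean.
package main

import (
	"errors"
	"fmt"
)

var errFound = errors.New("found")

// anyBlockID reports whether any object name looks like a block ID,
// stopping at the first hit by returning the sentinel from the callback.
func anyBlockID(objects []string) bool {
	err := func() error {
		for _, name := range objects {
			if len(name) == 26 { // stand-in for the ulid.Parse check above
				return errFound // used as a shortcut to stop iteration
			}
		}
		return nil
	}()
	return errors.Is(err, errFound)
}

func main() {
	fmt.Println(anyBlockID([]string{"deletion-mark.json", "01EQK4QKFHVSZYVJ908Y7HH9E0"})) // true
}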
- -package purger - -import ( - "bytes" - "context" - "net/http" - "net/http/httptest" - "path" - "testing" - - "github.com/go-kit/log" - "github.com/stretchr/testify/require" - "github.com/thanos-io/objstore" - "github.com/weaveworks/common/user" - - "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" -) - -func TestDeleteTenant(t *testing.T) { - bkt := objstore.NewInMemBucket() - api := newTenantDeletionAPI(bkt, nil, log.NewNopLogger()) - - { - resp := httptest.NewRecorder() - api.DeleteTenant(resp, &http.Request{}) - require.Equal(t, http.StatusUnauthorized, resp.Code) - } - - { - ctx := context.Background() - ctx = user.InjectOrgID(ctx, "fake") - - req := &http.Request{} - resp := httptest.NewRecorder() - api.DeleteTenant(resp, req.WithContext(ctx)) - - require.Equal(t, http.StatusOK, resp.Code) - objs := bkt.Objects() - require.NotNil(t, objs[path.Join("fake", tsdb.TenantDeletionMarkPath)]) - } -} - -func TestDeleteTenantStatus(t *testing.T) { - const username = "user" - - for name, tc := range map[string]struct { - objects map[string][]byte - expectedBlocksDeleted bool - }{ - "empty": { - objects: nil, - expectedBlocksDeleted: true, - }, - - "no user objects": { - objects: map[string][]byte{ - "different-user/01EQK4QKFHVSZYVJ908Y7HH9E0/meta.json": []byte("data"), - }, - expectedBlocksDeleted: true, - }, - - "non-block files": { - objects: map[string][]byte{ - "user/deletion-mark.json": []byte("data"), - }, - expectedBlocksDeleted: true, - }, - - "block files": { - objects: map[string][]byte{ - "user/01EQK4QKFHVSZYVJ908Y7HH9E0/meta.json": []byte("data"), - }, - expectedBlocksDeleted: false, - }, - } { - t.Run(name, func(t *testing.T) { - bkt := objstore.NewInMemBucket() - // "upload" objects - for objName, data := range tc.objects { - require.NoError(t, bkt.Upload(context.Background(), objName, bytes.NewReader(data))) - } - - api := newTenantDeletionAPI(bkt, nil, log.NewNopLogger()) - - res, err := api.isBlocksForUserDeleted(context.Background(), username) - require.NoError(t, err) - require.Equal(t, tc.expectedBlocksDeleted, res) - }) - } -} diff --git a/internal/cortex/chunk/purger/tombstones.go b/internal/cortex/chunk/purger/tombstones.go deleted file mode 100644 index 9cc631901e..0000000000 --- a/internal/cortex/chunk/purger/tombstones.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
- -package purger - -import ( - "context" - "sort" - "strconv" - "sync" - "time" - - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" - - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" -) - -const tombstonesReloadDuration = 5 * time.Minute - -type tombstonesLoaderMetrics struct { - cacheGenLoadFailures prometheus.Counter - deleteRequestsLoadFailures prometheus.Counter -} - -func newtombstonesLoaderMetrics(r prometheus.Registerer) *tombstonesLoaderMetrics { - m := tombstonesLoaderMetrics{} - - m.cacheGenLoadFailures = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "tombstones_loader_cache_gen_load_failures_total", - Help: "Total number of failures while loading cache generation number using tombstones loader", - }) - m.deleteRequestsLoadFailures = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "tombstones_loader_cache_delete_requests_load_failures_total", - Help: "Total number of failures while loading delete requests using tombstones loader", - }) - - return &m -} - -// TombstonesSet holds all the pending delete requests for a user -type TombstonesSet struct { - tombstones []DeleteRequest - oldestTombstoneStart, newestTombstoneEnd model.Time // Used as optimization to find whether we want to iterate over tombstones or not -} - -// Used for easier injection of mocks. -type DeleteStoreAPI interface { - getCacheGenerationNumbers(ctx context.Context, user string) (*cacheGenNumbers, error) - GetPendingDeleteRequestsForUser(ctx context.Context, id string) ([]DeleteRequest, error) -} - -// TombstonesLoader loads delete requests and gen numbers from store and keeps checking for updates. -// It watches for changes in gen numbers, which also signal changes in delete requests, and reloads the affected users' delete requests.
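//
// A hedged usage sketch for the loader (ds stands for any DeleteStoreAPI
// implementation and, like from, to, and seriesLabels, is an assumption of
// this example; error handling elided):
//
//	loader := NewTombstonesLoader(ds, nil)
//	defer loader.Stop()
//
//	set, err := loader.GetPendingTombstones("tenant-a")
//	if err == nil && set.HasTombstonesForInterval(from, to) {
//		deleted := set.GetDeletedIntervals(seriesLabels, from, to)
//		_ = deleted // intervals to drop from query results
//	}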
-type TombstonesLoader struct { - tombstones map[string]*TombstonesSet - tombstonesMtx sync.RWMutex - - cacheGenNumbers map[string]*cacheGenNumbers - cacheGenNumbersMtx sync.RWMutex - - deleteStore DeleteStoreAPI - metrics *tombstonesLoaderMetrics - quit chan struct{} -} - -// NewTombstonesLoader creates a TombstonesLoader -func NewTombstonesLoader(deleteStore DeleteStoreAPI, registerer prometheus.Registerer) *TombstonesLoader { - tl := TombstonesLoader{ - tombstones: map[string]*TombstonesSet{}, - cacheGenNumbers: map[string]*cacheGenNumbers{}, - deleteStore: deleteStore, - metrics: newtombstonesLoaderMetrics(registerer), - } - go tl.loop() - - return &tl -} - -// Stop stops TombstonesLoader -func (tl *TombstonesLoader) Stop() { - close(tl.quit) -} - -func (tl *TombstonesLoader) loop() { - if tl.deleteStore == nil { - return - } - - tombstonesReloadTimer := time.NewTicker(tombstonesReloadDuration) - for { - select { - case <-tombstonesReloadTimer.C: - err := tl.reloadTombstones() - if err != nil { - level.Error(util_log.Logger).Log("msg", "error reloading tombstones", "err", err) - } - case <-tl.quit: - return - } - } -} - -func (tl *TombstonesLoader) reloadTombstones() error { - updatedGenNumbers := make(map[string]*cacheGenNumbers) - tl.cacheGenNumbersMtx.RLock() - - // check for updates in loaded gen numbers - for userID, oldGenNumbers := range tl.cacheGenNumbers { - newGenNumbers, err := tl.deleteStore.getCacheGenerationNumbers(context.Background(), userID) - if err != nil { - tl.cacheGenNumbersMtx.RUnlock() - return err - } - - if *oldGenNumbers != *newGenNumbers { - updatedGenNumbers[userID] = newGenNumbers - } - } - - tl.cacheGenNumbersMtx.RUnlock() - - // in the frontend we load only cache gen numbers, so short-circuit here if there are no delete requests loaded; - // the first call to GetPendingTombstones takes care of loading them.
- tl.tombstonesMtx.RLock() - if len(tl.tombstones) == 0 { - tl.tombstonesMtx.RUnlock() - return nil - } - tl.tombstonesMtx.RUnlock() - - // for all the updated gen numbers, reload delete requests - for userID, genNumbers := range updatedGenNumbers { - err := tl.loadPendingTombstones(userID) - if err != nil { - return err - } - - tl.cacheGenNumbersMtx.Lock() - tl.cacheGenNumbers[userID] = genNumbers - tl.cacheGenNumbersMtx.Unlock() - } - - return nil -} - -// GetPendingTombstones returns all pending tombstones -func (tl *TombstonesLoader) GetPendingTombstones(userID string) (*TombstonesSet, error) { - tl.tombstonesMtx.RLock() - - tombstoneSet, isOK := tl.tombstones[userID] - if isOK { - tl.tombstonesMtx.RUnlock() - return tombstoneSet, nil - } - - tl.tombstonesMtx.RUnlock() - err := tl.loadPendingTombstones(userID) - if err != nil { - return nil, err - } - - tl.tombstonesMtx.RLock() - defer tl.tombstonesMtx.RUnlock() - - return tl.tombstones[userID], nil -} - -// GetPendingTombstonesForInterval returns all pending tombstones that overlap the given interval -func (tl *TombstonesLoader) GetPendingTombstonesForInterval(userID string, from, to model.Time) (*TombstonesSet, error) { - allTombstones, err := tl.GetPendingTombstones(userID) - if err != nil { - return nil, err - } - - if !allTombstones.HasTombstonesForInterval(from, to) { - return &TombstonesSet{}, nil - } - - filteredSet := TombstonesSet{oldestTombstoneStart: model.Now()} - - for _, tombstone := range allTombstones.tombstones { - if !intervalsOverlap(model.Interval{Start: from, End: to}, model.Interval{Start: tombstone.StartTime, End: tombstone.EndTime}) { - continue - } - - filteredSet.tombstones = append(filteredSet.tombstones, tombstone) - - if tombstone.StartTime < filteredSet.oldestTombstoneStart { - filteredSet.oldestTombstoneStart = tombstone.StartTime - } - - if tombstone.EndTime > filteredSet.newestTombstoneEnd { - filteredSet.newestTombstoneEnd = tombstone.EndTime - } - } - - return &filteredSet, nil -} - -func (tl *TombstonesLoader) loadPendingTombstones(userID string) error { - if tl.deleteStore == nil { - tl.tombstonesMtx.Lock() - defer tl.tombstonesMtx.Unlock() - - tl.tombstones[userID] = &TombstonesSet{oldestTombstoneStart: 0, newestTombstoneEnd: 0} - return nil - } - - pendingDeleteRequests, err := tl.deleteStore.GetPendingDeleteRequestsForUser(context.Background(), userID) - if err != nil { - tl.metrics.deleteRequestsLoadFailures.Inc() - return errors.Wrap(err, "error loading delete requests") - } - - tombstoneSet := TombstonesSet{tombstones: pendingDeleteRequests, oldestTombstoneStart: model.Now()} - for i := range tombstoneSet.tombstones { - tombstoneSet.tombstones[i].Matchers = make([][]*labels.Matcher, len(tombstoneSet.tombstones[i].Selectors)) - - for j, selector := range tombstoneSet.tombstones[i].Selectors { - tombstoneSet.tombstones[i].Matchers[j], err = parser.ParseMetricSelector(selector) - - if err != nil { - tl.metrics.deleteRequestsLoadFailures.Inc() - return errors.Wrapf(err, "error parsing metric selector") - } - } - - if tombstoneSet.tombstones[i].StartTime < tombstoneSet.oldestTombstoneStart { - tombstoneSet.oldestTombstoneStart = tombstoneSet.tombstones[i].StartTime - } - - if tombstoneSet.tombstones[i].EndTime > tombstoneSet.newestTombstoneEnd { - tombstoneSet.newestTombstoneEnd = tombstoneSet.tombstones[i].EndTime - } - } - - tl.tombstonesMtx.Lock() - defer tl.tombstonesMtx.Unlock() - tl.tombstones[userID] = &tombstoneSet - - return nil -} - -// GetStoreCacheGenNumber returns the store cache gen number for the given tenants -func (tl *TombstonesLoader)
GetStoreCacheGenNumber(tenantIDs []string) string { - return tl.getCacheGenNumbersPerTenants(tenantIDs).store -} - -// GetResultsCacheGenNumber returns the results cache gen number for the given tenants -func (tl *TombstonesLoader) GetResultsCacheGenNumber(tenantIDs []string) string { - return tl.getCacheGenNumbersPerTenants(tenantIDs).results -} - -func (tl *TombstonesLoader) getCacheGenNumbersPerTenants(tenantIDs []string) *cacheGenNumbers { - var result cacheGenNumbers - - if len(tenantIDs) == 0 { - return &result - } - - // keep the maximum value that's currently in result - var maxResults, maxStore int - - for pos, tenantID := range tenantIDs { - numbers := tl.getCacheGenNumbers(tenantID) - - // handle first tenant in the list - if pos == 0 { - // shortcut if there is only one tenant - if len(tenantIDs) == 1 { - return numbers - } - - // seed the result with the first tenant's values whatever happens next - result.results = numbers.results - result.store = numbers.store - } - - // set results number string if it's higher than the ones before - if numbers.results != "" { - results, err := strconv.Atoi(numbers.results) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err) - } else if maxResults < results { - maxResults = results - result.results = numbers.results - } - } - - // set store number string if it's higher than the ones before - if numbers.store != "" { - store, err := strconv.Atoi(numbers.store) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error parsing storeCacheGenNumber", "user", tenantID, "err", err) - } else if maxStore < store { - maxStore = store - result.store = numbers.store - } - } - } - - return &result -} - -func (tl *TombstonesLoader) getCacheGenNumbers(userID string) *cacheGenNumbers { - tl.cacheGenNumbersMtx.RLock() - if genNumbers, isOK := tl.cacheGenNumbers[userID]; isOK { - tl.cacheGenNumbersMtx.RUnlock() - return genNumbers - } - - tl.cacheGenNumbersMtx.RUnlock() - - if tl.deleteStore == nil { - tl.cacheGenNumbersMtx.Lock() - defer tl.cacheGenNumbersMtx.Unlock() - - tl.cacheGenNumbers[userID] = &cacheGenNumbers{} - return tl.cacheGenNumbers[userID] - } - - genNumbers, err := tl.deleteStore.getCacheGenerationNumbers(context.Background(), userID) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error loading cache generation numbers", "err", err) - tl.metrics.cacheGenLoadFailures.Inc() - return &cacheGenNumbers{} - } - - tl.cacheGenNumbersMtx.Lock() - defer tl.cacheGenNumbersMtx.Unlock() - - tl.cacheGenNumbers[userID] = genNumbers - return genNumbers -} - -// GetDeletedIntervals returns non-overlapping, sorted deleted intervals.
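//
// As a hedged worked example (illustrative values, not from the source): with
// matching tombstones covering [10, 20] and [15, 30], a query for [0, 25]
// clips each tombstone to the requested range ([10, 20] and [15, 25]) and
// merges the overlap, so
//
//	ts.GetDeletedIntervals(lbls, 0, 25) // => [{Start: 10, End: 25}]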
-func (ts TombstonesSet) GetDeletedIntervals(lbls labels.Labels, from, to model.Time) []model.Interval { - if len(ts.tombstones) == 0 || to < ts.oldestTombstoneStart || from > ts.newestTombstoneEnd { - return nil - } - - var deletedIntervals []model.Interval - requestedInterval := model.Interval{Start: from, End: to} - - for i := range ts.tombstones { - overlaps, overlappingInterval := getOverlappingInterval(requestedInterval, - model.Interval{Start: ts.tombstones[i].StartTime, End: ts.tombstones[i].EndTime}) - - if !overlaps { - continue - } - - matches := false - for _, matchers := range ts.tombstones[i].Matchers { - if labels.Selector(matchers).Matches(lbls) { - matches = true - break - } - } - - if !matches { - continue - } - - if overlappingInterval == requestedInterval { - // whole interval deleted - return []model.Interval{requestedInterval} - } - - deletedIntervals = append(deletedIntervals, overlappingInterval) - } - - if len(deletedIntervals) == 0 { - return nil - } - - return mergeIntervals(deletedIntervals) -} - -// Len returns the number of tombstones in the set -func (ts TombstonesSet) Len() int { - return len(ts.tombstones) -} - -// HasTombstonesForInterval tells whether there are any tombstones which overlap the given interval -func (ts TombstonesSet) HasTombstonesForInterval(from, to model.Time) bool { - if len(ts.tombstones) == 0 || to < ts.oldestTombstoneStart || from > ts.newestTombstoneEnd { - return false - } - - return true -} - -// sorts and merges overlapping intervals -func mergeIntervals(intervals []model.Interval) []model.Interval { - if len(intervals) <= 1 { - return intervals - } - - mergedIntervals := make([]model.Interval, 0, len(intervals)) - sort.Slice(intervals, func(i, j int) bool { - return intervals[i].Start < intervals[j].Start - }) - - ongoingTrFrom, ongoingTrTo := intervals[0].Start, intervals[0].End - for i := 1; i < len(intervals); i++ { - // if there is no overlap add it to mergedIntervals - if intervals[i].Start > ongoingTrTo { - mergedIntervals = append(mergedIntervals, model.Interval{Start: ongoingTrFrom, End: ongoingTrTo}) - ongoingTrFrom = intervals[i].Start - ongoingTrTo = intervals[i].End - continue - } - - // there is an overlap; extend the ongoing time range if the current interval ends later - if intervals[i].End > ongoingTrTo { - ongoingTrTo = intervals[i].End - } - } - - // add the last time range - mergedIntervals = append(mergedIntervals, model.Interval{Start: ongoingTrFrom, End: ongoingTrTo}) - - return mergedIntervals -} - -func getOverlappingInterval(interval1, interval2 model.Interval) (bool, model.Interval) { - if interval2.Start > interval1.Start { - interval1.Start = interval2.Start - } - - if interval2.End < interval1.End { - interval1.End = interval2.End - } - - return interval1.Start < interval1.End, interval1 -} - -func intervalsOverlap(interval1, interval2 model.Interval) bool { - if interval1.Start > interval2.End || interval2.Start > interval1.End { - return false - } - - return true -} diff --git a/internal/cortex/chunk/purger/tombstones_test.go b/internal/cortex/chunk/purger/tombstones_test.go deleted file mode 100644 index fc1f5433f7..0000000000 --- a/internal/cortex/chunk/purger/tombstones_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0.
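// A self-contained sketch of the interval merging implemented in mergeIntervals
// above, with model.Interval reduced to a plain int64 pair; names and values
// below are illustrative.
package main

import (
	"fmt"
	"sort"
)

type interval struct{ start, end int64 }

// merge sorts by start time and coalesces overlapping ranges, mirroring
// mergeIntervals in tombstones.go.
func merge(in []interval) []interval {
	if len(in) <= 1 {
		return in
	}
	sort.Slice(in, func(i, j int) bool { return in[i].start < in[j].start })
	out := make([]interval, 0, len(in))
	cur := in[0]
	for _, iv := range in[1:] {
		if iv.start > cur.end { // disjoint: flush the ongoing range
			out = append(out, cur)
			cur = iv
			continue
		}
		if iv.end > cur.end { // overlapping: extend the ongoing range
			cur.end = iv.end
		}
	}
	return append(out, cur)
}

func main() {
	fmt.Println(merge([]interval{{10, 20}, {15, 30}, {40, 50}})) // [{10 30} {40 50}]
}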
- -package purger - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql/parser" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTombstonesLoader(t *testing.T) { - deleteRequestSelectors := []string{"foo"} - metric, err := parser.ParseMetric(deleteRequestSelectors[0]) - require.NoError(t, err) - - for _, tc := range []struct { - name string - deleteRequestIntervals []model.Interval - queryForInterval model.Interval - expectedIntervals []model.Interval - }{ - { - name: "no delete requests", - queryForInterval: model.Interval{End: modelTimeDay}, - }, - { - name: "query out of range of delete requests", - deleteRequestIntervals: []model.Interval{ - {End: modelTimeDay}, - }, - queryForInterval: model.Interval{Start: modelTimeDay.Add(time.Hour), End: modelTimeDay * 2}, - }, - { - name: "no overlap but disjoint deleted intervals", - deleteRequestIntervals: []model.Interval{ - {End: modelTimeDay}, - {Start: modelTimeDay.Add(time.Hour), End: modelTimeDay.Add(2 * time.Hour)}, - }, - queryForInterval: model.Interval{End: modelTimeDay.Add(2 * time.Hour)}, - expectedIntervals: []model.Interval{ - {End: modelTimeDay}, - {Start: modelTimeDay.Add(time.Hour), End: modelTimeDay.Add(2 * time.Hour)}, - }, - }, - { - name: "no overlap but continuous deleted intervals", - deleteRequestIntervals: []model.Interval{ - {End: modelTimeDay}, - {Start: modelTimeDay, End: modelTimeDay.Add(2 * time.Hour)}, - }, - queryForInterval: model.Interval{End: modelTimeDay.Add(2 * time.Hour)}, - expectedIntervals: []model.Interval{ - {End: modelTimeDay.Add(2 * time.Hour)}, - }, - }, - { - name: "some overlap in deleted intervals", - deleteRequestIntervals: []model.Interval{ - {End: modelTimeDay}, - {Start: modelTimeDay.Add(-time.Hour), End: modelTimeDay.Add(2 * time.Hour)}, - }, - queryForInterval: model.Interval{End: modelTimeDay.Add(2 * time.Hour)}, - expectedIntervals: []model.Interval{ - {End: modelTimeDay.Add(2 * time.Hour)}, - }, - }, - { - name: "complete overlap in deleted intervals", - deleteRequestIntervals: []model.Interval{ - {End: modelTimeDay}, - {End: modelTimeDay}, - }, - queryForInterval: model.Interval{End: modelTimeDay.Add(2 * time.Hour)}, - expectedIntervals: []model.Interval{ - {End: modelTimeDay}, - }, - }, - { - name: "mix of overlaps in deleted intervals", - deleteRequestIntervals: []model.Interval{ - {End: modelTimeDay}, - {End: modelTimeDay}, - {Start: modelTimeDay.Add(time.Hour), End: modelTimeDay.Add(2 * time.Hour)}, - {Start: modelTimeDay.Add(2 * time.Hour), End: modelTimeDay.Add(24 * time.Hour)}, - {Start: modelTimeDay.Add(23 * time.Hour), End: modelTimeDay * 3}, - }, - queryForInterval: model.Interval{End: modelTimeDay * 10}, - expectedIntervals: []model.Interval{ - {End: modelTimeDay}, - {Start: modelTimeDay.Add(time.Hour), End: modelTimeDay * 3}, - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - deleteStore := setupTestDeleteStore(t) - tombstonesLoader := NewTombstonesLoader(deleteStore, nil) - - // add delete requests - for _, interval := range tc.deleteRequestIntervals { - err := deleteStore.AddDeleteRequest(context.Background(), userID, interval.Start, interval.End, deleteRequestSelectors) - require.NoError(t, err) - } - - // get all delete requests for user - tombstonesAnalyzer, err := tombstonesLoader.GetPendingTombstones(userID) - require.NoError(t, err) - - // verify whether number of delete requests is same as what we added - require.Equal(t, 
len(tc.deleteRequestIntervals), tombstonesAnalyzer.Len()) - - // if we are expecting to get deleted intervals then HasTombstonesForInterval should return true else false - expectedHasTombstonesForInterval := true - if len(tc.expectedIntervals) == 0 { - expectedHasTombstonesForInterval = false - } - - hasTombstonesForInterval := tombstonesAnalyzer.HasTombstonesForInterval(tc.queryForInterval.Start, tc.queryForInterval.End) - require.Equal(t, expectedHasTombstonesForInterval, hasTombstonesForInterval) - - // get deleted intervals - intervals := tombstonesAnalyzer.GetDeletedIntervals(metric, tc.queryForInterval.Start, tc.queryForInterval.End) - require.Equal(t, len(tc.expectedIntervals), len(intervals)) - - // verify whether we got expected intervals back - for i, interval := range intervals { - require.Equal(t, tc.expectedIntervals[i].Start, interval.Start) - require.Equal(t, tc.expectedIntervals[i].End, interval.End) - } - }) - } -} - -func TestTombstonesLoader_GetCacheGenNumber(t *testing.T) { - s := &store{ - numbers: map[string]*cacheGenNumbers{ - "tenant-a": { - results: "1000", - store: "2050", - }, - "tenant-b": { - results: "1050", - store: "2000", - }, - "tenant-c": { - results: "", - store: "", - }, - "tenant-d": { - results: "results-c", - store: "store-c", - }, - }, - } - tombstonesLoader := NewTombstonesLoader(s, nil) - - for _, tc := range []struct { - name string - expectedResultsCacheGenNumber string - expectedStoreCacheGenNumber string - tenantIDs []string - }{ - { - name: "single tenant with numeric values", - tenantIDs: []string{"tenant-a"}, - expectedResultsCacheGenNumber: "1000", - expectedStoreCacheGenNumber: "2050", - }, - { - name: "single tenant with non-numeric values", - tenantIDs: []string{"tenant-d"}, - expectedResultsCacheGenNumber: "results-c", - expectedStoreCacheGenNumber: "store-c", - }, - { - name: "multiple tenants with numeric values", - tenantIDs: []string{"tenant-a", "tenant-b"}, - expectedResultsCacheGenNumber: "1050", - expectedStoreCacheGenNumber: "2050", - }, - { - name: "multiple tenants with numeric and non-numeric values", - tenantIDs: []string{"tenant-d", "tenant-c", "tenant-b", "tenant-a"}, - expectedResultsCacheGenNumber: "1050", - expectedStoreCacheGenNumber: "2050", - }, - { - name: "no tenants", // not really an expected call, edge case check to avoid any panics - }, - } { - t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.expectedResultsCacheGenNumber, tombstonesLoader.GetResultsCacheGenNumber(tc.tenantIDs)) - assert.Equal(t, tc.expectedStoreCacheGenNumber, tombstonesLoader.GetStoreCacheGenNumber(tc.tenantIDs)) - }) - } -} - -func TestTombstonesReloadDoesntDeadlockOnFailure(t *testing.T) { - s := &store{} - tombstonesLoader := NewTombstonesLoader(s, nil) - tombstonesLoader.getCacheGenNumbers("test") - - s.err = errors.New("error") - require.NotNil(t, tombstonesLoader.reloadTombstones()) - - s.err = nil - require.NotNil(t, tombstonesLoader.getCacheGenNumbers("test2")) -} - -type store struct { - numbers map[string]*cacheGenNumbers - err error -} - -func (f *store) getCacheGenerationNumbers(ctx context.Context, user string) (*cacheGenNumbers, error) { - if f.numbers != nil { - number, ok := f.numbers[user] - if ok { - return number, nil - } - } - return &cacheGenNumbers{}, f.err -} - -func (f *store) GetPendingDeleteRequestsForUser(ctx context.Context, id string) ([]DeleteRequest, error) { - return nil, nil -} diff --git a/internal/cortex/chunk/schema.go b/internal/cortex/chunk/schema.go deleted file mode 100644 index 
7abe9b6252..0000000000 --- a/internal/cortex/chunk/schema.go +++ /dev/null @@ -1,970 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package chunk - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "strconv" - "strings" - - "github.com/go-kit/log/level" - jsoniter "github.com/json-iterator/go" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/thanos-io/thanos/internal/cortex/querier/astmapper" - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" -) - -const ( - chunkTimeRangeKeyV1a = 1 - chunkTimeRangeKeyV1 = '1' - chunkTimeRangeKeyV2 = '2' - chunkTimeRangeKeyV3 = '3' - chunkTimeRangeKeyV4 = '4' - chunkTimeRangeKeyV5 = '5' - metricNameRangeKeyV1 = '6' - - // For v9 schema - seriesRangeKeyV1 = '7' - labelSeriesRangeKeyV1 = '8' - // For v11 schema - labelNamesRangeKeyV1 = '9' -) - -var ( - // ErrNotSupported when a schema doesn't support that particular lookup. - ErrNotSupported = errors.New("not supported") - empty = []byte("-") -) - -type hasChunksForIntervalFunc func(userID, seriesID string, from, through model.Time) (bool, error) - -// Schema interfaces define methods to calculate the hash and range keys needed -// to write or read chunks from the external index. - -// BaseSchema has operations shared between StoreSchema and SeriesStoreSchema -type BaseSchema interface { - // When doing a read, use these methods to return the list of entries you should query - GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) - GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) - GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) - FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery -} - -// StoreSchema is a schema used by store -type StoreSchema interface { - BaseSchema - - // When doing a write, use this method to return the list of entries you should write to. - GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) -} - -// SeriesStoreSchema is a schema used by seriesStore -type SeriesStoreSchema interface { - BaseSchema - - // returns cache key string and []IndexEntry per bucket, matched in order - GetCacheKeysAndLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]string, [][]IndexEntry, error) - GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) - - // If the query resulted in series IDs, use this method to find chunks. - GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) - // Returns queries to retrieve all label names of multiple series by id. - GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) - - // GetSeriesDeleteEntries returns IndexEntry's for deleting SeriesIDs from SeriesStore.
- // Since SeriesIDs are created per bucket, it makes sure that we don't include series entries which are still in use by verifying with hasChunksForIntervalFunc, i.e. - // it checks the first and last buckets covered by the time interval to see if a SeriesID still has chunks in the store; - // if yes, it doesn't include IndexEntry's for that bucket for deletion. - GetSeriesDeleteEntries(from, through model.Time, userID string, metric labels.Labels, hasChunksForIntervalFunc hasChunksForIntervalFunc) ([]IndexEntry, error) -} - -// IndexQuery describes a query for entries -type IndexQuery struct { - TableName string - HashValue string - - // One of RangeValuePrefix or RangeValueStart might be set: - // - If RangeValuePrefix is not nil, must read all keys with that prefix. - // - If RangeValueStart is not nil, must read all keys from there onwards. - // - If neither is set, must read all keys for that row. - RangeValuePrefix []byte - RangeValueStart []byte - - // Filters for querying - ValueEqual []byte - - // If the result of this lookup is immutable or not (for caching). - Immutable bool -} - -// IndexEntry describes an entry in the chunk index -type IndexEntry struct { - TableName string - HashValue string - - // For writes, RangeValue will always be set. - RangeValue []byte - - // New for v6 schema, label value is not written as part of the range key. - Value []byte -} - -type schemaBucketsFunc func(from, through model.Time, userID string) []Bucket - -// baseSchema implements BaseSchema given a bucketing function and set of range key callbacks -type baseSchema struct { - buckets schemaBucketsFunc - entries baseEntries -} - -// storeSchema implements StoreSchema given a bucketing function and set of range key callbacks -type storeSchema struct { - baseSchema - entries storeEntries -} - -// seriesStoreSchema implements SeriesStoreSchema given a bucketing function and set of range key callbacks -type seriesStoreSchema struct { - baseSchema - entries seriesStoreEntries -} - -func newStoreSchema(buckets schemaBucketsFunc, entries storeEntries) storeSchema { - return storeSchema{ - baseSchema: baseSchema{buckets: buckets, entries: entries}, - entries: entries, - } -} - -func newSeriesStoreSchema(buckets schemaBucketsFunc, entries seriesStoreEntries) seriesStoreSchema { - return seriesStoreSchema{ - baseSchema: baseSchema{buckets: buckets, entries: entries}, - entries: entries, - } -} - -func (s storeSchema) GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - var result []IndexEntry - - for _, bucket := range s.buckets(from, through, userID) { - entries, err := s.entries.GetWriteEntries(bucket, metricName, labels, chunkID) - if err != nil { - return nil, err - } - result = append(result, entries...) - } - return result, nil -} - -// returns cache key string and []IndexEntry per bucket, matched in order -func (s seriesStoreSchema) GetCacheKeysAndLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]string, [][]IndexEntry, error) { - var keys []string - var indexEntries [][]IndexEntry - - for _, bucket := range s.buckets(from, through, userID) { - key := strings.Join([]string{ - bucket.tableName, - bucket.hashKey, - string(labelsSeriesID(labels)), - }, - "-", - ) - // This is just encoding to remove invalid characters so that we can put them in memcache. - // We're not hashing them as the length of the key is well within memcache bounds.
tableName + userid + day + 32Byte(seriesID) - key = hex.EncodeToString([]byte(key)) - keys = append(keys, key) - - entries, err := s.entries.GetLabelWriteEntries(bucket, metricName, labels, chunkID) - if err != nil { - return nil, nil, err - } - indexEntries = append(indexEntries, entries) - } - return keys, indexEntries, nil -} - -func (s seriesStoreSchema) GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - var result []IndexEntry - - for _, bucket := range s.buckets(from, through, userID) { - entries, err := s.entries.GetChunkWriteEntries(bucket, metricName, labels, chunkID) - if err != nil { - return nil, err - } - result = append(result, entries...) - } - return result, nil - -} - -func (s baseSchema) GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) { - var result []IndexQuery - - buckets := s.buckets(from, through, userID) - for _, bucket := range buckets { - entries, err := s.entries.GetReadMetricQueries(bucket, metricName) - if err != nil { - return nil, err - } - result = append(result, entries...) - } - return result, nil -} - -func (s baseSchema) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) { - var result []IndexQuery - - buckets := s.buckets(from, through, userID) - for _, bucket := range buckets { - entries, err := s.entries.GetReadMetricLabelQueries(bucket, metricName, labelName) - if err != nil { - return nil, err - } - result = append(result, entries...) - } - return result, nil -} - -func (s baseSchema) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { - var result []IndexQuery - - buckets := s.buckets(from, through, userID) - for _, bucket := range buckets { - entries, err := s.entries.GetReadMetricLabelValueQueries(bucket, metricName, labelName, labelValue) - if err != nil { - return nil, err - } - result = append(result, entries...) - } - return result, nil -} - -func (s seriesStoreSchema) GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { - var result []IndexQuery - - buckets := s.buckets(from, through, userID) - for _, bucket := range buckets { - entries, err := s.entries.GetChunksForSeries(bucket, seriesID) - if err != nil { - return nil, err - } - result = append(result, entries...) - } - return result, nil -} - -// GetSeriesDeleteEntries returns IndexEntry's for deleting SeriesIDs from SeriesStore. -// Since SeriesIDs are created per bucket, it makes sure that we don't include series entries which are still in use by verifying with hasChunksForIntervalFunc, i.e. -// it checks the first and last buckets covered by the time interval to see if a SeriesID still has chunks in the store; -// if yes, it doesn't include IndexEntry's for that bucket for deletion. -func (s seriesStoreSchema) GetSeriesDeleteEntries(from, through model.Time, userID string, metric labels.Labels, hasChunksForIntervalFunc hasChunksForIntervalFunc) ([]IndexEntry, error) { - metricName := metric.Get(model.MetricNameLabel) - if metricName == "" { - return nil, ErrMetricNameLabelMissing - } - - buckets := s.buckets(from, through, userID) - if len(buckets) == 0 { - return nil, nil - } - - seriesID := string(labelsSeriesID(metric)) - - // Only the first and last buckets need to be checked for in-use series ids.
- // Only a partially deleted first/last bucket needs this check; otherwise - // the whole bucket is considered for deletion anyway. - - // Bucket times are relative to the bucket, i.e. for a per-day bucket - // bucket.from would be the number of milliseconds elapsed since the start of that day. - // If bucket.from is not 0, it means the from param doesn't align with the start of the bucket. - if buckets[0].from != 0 { - bucketStartTime := from - model.Time(buckets[0].from) - hasChunks, err := hasChunksForIntervalFunc(userID, seriesID, bucketStartTime, bucketStartTime+model.Time(buckets[0].bucketSize)-1) - if err != nil { - return nil, err - } - - if hasChunks { - buckets = buckets[1:] - if len(buckets) == 0 { - return nil, nil - } - } - } - - lastBucket := buckets[len(buckets)-1] - - // Similar to bucket.from, bucket.through here is also relative, i.e. for a per-day bucket - // through would be the number of milliseconds elapsed since the start of that day. - // If bucket.through is not equal to the max size of the bucket, it means the through param doesn't align with the end of the bucket. - if lastBucket.through != lastBucket.bucketSize { - bucketStartTime := through - model.Time(lastBucket.through) - hasChunks, err := hasChunksForIntervalFunc(userID, seriesID, bucketStartTime, bucketStartTime+model.Time(lastBucket.bucketSize)-1) - if err != nil { - return nil, err - } - - if hasChunks { - buckets = buckets[:len(buckets)-1] - if len(buckets) == 0 { - return nil, nil - } - } - } - - var result []IndexEntry - - for _, bucket := range buckets { - entries, err := s.entries.GetLabelWriteEntries(bucket, metricName, metric, "") - if err != nil { - return nil, err - } - result = append(result, entries...) - } - - return result, nil -} - -func (s seriesStoreSchema) GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { - var result []IndexQuery - - buckets := s.buckets(from, through, userID) - for _, bucket := range buckets { - entries, err := s.entries.GetLabelNamesForSeries(bucket, seriesID) - if err != nil { - return nil, err - } - result = append(result, entries...) - } - return result, nil -} - -func (s baseSchema) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery { - return s.entries.FilterReadQueries(queries, shard) -} - -type baseEntries interface { - GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) - GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) - GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) - FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery -} - -// used by storeSchema -type storeEntries interface { - baseEntries - - GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) -} - -// used by seriesStoreSchema -type seriesStoreEntries interface { - baseEntries - - GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) - GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) - - GetChunksForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error) - GetLabelNamesForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error) -} - -// original entries: -// - hash key: :: - range key: