diff --git a/CHANGELOG.md b/CHANGELOG.md index bf3ebe3dae..80339df795 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,12 +11,24 @@ We use *breaking* word for marking changes that are not backward compatible (rel ## Unreleased. +### Added + +- [#1358](https://github.com/thanos-io/thanos/pull/1358) Added `part_size` configuration option for HTTP multipart requests minimum part size for S3 storage type +- [#1363](https://github.com/thanos-io/thanos/pull/1363) Thanos Receive now exposes `thanos_receive_hashring_nodes` and `thanos_receive_hashring_tenants` metrics to monitor status of hash-rings + +### Changed + +- [#1380](https://github.com/thanos-io/thanos/pull/1380) Upgraded important dependencies: Prometheus to 2.11.1 and TSDB to 0.9.1. Some changes affecting Querier: + - [ENHANCEMENT] Query performance improvement: Efficient iteration and search in HashForLabels and HashWithoutLabels. #5707 + - [ENHANCEMENT] Optimize queries using regexp for set lookups. tsdb#602 + - [BUGFIX] prometheus_tsdb_compactions_failed_total is now incremented on any compaction failure. tsdb#613 + - [BUGFIX] PromQL: Correctly display {__name__="a"}. #5552 - [#1338](https://github.com/thanos-io/thanos/pull/1338) Querier still warns on store API duplicate, but allows a single one from duplicated set. This is gracefully warn about the problematic logic and not disrupt immediately. +- [#1297](https://github.com/improbable-eng/thanos/pull/1297) Added `/-/ready` and `/-/healthy` endpoints to Thanos compact. ### Fixed - [#1327](https://github.com/thanos-io/thanos/pull/1327) `/series` API end-point now properly returns an empty array just like Prometheus if there are no results - - [#1302](https://github.com/thanos-io/thanos/pull/1302) Thanos now efficiently reuses HTTP keep-alive connections ## [v0.6.0](https://github.com/thanos-io/thanos/releases/tag/v0.6.0) - 2019.07.18 @@ -59,7 +71,7 @@ The other `type` you can use is `JAEGER` now. 
The `config` keys and values are J ### Changed -- [#1284](https://github.com/thanos-io/thanos/pull/1284) Add support for multiple label-sets in Info gRPC service. +- [#1284](https://github.com/thanos-io/thanos/pull/1284) Add support for multiple label-sets in Info gRPC service. This deprecates the single `Labels` slice of the `InfoResponse`, in a future release backward compatible handling for the single set of Labels will be removed. Upgrading to v0.6.0 or higher is advised. *breaking* If you run have duplicate queries in your Querier configuration with hierarchical federation of multiple Queries this PR makes Thanos Querier to detect this case and block all duplicates. Refer to 0.6.1 which at least allows for single replica to work. diff --git a/Makefile b/Makefile index aba7962ef2..a504b32105 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ DOCKER_IMAGE_NAME ?= thanos DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))-$(shell date +%Y-%m-%d)-$(shell git rev-parse --short HEAD) TMP_GOPATH ?= /tmp/thanos-go -GOBIN ?= ${GOPATH}/bin +GOBIN ?= $(firstword $(subst :, ,$(GOPATH)))/bin GO111MODULE ?= on export GO111MODULE GOPROXY ?= https://proxy.golang.org diff --git a/cmd/thanos/compact.go b/cmd/thanos/compact.go index 2bc659acb9..09e55f3732 100644 --- a/cmd/thanos/compact.go +++ b/cmd/thanos/compact.go @@ -15,7 +15,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/oklog/run" - opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/tsdb" @@ -23,10 +23,12 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/compact/downsample" + "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/objstore/client" + 
"github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" - kingpin "gopkg.in/alecthomas/kingpin.v2" + "gopkg.in/alecthomas/kingpin.v2" ) var ( @@ -49,7 +51,7 @@ func (cs compactionSet) String() string { return strings.Join(result, ", ") } -// levels returns set of compaction levels not higher than specified max compaction level +// levels returns set of compaction levels not higher than specified max compaction level. func (cs compactionSet) levels(maxLevel int) ([]int64, error) { if maxLevel >= len(cs) { return nil, errors.Errorf("level is bigger then default set of %d", len(cs)) @@ -62,13 +64,14 @@ func (cs compactionSet) levels(maxLevel int) ([]int64, error) { return levels, nil } -// maxLevel returns max available compaction level +// maxLevel returns max available compaction level. func (cs compactionSet) maxLevel() int { return len(cs) - 1 } -func registerCompact(m map[string]setupFunc, app *kingpin.Application, name string) { - cmd := app.Command(name, "continuously compacts blocks in an object store bucket") +func registerCompact(m map[string]setupFunc, app *kingpin.Application) { + comp := component.Compact + cmd := app.Command(comp.String(), "continuously compacts blocks in an object store bucket") haltOnError := cmd.Flag("debug.halt-on-error", "Halt the process if a critical compaction error is detected."). Hidden().Default("true").Bool() @@ -110,7 +113,7 @@ func registerCompact(m map[string]setupFunc, app *kingpin.Application, name stri compactionConcurrency := cmd.Flag("compact.concurrency", "Number of goroutines to use when compacting groups."). 
Default("1").Int() - m[name] = func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ bool) error { + m[comp.String()] = func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ bool) error { return runCompact(g, logger, reg, *httpAddr, *dataDir, @@ -125,7 +128,7 @@ func registerCompact(m map[string]setupFunc, app *kingpin.Application, name stri compact.ResolutionLevel5m: time.Duration(*retention5m), compact.ResolutionLevel1h: time.Duration(*retention1h), }, - name, + comp, *disableDownsampling, *maxCompactionLevel, *blockSyncConcurrency, @@ -147,7 +150,7 @@ func runCompact( wait bool, generateMissingIndexCacheFiles bool, retentionByResolution map[compact.ResolutionLevel]time.Duration, - component string, + component component.Component, disableDownsampling bool, maxCompactionLevel int, blockSyncConcurrency int, @@ -168,12 +171,18 @@ func runCompact( downsampleMetrics := newDownsampleMetrics(reg) + readinessProber := prober.NewProber(component, logger, prometheus.WrapRegistererWithPrefix("thanos_", reg)) + // Initiate default HTTP listener providing metrics endpoint and readiness/liveness probes. 
+ if err := defaultHTTPListener(g, logger, reg, httpBindAddr, readinessProber); err != nil { + return errors.Wrap(err, "create readiness prober") + } + confContentYaml, err := objStoreConfig.Content() if err != nil { return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component) + bkt, err := client.NewBucket(logger, confContentYaml, reg, component.String()) if err != nil { return err } @@ -318,11 +327,8 @@ func runCompact( cancel() }) - if err := metricHTTPListenGroup(g, logger, reg, httpBindAddr); err != nil { - return err - } - level.Info(logger).Log("msg", "starting compact node") + readinessProber.SetReady() return nil } diff --git a/cmd/thanos/main.go b/cmd/thanos/main.go index 1c53440ec3..a44ac43624 100644 --- a/cmd/thanos/main.go +++ b/cmd/thanos/main.go @@ -31,6 +31,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/version" + "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/tracing" "github.com/thanos-io/thanos/pkg/tracing/client" @@ -73,7 +74,7 @@ func main() { registerStore(cmds, app, "store") registerQuery(cmds, app, "query") registerRule(cmds, app, "rule") - registerCompact(cmds, app, "compact") + registerCompact(cmds, app) registerBucket(cmds, app, "bucket") registerDownsample(cmds, app, "downsample") registerReceive(cmds, app, "receive") @@ -122,7 +123,7 @@ func main() { ) prometheus.DefaultRegisterer = metrics - // Memberlist uses go-metrics + // Memberlist uses go-metrics. sink, err := gprom.NewPrometheusSink() if err != nil { fmt.Fprintln(os.Stderr, errors.Wrapf(err, "%s command failed", cmd)) @@ -311,6 +312,7 @@ func defaultGRPCServerOpts(logger log.Logger, reg *prometheus.Registry, tracer o return append(opts, grpc.Creds(credentials.NewTLS(tlsCfg))), nil } +// TODO Remove once all components are migrated to the new defaultHTTPListener. 
// metricHTTPListenGroup is a run.Group that servers HTTP endpoint with only Prometheus metrics. func metricHTTPListenGroup(g *run.Group, logger log.Logger, reg *prometheus.Registry, httpBindAddr string) error { mux := http.NewServeMux() @@ -330,3 +332,27 @@ func metricHTTPListenGroup(g *run.Group, logger log.Logger, reg *prometheus.Regi }) return nil } + +// defaultHTTPListener starts a run.Group that servers HTTP endpoint with default endpoints providing Prometheus metrics, +// profiling and liveness/readiness probes. +func defaultHTTPListener(g *run.Group, logger log.Logger, reg *prometheus.Registry, httpBindAddr string, readinessProber *prober.Prober) error { + mux := http.NewServeMux() + registerMetrics(mux, reg) + registerProfile(mux) + readinessProber.RegisterInMux(mux) + + l, err := net.Listen("tcp", httpBindAddr) + if err != nil { + return errors.Wrap(err, "listen metrics address") + } + + g.Add(func() error { + level.Info(logger).Log("msg", "listening for metrics", "address", httpBindAddr) + readinessProber.SetHealthy() + return errors.Wrap(http.Serve(l, mux), "serve metrics") + }, func(err error) { + readinessProber.SetNotHealthy(err) + runutil.CloseWithLogOnErr(logger, l, "metric listener") + }) + return nil +} diff --git a/docs/components/store.md b/docs/components/store.md index f0c999cab8..3bc0c1e4ef 100644 --- a/docs/components/store.md +++ b/docs/components/store.md @@ -12,7 +12,6 @@ It keeps a small amount of information about all remote blocks on local disk and ```bash $ thanos store \ --data-dir "/local/state/data/dir" \ - --cluster.peers "thanos-cluster.example.org" \ --objstore.config-file "bucket.yml" ``` diff --git a/docs/storage.md b/docs/storage.md index b9050801e4..eb48e08d64 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -62,6 +62,7 @@ config: insecure_skip_verify: false trace: enable: false + part_size: 0 ``` At a minimum, you will need to provide a value for the `bucket`, `endpoint`, `access_key`, and `secret_key` keys. 
The rest of the keys are optional. @@ -74,6 +75,8 @@ You can configure the timeout settings for the HTTP client by setting the `http_ Please refer to the documentation of [the Transport type](https://golang.org/pkg/net/http/#Transport) in the `net/http` package for detailed information on what each option does. +`part_size` is specified in bytes and refers to the minimum file size used for multipart uploads, as some custom S3 implementations may have different requirements. A value of `0` means to use a default 128 MiB size. + For debug and testing purposes you can set * `insecure: true` to switch to plain insecure HTTP instead of HTTPS diff --git a/go.mod b/go.mod index 9873a47487..e75e8c131d 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ require ( cloud.google.com/go v0.34.0 github.com/Azure/azure-storage-blob-go v0.7.0 github.com/NYTimes/gziphandler v1.1.1 + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da github.com/cespare/xxhash v1.1.0 github.com/fatih/structtag v1.0.0 @@ -33,8 +34,8 @@ require ( github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v1.0.0 github.com/prometheus/common v0.6.0 - github.com/prometheus/prometheus v2.9.2+incompatible - github.com/prometheus/tsdb v0.8.0 + github.com/prometheus/prometheus v0.0.0-20190710134608-e5b22494857d + github.com/prometheus/tsdb v0.9.1 github.com/uber-go/atomic v1.4.0 // indirect github.com/uber/jaeger-client-go v2.16.0+incompatible github.com/uber/jaeger-lib v2.0.0+incompatible diff --git a/go.sum b/go.sum index 24b25203f3..78f7bb5952 100644 --- a/go.sum +++ b/go.sum @@ -17,6 +17,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= github.com/OneOfOne/xxhash v1.2.5/go.mod 
h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -24,6 +26,8 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx github.com/a8m/mark v0.1.1-0.20170507133748-44f2db618845/go.mod h1:c8Mh99Cw82nrsAnPgxQSZHkswVOJF7/MqZb1ZdvriLM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -31,8 +35,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v0.0.0-20180507225419-00862f899353 h1:qFKf58XUUvHaEz0zFkLJsQ4dzoAyrQ8QyhK4nHGHBI4= -github.com/aws/aws-sdk-go v0.0.0-20180507225419-00862f899353/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= @@ -40,9 +44,11 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4= github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= +github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -56,9 +62,10 @@ github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac h1:xrQJVwQCGqDvOO7/0+RyIq5J2M3Q4ZF7Ug/BMQtML1E= -github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -77,8 +84,9 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/gernest/wow v0.1.0/go.mod h1:dEPabJRi5BneI1Nev1VWo0ZlcTWibHWp43qxKms4elY= github.com/getsentry/raven-go v0.1.2/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-ini/ini v1.21.1 h1:+QXUYsI7Tfxc64oD6R5BxU/Aq+UwGkyjH4W/hMNG7bg= -github.com/go-ini/ini v1.21.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod 
h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= @@ -87,6 +95,27 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod 
h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -110,23 +139,22 @@ github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20150304233714-bbcb9da2d746 h1:M6d2zDTA4cKXT6OwFsJxlo5tWrAukj3KfvJ1zcBatnA= -github.com/google/gofuzz v0.0.0-20150304233714-bbcb9da2d746/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20180605153948-8b03ce837f34/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gnostic v0.0.0-20180520015035-48a0ecefe2e4 h1:yxHFSapGMUoyn+3v6LiJJxoJhvbDqIq8me0gAWehnSU= -github.com/googleapis/gnostic v0.0.0-20180520015035-48a0ecefe2e4/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e h1:hQpY0g0UGsLKLDs8UJ6xpA2gNCkEdEbvxSPqLItXCpI= github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= 
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20181025070259-68e3a13e4117 h1:v9uUYPE4RHQHA0C9XfpAX9uzWQvgIDYjPh6m/mQgrzs= @@ -134,17 +162,17 @@ github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20181025070259-68e3a13e4117/ github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/consul v1.4.4 h1:DR1+5EGgnPsd/LIsK3c9RDvajcsV5GOkGQBSNd3dpn8= -github.com/hashicorp/consul v1.4.4/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod 
h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -164,10 +192,10 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/influxdata/influxdb v0.0.0-20170331210902-15e594fc09f1/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 h1:/UewZcckqhvnnS0C6r3Sher2hSEbVmM6Ogpcjen08+Y= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -185,12 +213,14 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-go v0.15.6/go.mod h1:6AMpwZpsyCFwSovxzM78e+AsYxE8sGwiM6C3TytaWeI= github.com/lovoo/gcloud-opentracing v0.3.0 h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDLnHp0urPLU= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= @@ -200,15 +230,14 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.8 h1:1QYRAKU3lN5cRfLCkPU08hwvLJFhvjP6MqNMmQz6ZVI= -github.com/miekg/dns v1.1.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.10/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15 
h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/cli v1.20.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= github.com/minio/minio-go/v6 v6.0.27-0.20190529152532-de69c0e465ed h1:g3DRJpu22jEjs14fSeJ7Crn9vdreiRsn4RtrEsXH/6A= github.com/minio/minio-go/v6 v6.0.27-0.20190529152532-de69c0e465ed/go.mod h1:vaNT59cWULS37E+E9zkuN/BVnKHyXtVGS+b04Boc66Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -218,11 +247,9 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= 
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mozillazg/go-cos v0.12.0 h1:b9hUd5HjrDe10BUfkyiLYI1+z4M2kAgKasktszx9pO4= @@ -231,9 +258,9 @@ github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSr github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/oklog v0.0.0-20170918173356-f857583a70c3/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88= @@ -252,8 +279,7 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/peterbourgon/diskv v0.0.0-20180312054125-0646ccaebea1 
h1:k/dnb0bixQwWsDLxwr6/w7rtZCVDKdbQnGQkeZGYsws= -github.com/peterbourgon/diskv v0.0.0-20180312054125-0646ccaebea1/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg= github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -263,7 +289,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/alertmanager v0.17.0/go.mod h1:3/vUuD9sDlkVuB2KLczjrlG7aqT09pyK0jfTp/itWS0= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= @@ -273,36 +301,38 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/prometheus v2.9.2+incompatible h1:5QVnXpkSsbbG59TyZ99clRfaHQy2QuIlTv6dEgS66C4= -github.com/prometheus/prometheus v2.9.2+incompatible/go.mod h1:vdLuLLM0uqhLSofrQ7Nev2b/rQUyZ+pkT3zF7LB/i1g= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/prometheus/tsdb v0.8.0 h1:w1tAGxsBMLkuGrFMhqgcCeBkM5d1YI24udArs+aASuQ= -github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g= +github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod 
h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/prometheus/prometheus v0.0.0-20190710134608-e5b22494857d h1:+89KS5XapfxQ/uKH9S24al72z0gp1N1jrK6HyDbEKWU= +github.com/prometheus/prometheus v0.0.0-20190710134608-e5b22494857d/go.mod h1:11Mk7Gzjuke9GloQr0K9Rltwvz4fGeuU7/YlzqcHCPE= +github.com/prometheus/tsdb v0.9.1 h1:IWaAmWkYlgG7/S4iw4IpAQt5Y35QaZM6/GsZ7GsjAuk= +github.com/prometheus/tsdb v0.9.1/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rlmcpherson/s3gof3r v0.5.0/go.mod h1:s7vv7SMDPInkitQMuZzH615G7yWHdrU2r/Go7Bo71Rs= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13 h1:4AQBn5RJY4WH8t8TLEMZUsWeXHAUcoao42TCAfpEJJE= github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.0.0-20161201235124-341000892f3d/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= +github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod 
h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -325,6 +355,7 @@ github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQG github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -335,6 +366,7 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -344,6 +376,7 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -352,6 +385,7 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -378,19 +412,20 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20170424234030-8be79e1e0910 h1:bCMaBn7ph495H+x72gEvgcv+mDRd9dElbzo/mVCMxX4= -golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -429,14 +464,11 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20181213150558-05914d821849 h1:WZFcFPXmLR7g5CxQNmjWv0mg8qulJLxDghbzS4pQtzY= -k8s.io/api v0.0.0-20181213150558-05914d821849/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93 h1:tT6oQBi0qwLbbZSfDkdIsb23EwaLY85hoAV4SpXfdao= -k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v2.0.0-alpha.0.0.20181121191925-a47917edff34+incompatible h1:7JnS1I1KbtbearjSCrycUhHSob+KjG6HDWY1GhjkAIU= -k8s.io/client-go v2.0.0-alpha.0.0.20181121191925-a47917edff34+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/klog v0.1.0 h1:I5HMfc/DtuVaGR1KPwUrTc476K8NCqNBldC7H4dYEzk= -k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go 
v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20180629012420-d83b052f768a/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/katacoda.yaml b/katacoda.yaml new file mode 100644 index 0000000000..65f212bde9 --- /dev/null +++ b/katacoda.yaml @@ -0,0 +1 @@ +scenario_root : tutorials/katacoda diff --git a/pkg/objstore/s3/s3.go b/pkg/objstore/s3/s3.go index a0193fe8a5..3b1cb9cbcb 100644 --- a/pkg/objstore/s3/s3.go +++ b/pkg/objstore/s3/s3.go @@ -32,6 +32,10 @@ import ( // DirDelim is the delimiter used to model a directory structure in an object store bucket. const DirDelim = "/" +// Minimum file size after which an HTTP multipart request should be used to upload objects to storage. +// Set to 128 MiB as in the minio client. +const defaultMinPartSize = 1024 * 1024 * 128 + // Config stores the configuration for s3 bucket. type Config struct { Bucket string `yaml:"bucket"` @@ -45,6 +49,7 @@ type Config struct { PutUserMetadata map[string]string `yaml:"put_user_metadata"` HTTPConfig HTTPConfig `yaml:"http_config"` TraceConfig TraceConfig `yaml:"trace"` + PartSize uint64 `yaml:"part_size"` } type TraceConfig struct { @@ -65,6 +70,7 @@ type Bucket struct { client *minio.Client sse encrypt.ServerSide putUserMetadata map[string]string + partSize uint64 } // parseConfig unmarshals a buffer into a Config with default HTTPConfig values. 
@@ -81,6 +87,11 @@ func parseConfig(conf []byte) (Config, error) { if config.PutUserMetadata == nil { config.PutUserMetadata = make(map[string]string) } + + if config.PartSize == 0 { + config.PartSize = defaultMinPartSize + } + return config, nil } @@ -174,6 +185,7 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B client: client, sse: sse, putUserMetadata: config.PutUserMetadata, + partSize: config.PartSize, } return bkt, nil } @@ -308,6 +320,7 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { r, fileSize, minio.PutObjectOptions{ + PartSize: b.partSize, ServerSideEncryption: b.sse, UserMetadata: b.putUserMetadata, }, diff --git a/pkg/objstore/s3/s3_test.go b/pkg/objstore/s3/s3_test.go index fe8ad3ff53..1b9b75d457 100644 --- a/pkg/objstore/s3/s3_test.go +++ b/pkg/objstore/s3/s3_test.go @@ -100,3 +100,36 @@ http_config: testutil.Equals(t, "bucket-owner-full-control", cfg2.PutUserMetadata["X-Amz-Acl"]) } + +func TestParseConfig_PartSize(t *testing.T) { + input := []byte(`bucket: "bucket-name" +endpoint: "s3-endpoint" +access_key: "access_key" +insecure: false +signature_version2: false +encrypt_sse: false +secret_key: "secret_key" +http_config: + insecure_skip_verify: false + idle_conn_timeout: 50s`) + + cfg, err := parseConfig(input) + testutil.Ok(t, err) + testutil.Assert(t, cfg.PartSize == 1024*1024*128, "when part size not set it should default to 128MiB") + + input2 := []byte(`bucket: "bucket-name" +endpoint: "s3-endpoint" +access_key: "access_key" +insecure: false +signature_version2: false +encrypt_sse: false +secret_key: "secret_key" +part_size: 104857600 +http_config: + insecure_skip_verify: false + idle_conn_timeout: 50s`) + + cfg2, err := parseConfig(input2) + testutil.Ok(t, err) + testutil.Assert(t, cfg2.PartSize == 1024*1024*100, "when part size should be set to 100MiB") +} diff --git a/pkg/promclient/promclient.go b/pkg/promclient/promclient.go index 054d1efc9a..5eca23b64c 100644 --- 
a/pkg/promclient/promclient.go +++ b/pkg/promclient/promclient.go @@ -76,7 +76,7 @@ func ExternalLabels(ctx context.Context, logger log.Logger, base *url.URL) (labe } if resp.StatusCode != 200 { - return nil, errors.Errorf("is 'web.enable-admin-api' flag enabled? got non-200 response code: %v, response: %v", resp.StatusCode, string(b)) + return nil, errors.Errorf("got non-200 response code: %v, response: %v", resp.StatusCode, string(b)) } var d struct { @@ -242,7 +242,7 @@ func Snapshot(ctx context.Context, logger log.Logger, base *url.URL, skipHead bo } if resp.StatusCode != 200 { - return "", errors.Errorf("got non-200 response code: %v, response: %v", resp.StatusCode, string(b)) + return "", errors.Errorf("is 'web.enable-admin-api' flag enabled? got non-200 response code: %v, response: %v", resp.StatusCode, string(b)) } var d struct { diff --git a/pkg/query/api/v1.go b/pkg/query/api/v1.go index 7ec9e64f03..ad897a7333 100644 --- a/pkg/query/api/v1.go +++ b/pkg/query/api/v1.go @@ -23,13 +23,11 @@ import ( "math" "net/http" "strconv" - "sync" "time" "github.com/NYTimes/gziphandler" - "github.com/go-kit/kit/log" - opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -279,22 +277,12 @@ func (api *API) query(r *http.Request) (interface{}, []error, *ApiError) { return nil, nil, apiErr } - var ( - warnmtx sync.Mutex - warnings []error - ) - warningReporter := func(err error) { - warnmtx.Lock() - warnings = append(warnings, err) - warnmtx.Unlock() - } - // We are starting promQL tracing span here, because we have no control over promQL code. 
span, ctx := tracing.StartSpan(ctx, "promql_instant_query") defer span.Finish() begin := api.now() - qry, err := api.queryEngine.NewInstantQuery(api.queryableCreate(enableDedup, 0, enablePartialResponse, warningReporter), r.FormValue("query"), ts) + qry, err := api.queryEngine.NewInstantQuery(api.queryableCreate(enableDedup, 0, enablePartialResponse), r.FormValue("query"), ts) if err != nil { return nil, nil, &ApiError{errorBadData, err} } @@ -316,7 +304,7 @@ func (api *API) query(r *http.Request) (interface{}, []error, *ApiError) { return &queryData{ ResultType: res.Value.Type(), Result: res.Value, - }, warnings, nil + }, res.Warnings, nil } func (api *API) queryRange(r *http.Request) (interface{}, []error, *ApiError) { @@ -377,23 +365,13 @@ func (api *API) queryRange(r *http.Request) (interface{}, []error, *ApiError) { return nil, nil, apiErr } - var ( - warnmtx sync.Mutex - warnings []error - ) - warningReporter := func(err error) { - warnmtx.Lock() - warnings = append(warnings, err) - warnmtx.Unlock() - } - // We are starting promQL tracing span here, because we have no control over promQL code. 
span, ctx := tracing.StartSpan(ctx, "promql_range_query") defer span.Finish() begin := api.now() qry, err := api.queryEngine.NewRangeQuery( - api.queryableCreate(enableDedup, maxSourceResolution, enablePartialResponse, warningReporter), + api.queryableCreate(enableDedup, maxSourceResolution, enablePartialResponse), r.FormValue("query"), start, end, @@ -418,7 +396,7 @@ func (api *API) queryRange(r *http.Request) (interface{}, []error, *ApiError) { return &queryData{ ResultType: res.Value.Type(), Result: res.Value, - }, warnings, nil + }, res.Warnings, nil } func (api *API) labelValues(r *http.Request) (interface{}, []error, *ApiError) { @@ -434,17 +412,7 @@ func (api *API) labelValues(r *http.Request) (interface{}, []error, *ApiError) { return nil, nil, apiErr } - var ( - warnmtx sync.Mutex - warnings []error - ) - warningReporter := func(err error) { - warnmtx.Lock() - warnings = append(warnings, err) - warnmtx.Unlock() - } - - q, err := api.queryableCreate(true, 0, enablePartialResponse, warningReporter).Querier(ctx, math.MinInt64, math.MaxInt64) + q, err := api.queryableCreate(true, 0, enablePartialResponse).Querier(ctx, math.MinInt64, math.MaxInt64) if err != nil { return nil, nil, &ApiError{errorExec, err} } @@ -452,7 +420,7 @@ func (api *API) labelValues(r *http.Request) (interface{}, []error, *ApiError) { // TODO(fabxc): add back request context. - vals, err := q.LabelValues(name) + vals, warnings, err := q.LabelValues(name) if err != nil { return nil, nil, &ApiError{errorExec, err} } @@ -515,42 +483,34 @@ func (api *API) series(r *http.Request) (interface{}, []error, *ApiError) { return nil, nil, apiErr } - var ( - warnmtx sync.Mutex - warnings []error - ) - warningReporter := func(err error) { - warnmtx.Lock() - warnings = append(warnings, err) - warnmtx.Unlock() - } - // TODO(bwplotka): Support downsampling? 
- q, err := api.queryableCreate(enableDedup, 0, enablePartialResponse, warningReporter).Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) + q, err := api.queryableCreate(enableDedup, 0, enablePartialResponse).Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { return nil, nil, &ApiError{errorExec, err} } defer runutil.CloseWithLogOnErr(api.logger, q, "queryable series") - var sets []storage.SeriesSet + var ( + warnings []error + metrics = []labels.Labels{} + sets []storage.SeriesSet + ) for _, mset := range matcherSets { - s, _, err := q.Select(&storage.SelectParams{}, mset...) + s, warns, err := q.Select(&storage.SelectParams{}, mset...) if err != nil { return nil, nil, &ApiError{errorExec, err} } + warnings = append(warnings, warns...) sets = append(sets, s) } set := storage.NewMergeSeriesSet(sets, nil) - - metrics := []labels.Labels{} for set.Next() { metrics = append(metrics, set.At().Labels()) } if set.Err() != nil { return nil, nil, &ApiError{errorExec, set.Err()} } - return metrics, warnings, nil } @@ -627,23 +587,13 @@ func (api *API) labelNames(r *http.Request) (interface{}, []error, *ApiError) { return nil, nil, apiErr } - var ( - warnmtx sync.Mutex - warnings []error - ) - warningReporter := func(err error) { - warnmtx.Lock() - warnings = append(warnings, err) - warnmtx.Unlock() - } - - q, err := api.queryableCreate(true, 0, enablePartialResponse, warningReporter).Querier(ctx, math.MinInt64, math.MaxInt64) + q, err := api.queryableCreate(true, 0, enablePartialResponse).Querier(ctx, math.MinInt64, math.MaxInt64) if err != nil { return nil, nil, &ApiError{errorExec, err} } defer runutil.CloseWithLogOnErr(api.logger, q, "queryable labelNames") - names, err := q.LabelNames() + names, warnings, err := q.LabelNames() if err != nil { return nil, nil, &ApiError{errorExec, err} } diff --git a/pkg/query/api/v1_test.go b/pkg/query/api/v1_test.go index 36fcf06644..f017a6ab0b 100644 --- 
a/pkg/query/api/v1_test.go +++ b/pkg/query/api/v1_test.go @@ -44,7 +44,7 @@ import ( ) func testQueryableCreator(queryable storage.Queryable) query.QueryableCreator { - return func(_ bool, _ int64, _ bool, _ query.WarningReporter) storage.Queryable { + return func(_ bool, _ int64, _ bool) storage.Queryable { return queryable } } diff --git a/pkg/query/iter.go b/pkg/query/iter.go index 0f16185e17..67710b207c 100644 --- a/pkg/query/iter.go +++ b/pkg/query/iter.go @@ -152,7 +152,7 @@ func (s *chunkSeries) Iterator() storage.SeriesIterator { } sit = newChunkSeriesIterator(its) default: - return errSeriesIterator{err: errors.Errorf("unexpected result aggreagte type %v", s.aggr)} + return errSeriesIterator{err: errors.Errorf("unexpected result aggregate type %v", s.aggr)} } return newBoundedSeriesIterator(sit, s.mint, s.maxt) } diff --git a/pkg/query/querier.go b/pkg/query/querier.go index df66401ff0..7ee7322414 100644 --- a/pkg/query/querier.go +++ b/pkg/query/querier.go @@ -13,22 +13,15 @@ import ( "github.com/thanos-io/thanos/pkg/tracing" ) -// WarningReporter allows to report warnings to frontend layer. -// -// Warning can include partial errors `partialResponse` is enabled. It occurs when only part of the results are ready and -// another is not available because of the failure. -// It is required to be thread-safe. -type WarningReporter func(error) - // QueryableCreator returns implementation of promql.Queryable that fetches data from the proxy store API endpoints. // If deduplication is enabled, all data retrieved from it will be deduplicated along the replicaLabel by default. // maxResolutionMillis controls downsampling resolution that is allowed (specified in milliseconds). // partialResponse controls `partialResponseDisabled` option of StoreAPI and partial response behaviour of proxy. 
-type QueryableCreator func(deduplicate bool, maxResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable +type QueryableCreator func(deduplicate bool, maxResolutionMillis int64, partialResponse bool) storage.Queryable // NewQueryableCreator creates QueryableCreator. func NewQueryableCreator(logger log.Logger, proxy storepb.StoreServer, replicaLabel string) QueryableCreator { - return func(deduplicate bool, maxResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable { + return func(deduplicate bool, maxResolutionMillis int64, partialResponse bool) storage.Queryable { return &queryable{ logger: logger, replicaLabel: replicaLabel, @@ -36,7 +29,6 @@ func NewQueryableCreator(logger log.Logger, proxy storepb.StoreServer, replicaLa deduplicate: deduplicate, maxResolutionMillis: maxResolutionMillis, partialResponse: partialResponse, - warningReporter: r, } } } @@ -48,12 +40,11 @@ type queryable struct { deduplicate bool maxResolutionMillis int64 partialResponse bool - warningReporter WarningReporter } // Querier returns a new storage querier against the underlying proxy store API. 
func (q *queryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxResolutionMillis), q.partialResponse, q.warningReporter), nil + return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxResolutionMillis), q.partialResponse), nil } type querier struct { @@ -66,7 +57,6 @@ type querier struct { deduplicate bool maxResolutionMillis int64 partialResponse bool - warningReporter WarningReporter } // newQuerier creates implementation of storage.Querier that fetches data from the proxy @@ -80,14 +70,10 @@ func newQuerier( deduplicate bool, maxResolutionMillis int64, partialResponse bool, - warningReporter WarningReporter, ) *querier { if logger == nil { logger = log.NewNopLogger() } - if warningReporter == nil { - warningReporter = func(error) {} - } ctx, cancel := context.WithCancel(ctx) return &querier{ ctx: ctx, @@ -100,7 +86,6 @@ func newQuerier( deduplicate: deduplicate, maxResolutionMillis: maxResolutionMillis, partialResponse: partialResponse, - warningReporter: warningReporter, } } @@ -191,10 +176,9 @@ func (q *querier) Select(params *storage.SelectParams, ms ...*labels.Matcher) (s return nil, nil, errors.Wrap(err, "proxy Series()") } + var warns storage.Warnings for _, w := range resp.warnings { - // NOTE(bwplotka): We could use warnings return arguments here, however need reporter anyway for LabelValues and LabelNames method, - // so we choose to be consistent and keep reporter. 
- q.warningReporter(errors.New(w)) + warns = append(warns, errors.New(w)) } if !q.isDedupEnabled() { @@ -204,7 +188,7 @@ func (q *querier) Select(params *storage.SelectParams, ms ...*labels.Matcher) (s maxt: q.maxt, set: newStoreSeriesSet(resp.seriesSet), aggr: resAggr, - }, nil, nil + }, warns, nil } // TODO(fabxc): this could potentially pushed further down into the store API @@ -221,7 +205,7 @@ func (q *querier) Select(params *storage.SelectParams, ms ...*labels.Matcher) (s // The merged series set assembles all potentially-overlapping time ranges // of the same series into a single one. The series are ordered so that equal series // from different replicas are sequential. We can now deduplicate those. - return newDedupSeriesSet(set, q.replicaLabel), nil, nil + return newDedupSeriesSet(set, q.replicaLabel), warns, nil } // sortDedupLabels resorts the set so that the same series with different replica @@ -247,37 +231,39 @@ func sortDedupLabels(set []storepb.Series, replicaLabel string) { } // LabelValues returns all potential values for a label name. -func (q *querier) LabelValues(name string) ([]string, error) { +func (q *querier) LabelValues(name string) ([]string, storage.Warnings, error) { span, ctx := tracing.StartSpan(q.ctx, "querier_label_values") defer span.Finish() resp, err := q.proxy.LabelValues(ctx, &storepb.LabelValuesRequest{Label: name, PartialResponseDisabled: !q.partialResponse}) if err != nil { - return nil, errors.Wrap(err, "proxy LabelValues()") + return nil, nil, errors.Wrap(err, "proxy LabelValues()") } + var warns storage.Warnings for _, w := range resp.Warnings { - q.warningReporter(errors.New(w)) + warns = append(warns, errors.New(w)) } - return resp.Values, nil + return resp.Values, warns, nil } // LabelNames returns all the unique label names present in the block in sorted order. 
-func (q *querier) LabelNames() ([]string, error) { +func (q *querier) LabelNames() ([]string, storage.Warnings, error) { span, ctx := tracing.StartSpan(q.ctx, "querier_label_names") defer span.Finish() resp, err := q.proxy.LabelNames(ctx, &storepb.LabelNamesRequest{PartialResponseDisabled: !q.partialResponse}) if err != nil { - return nil, errors.Wrap(err, "proxy LabelNames()") + return nil, nil, errors.Wrap(err, "proxy LabelNames()") } + var warns storage.Warnings for _, w := range resp.Warnings { - q.warningReporter(errors.New(w)) + warns = append(warns, errors.New(w)) } - return resp.Names, nil + return resp.Names, warns, nil } func (q *querier) Close() error { diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index 56c9411631..bf82c97049 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -28,7 +28,7 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { queryableCreator := NewQueryableCreator(nil, testProxy, "test") oneHourMillis := int64(1*time.Hour) / int64(time.Millisecond) - queryable := queryableCreator(false, oneHourMillis, false, func(err error) {}) + queryable := queryableCreator(false, oneHourMillis, false) q, err := queryable.Querier(context.Background(), 0, 42) testutil.Ok(t, err) @@ -55,7 +55,7 @@ func TestQuerier_DownsampledData(t *testing.T) { }, } - q := NewQueryableCreator(nil, testProxy, "")(false, 9999999, false, nil) + q := NewQueryableCreator(nil, testProxy, "")(false, 9999999, false) engine := promql.NewEngine( promql.EngineOpts{ @@ -172,7 +172,7 @@ func TestQuerier_Series(t *testing.T) { // Querier clamps the range to [1,300], which should drop some samples of the result above. // The store API allows endpoints to send more data then initially requested. 
- q := newQuerier(context.Background(), nil, 1, 300, "", testProxy, false, 0, true, nil) + q := newQuerier(context.Background(), nil, 1, 300, "", testProxy, false, 0, true) defer func() { testutil.Ok(t, q.Close()) }() res, _, err := q.Select(&storage.SelectParams{}) diff --git a/pkg/receive/config.go b/pkg/receive/config.go index 7fcd2c7c5b..2a6c6d2686 100644 --- a/pkg/receive/config.go +++ b/pkg/receive/config.go @@ -2,6 +2,8 @@ package receive import ( "context" + "crypto/md5" + "encoding/binary" "encoding/json" "io/ioutil" "os" @@ -33,9 +35,14 @@ type ConfigWatcher struct { logger log.Logger watcher *fsnotify.Watcher - changesCounter prometheus.Counter - errorCounter prometheus.Counter - refreshCounter prometheus.Counter + hashGauge prometheus.Gauge + successGauge prometheus.Gauge + lastSuccessTimeGauge prometheus.Gauge + changesCounter prometheus.Counter + errorCounter prometheus.Counter + refreshCounter prometheus.Counter + hashringNodesGauge *prometheus.GaugeVec + hashringTenantsGauge *prometheus.GaugeVec // last is the last known configuration. 
last []HashringConfig @@ -60,6 +67,21 @@ func NewConfigWatcher(logger log.Logger, r prometheus.Registerer, path string, i interval: time.Duration(interval), logger: logger, watcher: watcher, + hashGauge: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "thanos_receive_config_hash", + Help: "Hash of the currently loaded hashring configuration file.", + }), + successGauge: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "thanos_receive_config_last_reload_successful", + Help: "Whether the last hashring configuration file reload attempt was successful.", + }), + lastSuccessTimeGauge: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "thanos_receive_config_last_reload_success_timestamp_seconds", + Help: "Timestamp of the last successful hashring configuration file reload.", + }), changesCounter: prometheus.NewCounter( prometheus.CounterOpts{ Name: "thanos_receive_hashrings_file_changes_total", @@ -75,13 +97,30 @@ func NewConfigWatcher(logger log.Logger, r prometheus.Registerer, path string, i Name: "thanos_receive_hashrings_file_refreshes_total", Help: "The number of refreshes of the hashrings configuration file.", }), + hashringNodesGauge: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "thanos_receive_hashring_nodes", + Help: "The number of nodes per hashring.", + }, + []string{"name"}), + hashringTenantsGauge: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "thanos_receive_hashring_tenants", + Help: "The number of tenants per hashring.", + }, + []string{"name"}), } if r != nil { r.MustRegister( + c.hashGauge, + c.successGauge, + c.lastSuccessTimeGauge, c.changesCounter, c.errorCounter, c.refreshCounter, + c.hashringNodesGauge, + c.hashringTenantsGauge, ) } @@ -132,8 +171,13 @@ func (cw *ConfigWatcher) Run(ctx context.Context) { } } -// readFile reads the configured file and returns a configuration. -func (cw *ConfigWatcher) readFile() ([]HashringConfig, error) { +// C returns a chan that gets hashring configuration updates. 
+func (cw *ConfigWatcher) C() <-chan []HashringConfig { + return cw.ch +} + +// readFile reads the configured file and returns content of configuration file. +func (cw *ConfigWatcher) readFile() ([]byte, error) { fd, err := os.Open(cw.path) if err != nil { return nil, err @@ -144,26 +188,33 @@ func (cw *ConfigWatcher) readFile() ([]HashringConfig, error) { } }() - content, err := ioutil.ReadAll(fd) - if err != nil { - return nil, err - } + return ioutil.ReadAll(fd) +} +// loadConfig loads raw configuration content and returns a configuration. +func (cw *ConfigWatcher) loadConfig(content []byte) ([]HashringConfig, error) { var config []HashringConfig - err = json.Unmarshal(content, &config) + err := json.Unmarshal(content, &config) return config, err } // refresh reads the configured file and sends the hashring configuration on the channel. func (cw *ConfigWatcher) refresh(ctx context.Context) { cw.refreshCounter.Inc() - config, err := cw.readFile() + cfgContent, err := cw.readFile() if err != nil { cw.errorCounter.Inc() level.Error(cw.logger).Log("msg", "failed to read configuration file", "err", err, "path", cw.path) return } + config, err := cw.loadConfig(cfgContent) + if err != nil { + cw.errorCounter.Inc() + level.Error(cw.logger).Log("msg", "failed to load configuration file", "err", err, "path", cw.path) + return + } + // If there was no change to the configuration, return early. if reflect.DeepEqual(cw.last, config) { return @@ -171,6 +222,14 @@ func (cw *ConfigWatcher) refresh(ctx context.Context) { cw.changesCounter.Inc() // Save the last known configuration. 
cw.last = config + cw.successGauge.Set(1) + cw.lastSuccessTimeGauge.Set(float64(time.Now().Unix())) + cw.hashGauge.Set(hashAsMetricValue(cfgContent)) + + for _, c := range config { + cw.hashringNodesGauge.WithLabelValues(c.Hashring).Set(float64(len(c.Endpoints))) + cw.hashringTenantsGauge.WithLabelValues(c.Hashring).Set(float64(len(c.Tenants))) + } select { case <-ctx.Done(): @@ -207,7 +266,12 @@ func (cw *ConfigWatcher) stop() { level.Debug(cw.logger).Log("msg", "hashring configuration watcher stopped") } -// C returns a chan that gets hashring configuration updates. -func (cw *ConfigWatcher) C() <-chan []HashringConfig { - return cw.ch +// hashAsMetricValue generates metric value from hash of data. +func hashAsMetricValue(data []byte) float64 { + sum := md5.Sum(data) + // We only want 48 bits as a float64 only has a 53 bit mantissa. + smallSum := sum[0:6] + var bytes = make([]byte, 8) + copy(bytes, smallSum) + return float64(binary.LittleEndian.Uint64(bytes)) } diff --git a/pkg/receive/handler.go b/pkg/receive/handler.go index 5e16bdd3ba..36c04a0faf 100644 --- a/pkg/receive/handler.go +++ b/pkg/receive/handler.go @@ -9,6 +9,7 @@ import ( "net" "net/http" "strconv" + "sync" "sync/atomic" "github.com/go-kit/kit/log" @@ -46,10 +47,12 @@ type Handler struct { logger log.Logger receiver *Writer router *route.Router - hashring Hashring options *Options listener net.Listener + mtx sync.RWMutex + hashring Hashring + // Metrics requestDuration *prometheus.HistogramVec requestsTotal *prometheus.CounterVec @@ -57,8 +60,7 @@ type Handler struct { forwardRequestsTotal *prometheus.CounterVec // These fields are uint32 rather than boolean to be able to use atomic functions. - storageReady uint32 - hashringReady uint32 + storageReady uint32 } func NewHandler(logger log.Logger, o *Options) *Handler { @@ -140,20 +142,19 @@ func (h *Handler) StorageReady() { // Hashring sets the hashring for the handler and marks the hashring as ready. 
// If the hashring is nil, then the hashring is marked as not ready. func (h *Handler) Hashring(hashring Hashring) { - if hashring == nil { - atomic.StoreUint32(&h.hashringReady, 0) - h.hashring = nil - return - } + h.mtx.Lock() + defer h.mtx.Unlock() + h.hashring = hashring - atomic.StoreUint32(&h.hashringReady, 1) } // Verifies whether the server is ready or not. func (h *Handler) isReady() bool { sr := atomic.LoadUint32(&h.storageReady) - hr := atomic.LoadUint32(&h.hashringReady) - return sr > 0 && hr > 0 + h.mtx.RLock() + hr := h.hashring != nil + h.mtx.RUnlock() + return sr > 0 && hr } // Checks if server is ready, calls f if it is, returns 503 if it is not. @@ -259,7 +260,7 @@ func (h *Handler) receive(w http.ResponseWriter, r *http.Request) { // destined for the local node will be written to the receiver. // Time series will be replicated as necessary. if err := h.forward(r.Context(), tenant, rep, &wreq); err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + http.Error(w, err.Error(), http.StatusInternalServerError) return } } @@ -275,6 +276,15 @@ func (h *Handler) receive(w http.ResponseWriter, r *http.Request) { func (h *Handler) forward(ctx context.Context, tenant string, r replica, wreq *prompb.WriteRequest) error { wreqs := make(map[string]*prompb.WriteRequest) replicas := make(map[string]replica) + + // It is possible that hashring is ready in testReady() but unready now, + // so need to lock here. + h.mtx.RLock() + if h.hashring == nil { + h.mtx.RUnlock() + return errors.New("hashring is not ready") + } + // Batch all of the time series in the write request // into several smaller write requests that are // grouped by target endpoint. 
This ensures that @@ -285,6 +295,7 @@ func (h *Handler) forward(ctx context.Context, tenant string, r replica, wreq *p for i := range wreq.Timeseries { endpoint, err := h.hashring.GetN(tenant, &wreq.Timeseries[i], r.n) if err != nil { + h.mtx.RUnlock() return err } if _, ok := wreqs[endpoint]; !ok { @@ -294,6 +305,7 @@ func (h *Handler) forward(ctx context.Context, tenant string, r replica, wreq *p wr := wreqs[endpoint] wr.Timeseries = append(wr.Timeseries, wreq.Timeseries[i]) } + h.mtx.RUnlock() return h.parallelizeRequests(ctx, tenant, replicas, wreqs) } @@ -329,7 +341,11 @@ func (h *Handler) parallelizeRequests(ctx context.Context, tenant string, replic // can be ignored if the replication factor is met. if endpoint == h.options.Endpoint { go func(endpoint string) { - ec <- h.receiver.Receive(wreqs[endpoint]) + err := h.receiver.Receive(wreqs[endpoint]) + if err != nil { + level.Error(h.logger).Log("msg", "storing locally", "err", err, "endpoint", endpoint) + } + ec <- err }(endpoint) continue } @@ -337,13 +353,13 @@ func (h *Handler) parallelizeRequests(ctx context.Context, tenant string, replic go func(endpoint string) { buf, err := proto.Marshal(wreqs[endpoint]) if err != nil { - level.Error(h.logger).Log("msg", "proto marshal error", "err", err, "endpoint", endpoint) + level.Error(h.logger).Log("msg", "marshaling proto", "err", err, "endpoint", endpoint) ec <- err return } req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(snappy.Encode(nil, buf))) if err != nil { - level.Error(h.logger).Log("msg", "create request error", "err", err, "endpoint", endpoint) + level.Error(h.logger).Log("msg", "creating request", "err", err, "endpoint", endpoint) ec <- err return } @@ -365,12 +381,14 @@ func (h *Handler) parallelizeRequests(ctx context.Context, tenant string, replic var res *http.Response res, err = http.DefaultClient.Do(req.WithContext(ctx)) if err != nil { - level.Error(h.logger).Log("msg", "forward request error", "err", err, "endpoint", endpoint) 
+ level.Error(h.logger).Log("msg", "forwarding request", "err", err, "endpoint", endpoint) ec <- err return } if res.StatusCode != http.StatusOK { - ec <- errors.New(res.Status) + err = errors.New(res.Status) + level.Error(h.logger).Log("msg", "forwarding returned non-200 status", "err", err, "endpoint", endpoint) + ec <- err return } ec <- nil @@ -400,14 +418,25 @@ func (h *Handler) replicate(ctx context.Context, tenant string, wreq *prompb.Wri wreqs := make(map[string]*prompb.WriteRequest) replicas := make(map[string]replica) var i uint64 + + // It is possible that hashring is ready in testReady() but unready now, + // so need to lock here. + h.mtx.RLock() + if h.hashring == nil { + h.mtx.RUnlock() + return errors.New("hashring is not ready") + } + for i = 0; i < h.options.ReplicationFactor; i++ { endpoint, err := h.hashring.GetN(tenant, &wreq.Timeseries[0], i) if err != nil { + h.mtx.RUnlock() return err } wreqs[endpoint] = wreq replicas[endpoint] = replica{i, true} } + h.mtx.RUnlock() err := h.parallelizeRequests(ctx, tenant, replicas, wreqs) if errs, ok := err.(terrors.MultiError); ok { diff --git a/pkg/rule/api/v1_test.go b/pkg/rule/api/v1_test.go index a556a952dd..1886a632f8 100644 --- a/pkg/rule/api/v1_test.go +++ b/pkg/rule/api/v1_test.go @@ -79,6 +79,7 @@ func (m rulesRetrieverMock) AlertingRules() []thanosrule.AlertingRule { time.Second, labels.Labels{}, labels.Labels{}, + labels.Labels{}, true, log.NewNopLogger(), ) @@ -88,6 +89,7 @@ func (m rulesRetrieverMock) AlertingRules() []thanosrule.AlertingRule { time.Second, labels.Labels{}, labels.Labels{}, + labels.Labels{}, true, log.NewNopLogger(), ) diff --git a/pkg/rule/rule.go b/pkg/rule/rule.go index d338a5c7e6..cde014310a 100644 --- a/pkg/rule/rule.go +++ b/pkg/rule/rule.go @@ -14,7 +14,7 @@ import ( "github.com/prometheus/prometheus/rules" tsdberrors "github.com/prometheus/tsdb/errors" "github.com/thanos-io/thanos/pkg/store/storepb" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" ) const tmpRuleDir = 
".tmp-rules" @@ -112,7 +112,7 @@ func (r RuleGroup) MarshalYAML() (interface{}, error) { // special field in RuleGroup file. func (m *Managers) Update(dataDir string, evalInterval time.Duration, files []string) error { var ( - errs tsdberrors.MultiError + errs = tsdberrors.MultiError{} filesMap = map[storepb.PartialResponseStrategy][]string{} ) @@ -174,7 +174,8 @@ func (m *Managers) Update(dataDir string, evalInterval time.Duration, files []st errs = append(errs, errors.Errorf("no updater found for %v", s)) continue } - if err := updater.Update(evalInterval, fs); err != nil { + // We add external labels in `pkg/alert.Queue`. + if err := updater.Update(evalInterval, fs, nil); err != nil { errs = append(errs, err) continue } diff --git a/tutorials/kubernetes-demo/manifests/thanos-compactor.yaml b/tutorials/kubernetes-demo/manifests/thanos-compactor.yaml index f3174b00a6..0e0d07fa9f 100644 --- a/tutorials/kubernetes-demo/manifests/thanos-compactor.yaml +++ b/tutorials/kubernetes-demo/manifests/thanos-compactor.yaml @@ -35,6 +35,14 @@ spec: ports: - name: http containerPort: 10902 + livenessProbe: + httpGet: + port: 10902 + path: /-/healthy + readinessProbe: + httpGet: + port: 10902 + path: /-/ready resources: limits: cpu: "1" diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod new file mode 100644 index 0000000000..a70670ae21 --- /dev/null +++ b/vendor/github.com/alecthomas/template/go.mod @@ -0,0 +1 @@ +module github.com/alecthomas/template diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go deleted file mode 100644 index a00ba1eb83..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promauto provides constructors for the usual Prometheus metrics that -// return them already registered with the global registry -// (prometheus.DefaultRegisterer). This allows very compact code, avoiding any -// references to the registry altogether, but all the constructors in this -// package will panic if the registration fails. -// -// The following example is a complete program to create a histogram of normally -// distributed random numbers from the math/rand package: -// -// package main -// -// import ( -// "math/rand" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// -// func Random() { -// for { -// histogram.Observe(rand.NormFloat64()) -// } -// } -// -// func main() { -// go Random() -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// Prometheus's version of a minimal hello-world program: -// -// package main -// -// import ( -// "fmt" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// func main() { -// http.Handle("/", 
promhttp.InstrumentHandlerCounter( -// promauto.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hello_requests_total", -// Help: "Total number of hello-world requests by HTTP code.", -// }, -// []string{"code"}, -// ), -// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprint(w, "Hello, world!") -// }), -// )) -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// This appears very handy. So why are these constructors locked away in a -// separate package? There are two caveats: -// -// First, in more complex programs, global state is often quite problematic. -// That's the reason why the metrics constructors in the prometheus package do -// not interact with the global prometheus.DefaultRegisterer on their own. You -// are free to use the Register or MustRegister functions to register them with -// the global prometheus.DefaultRegisterer, but you could as well choose a local -// Registerer (usually created with prometheus.NewRegistry, but there are other -// scenarios, e.g. testing). -// -// The second issue is that registration may fail, e.g. if a metric inconsistent -// with the newly to be registered one is already registered. But how to signal -// and handle a panic in the automatic registration with the default registry? -// The only way is panicking. While panicking on invalid input provided by the -// programmer is certainly fine, things are a bit more subtle in this case: You -// might just add another package to the program, and that package (in its init -// function) happens to register a metric with the same name as your code. Now, -// all of a sudden, either your code or the code of the newly imported package -// panics, depending on initialization order, without any opportunity to handle -// the case gracefully. Even worse is a scenario where registration happens -// later during the runtime (e.g. 
upon loading some kind of plugin), where the -// panic could be triggered long after the code has been deployed to -// production. A possibility to panic should be explicitly called out by the -// Must… idiom, cf. prometheus.MustRegister. But adding a separate set of -// constructors in the prometheus package called MustRegisterNewCounterVec or -// similar would be quite unwieldy. Adding an extra MustRegister method to each -// metric, returning the registered metric, would result in nice code for those -// using the method, but would pollute every single metric interface for -// everybody avoiding the global registry. -// -// To address both issues, the problematic auto-registering and possibly -// panicking constructors are all in this package with a clear warning -// ahead. And whoever cares about avoiding global state and possibly panicking -// function calls can simply ignore the existence of the promauto package -// altogether. -// -// A final note: There is a similar case in the net/http package of the standard -// library. It has DefaultServeMux as a global instance of ServeMux, and the -// Handle function acts on it, panicking if a handler for the same pattern has -// already been registered. However, one might argue that the whole HTTP routing -// is usually set up closely together in the same package or file, while -// Prometheus metrics tend to be spread widely over the codebase, increasing the -// chance of surprising registration failures. Furthermore, the use of global -// state in net/http has been criticized widely, and some avoid it altogether. -package promauto - -import "github.com/prometheus/client_golang/prometheus" - -// NewCounter works like the function of the same name in the prometheus package -// but it automatically registers the Counter with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. 
-func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { - c := prometheus.NewCounter(opts) - prometheus.MustRegister(c) - return c -} - -// NewCounterVec works like the function of the same name in the prometheus -// package but it automatically registers the CounterVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec -// panics. -func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { - c := prometheus.NewCounterVec(opts, labelNames) - prometheus.MustRegister(c) - return c -} - -// NewCounterFunc works like the function of the same name in the prometheus -// package but it automatically registers the CounterFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc -// panics. -func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { - g := prometheus.NewCounterFunc(opts, function) - prometheus.MustRegister(g) - return g -} - -// NewGauge works like the function of the same name in the prometheus package -// but it automatically registers the Gauge with the -// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. -func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { - g := prometheus.NewGauge(opts) - prometheus.MustRegister(g) - return g -} - -// NewGaugeVec works like the function of the same name in the prometheus -// package but it automatically registers the GaugeVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. -func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { - g := prometheus.NewGaugeVec(opts, labelNames) - prometheus.MustRegister(g) - return g -} - -// NewGaugeFunc works like the function of the same name in the prometheus -// package but it automatically registers the GaugeFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. 
-func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { - g := prometheus.NewGaugeFunc(opts, function) - prometheus.MustRegister(g) - return g -} - -// NewSummary works like the function of the same name in the prometheus package -// but it automatically registers the Summary with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. -func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { - s := prometheus.NewSummary(opts) - prometheus.MustRegister(s) - return s -} - -// NewSummaryVec works like the function of the same name in the prometheus -// package but it automatically registers the SummaryVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec -// panics. -func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { - s := prometheus.NewSummaryVec(opts, labelNames) - prometheus.MustRegister(s) - return s -} - -// NewHistogram works like the function of the same name in the prometheus -// package but it automatically registers the Histogram with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. -func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { - h := prometheus.NewHistogram(opts) - prometheus.MustRegister(h) - return h -} - -// NewHistogramVec works like the function of the same name in the prometheus -// package but it automatically registers the HistogramVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec -// panics. 
-func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { - h := prometheus.NewHistogramVec(opts, labelNames) - prometheus.MustRegister(h) - return h -} diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index be7cb698a0..c1cfd74d94 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -135,8 +135,9 @@ func NewTimestampCollector() *TimestampCollector { var ( fileSDScanDuration = prometheus.NewSummary( prometheus.SummaryOpts{ - Name: "prometheus_sd_file_scan_duration_seconds", - Help: "The duration of the File-SD scan in seconds.", + Name: "prometheus_sd_file_scan_duration_seconds", + Help: "The duration of the File-SD scan in seconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) fileSDReadErrorsCount = prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go index 435f871696..2d79dbe759 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go @@ -131,44 +131,46 @@ func (ls Labels) Hash() uint64 { } // HashForLabels returns a hash value for the labels matching the provided names. -func (ls Labels) HashForLabels(names ...string) uint64 { - b := make([]byte, 0, 1024) - - for _, v := range ls { - for _, n := range names { - if v.Name == n { - b = append(b, v.Name...) - b = append(b, sep) - b = append(b, v.Value...) - b = append(b, sep) - break - } +// 'names' have to be sorted in ascending order. 
+func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + i, j := 0, 0 + for i < len(ls) && j < len(names) { + if names[j] < ls[i].Name { + j++ + } else if ls[i].Name < names[j] { + i++ + } else { + b = append(b, ls[i].Name...) + b = append(b, sep) + b = append(b, ls[i].Value...) + b = append(b, sep) + i++ + j++ } } - return xxhash.Sum64(b) + return xxhash.Sum64(b), b } // HashWithoutLabels returns a hash value for all labels except those matching // the provided names. -func (ls Labels) HashWithoutLabels(names ...string) uint64 { - b := make([]byte, 0, 1024) - -Outer: - for _, v := range ls { - if v.Name == MetricName { - continue +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := range ls { + for j < len(names) && names[j] < ls[i].Name { + j++ } - for _, n := range names { - if v.Name == n { - continue Outer - } + if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) { + continue } - b = append(b, v.Name...) + b = append(b, ls[i].Name...) b = append(b, sep) - b = append(b, v.Value...) + b = append(b, ls[i].Value...) b = append(b, sep) } - return xxhash.Sum64(b) + return xxhash.Sum64(b), b } // Copy returns a copy of the labels. @@ -292,6 +294,13 @@ func NewBuilder(base Labels) *Builder { } } +// Reset clears all current state for the builder +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] +} + // Del deletes the label of the given name. 
func (b *Builder) Del(ns ...string) *Builder { for _, n := range ns { diff --git a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go index 60faee085e..b89470d38a 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go @@ -16,6 +16,7 @@ package rulefmt import ( "context" "io/ioutil" + "strings" "time" "github.com/pkg/errors" @@ -155,12 +156,16 @@ func testTemplateParsing(rl *Rule) (errs []error) { } // Trying to parse templates. - tmplData := template.AlertTemplateData(make(map[string]string), 0) - defs := "{{$labels := .Labels}}{{$value := .Value}}" + tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0) + defs := []string{ + "{{$labels := .Labels}}", + "{{$externalLabels := .ExternalLabels}}", + "{{$value := .Value}}", + } parseTest := func(text string) error { tmpl := template.NewTemplateExpander( context.TODO(), - defs+text, + strings.Join(append(defs, text), ""), "__alert_"+rl.Alert, tmplData, model.Time(timestamp.FromTime(time.Now())), diff --git a/vendor/github.com/prometheus/prometheus/promql/ast.go b/vendor/github.com/prometheus/prometheus/promql/ast.go index e93cac2b6f..3cc699aa31 100644 --- a/vendor/github.com/prometheus/prometheus/promql/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/ast.go @@ -323,5 +323,6 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) { // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f // for all the non-nil children of node, recursively. 
func Inspect(node Node, f inspector) { + //nolint: errcheck Walk(inspector(f), node, nil) } diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 283538ccd5..ebba3eaf9e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -243,6 +243,7 @@ func NewEngine(opts EngineOpts) *Engine { Name: "query_duration_seconds", Help: "Query timings", ConstLabels: prometheus.Labels{"slice": "queue_time"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }), queryPrepareTime: prometheus.NewSummary(prometheus.SummaryOpts{ Namespace: namespace, @@ -250,6 +251,7 @@ func NewEngine(opts EngineOpts) *Engine { Name: "query_duration_seconds", Help: "Query timings", ConstLabels: prometheus.Labels{"slice": "prepare_time"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }), queryInnerEval: prometheus.NewSummary(prometheus.SummaryOpts{ Namespace: namespace, @@ -257,6 +259,7 @@ func NewEngine(opts EngineOpts) *Engine { Name: "query_duration_seconds", Help: "Query timings", ConstLabels: prometheus.Labels{"slice": "inner_eval"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }), queryResultSort: prometheus.NewSummary(prometheus.SummaryOpts{ Namespace: namespace, @@ -264,6 +267,7 @@ func NewEngine(opts EngineOpts) *Engine { Name: "query_duration_seconds", Help: "Query timings", ConstLabels: prometheus.Labels{"slice": "result_sort"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }), } metrics.maxConcurrentQueries.Set(float64(opts.MaxConcurrent)) @@ -1519,13 +1523,17 @@ func (ev *evaluator) VectorBinop(op ItemType, lhs, rhs Vector, matching *VectorM // signatureFunc returns a function that calculates the signature for a metric // ignoring the provided labels. If on, then the given labels are only used instead. 
func signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { - // TODO(fabxc): ensure names are sorted and then use that and sortedness - // of labels by names to speed up the operations below. - // Alternatively, inline the hashing and don't build new label sets. + sort.Strings(names) if on { - return func(lset labels.Labels) uint64 { return lset.HashForLabels(names...) } + return func(lset labels.Labels) uint64 { + h, _ := lset.HashForLabels(make([]byte, 0, 1024), names...) + return h + } + } + return func(lset labels.Labels) uint64 { + h, _ := lset.HashWithoutLabels(make([]byte, 0, 1024), names...) + return h } - return func(lset labels.Labels) uint64 { return lset.HashWithoutLabels(names...) } } // resultMetric returns the metric for the given sample(s) based on the Vector @@ -1718,11 +1726,14 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p } } + sort.Strings(grouping) + lb := labels.NewBuilder(nil) + buf := make([]byte, 0, 1024) for _, s := range vec { metric := s.Metric if op == ItemCountValues { - lb := labels.NewBuilder(metric) + lb.Reset(metric) lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) metric = lb.Labels() } @@ -1731,9 +1742,9 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p groupingKey uint64 ) if without { - groupingKey = metric.HashWithoutLabels(grouping...) + groupingKey, buf = metric.HashWithoutLabels(buf, grouping...) } else { - groupingKey = metric.HashForLabels(grouping...) + groupingKey, buf = metric.HashForLabels(buf, grouping...) } group, ok := result[groupingKey] @@ -1742,7 +1753,7 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p var m labels.Labels if without { - lb := labels.NewBuilder(metric) + lb.Reset(metric) lb.Del(grouping...) 
lb.Del(labels.MetricName) m = lb.Labels() diff --git a/vendor/github.com/prometheus/prometheus/promql/parse.go b/vendor/github.com/prometheus/prometheus/promql/parse.go index 2a055fbbb1..4bb4a11c6f 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parse.go @@ -326,17 +326,15 @@ var errUnexpected = errors.New("unexpected error") // recover is the handler that turns panics into returns from the top level of Parse. func (p *parser) recover(errp *error) { e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - // Print the stack trace but do not inhibit the running application. - buf := make([]byte, 64<<10) - buf = buf[:runtime.Stack(buf, false)] - - fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf) - *errp = errUnexpected - } else { - *errp = e.(error) - } + if _, ok := e.(runtime.Error); ok { + // Print the stack trace but do not inhibit the running application. + buf := make([]byte, 64<<10) + buf = buf[:runtime.Stack(buf, false)] + + fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf) + *errp = errUnexpected + } else if e != nil { + *errp = e.(error) } p.lex.close() } diff --git a/vendor/github.com/prometheus/prometheus/promql/printer.go b/vendor/github.com/prometheus/prometheus/promql/printer.go index 05f6d114df..adcf2e699a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/printer.go +++ b/vendor/github.com/prometheus/prometheus/promql/printer.go @@ -179,8 +179,8 @@ func (node *UnaryExpr) String() string { func (node *VectorSelector) String() string { labelStrings := make([]string, 0, len(node.LabelMatchers)-1) for _, matcher := range node.LabelMatchers { - // Only include the __name__ label if its no equality matching. - if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual { + // Only include the __name__ label if its equality matching and matches the name. 
+ if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name { continue } labelStrings = append(labelStrings, matcher.String()) diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index 7292d287b0..b66d6db88e 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "net/url" + "strings" "sync" "time" @@ -119,6 +120,8 @@ type AlertingRule struct { labels labels.Labels // Non-identifying key/value pairs. annotations labels.Labels + // External labels from the global config. + externalLabels map[string]string // true if old state has been restored. We start persisting samples for ALERT_FOR_STATE // only after the restoration. restored bool @@ -140,17 +143,27 @@ type AlertingRule struct { } // NewAlertingRule constructs a new AlertingRule. -func NewAlertingRule(name string, vec promql.Expr, hold time.Duration, lbls, anns labels.Labels, restored bool, logger log.Logger) *AlertingRule { +func NewAlertingRule( + name string, vec promql.Expr, hold time.Duration, + labels, annotations, externalLabels labels.Labels, + restored bool, logger log.Logger, +) *AlertingRule { + el := make(map[string]string, len(externalLabels)) + for _, lbl := range externalLabels { + el[lbl.Name] = lbl.Value + } + return &AlertingRule{ - name: name, - vector: vec, - holdDuration: hold, - labels: lbls, - annotations: anns, - health: HealthUnknown, - active: map[uint64]*Alert{}, - logger: logger, - restored: restored, + name: name, + vector: vec, + holdDuration: hold, + labels: labels, + annotations: annotations, + externalLabels: el, + health: HealthUnknown, + active: map[uint64]*Alert{}, + logger: logger, + restored: restored, } } @@ -305,15 +318,19 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, l[lbl.Name] = lbl.Value } 
- tmplData := template.AlertTemplateData(l, smpl.V) + tmplData := template.AlertTemplateData(l, r.externalLabels, smpl.V) // Inject some convenience variables that are easier to remember for users // who are not used to Go's templating system. - defs := "{{$labels := .Labels}}{{$value := .Value}}" + defs := []string{ + "{{$labels := .Labels}}", + "{{$externalLabels := .ExternalLabels}}", + "{{$value := .Value}}", + } expand := func(text string) string { tmpl := template.NewTemplateExpander( ctx, - defs+text, + strings.Join(append(defs, text), ""), "__alert_"+r.Name(), tmplData, model.Time(timestamp.FromTime(ts)), @@ -449,7 +466,7 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { } func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { - alerts := make([]*Alert, 0) + alerts := []*Alert{} r.ForEachActiveAlert(func(alert *Alert) { if alert.needsSending(ts, resendDelay) { alert.LastSentAt = ts diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index 96766b2abf..e13e06bd71 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -38,7 +38,7 @@ import ( "github.com/prometheus/prometheus/storage" ) -// RuleHealth describes the health state of a target. +// RuleHealth describes the health state of a rule. type RuleHealth string // The possible health states of a rule based on the last execution. @@ -73,15 +73,16 @@ type Metrics struct { groupRules *prometheus.GaugeVec } -// NewGroupMetrics makes a new Metrics and registers them with then provided registerer, +// NewGroupMetrics makes a new Metrics and registers them with the provided registerer, // if not nil. 
func NewGroupMetrics(reg prometheus.Registerer) *Metrics { m := &Metrics{ evalDuration: prometheus.NewSummary( prometheus.SummaryOpts{ - Namespace: namespace, - Name: "rule_evaluation_duration_seconds", - Help: "The duration for a rule to execute.", + Namespace: namespace, + Name: "rule_evaluation_duration_seconds", + Help: "The duration for a rule to execute.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }), evalFailures: prometheus.NewCounter( prometheus.CounterOpts{ @@ -355,12 +356,44 @@ func (g *Group) stop() { func (g *Group) hash() uint64 { l := labels.New( - labels.Label{"name", g.name}, - labels.Label{"file", g.file}, + labels.Label{Name: "name", Value: g.name}, + labels.Label{Name: "file", Value: g.file}, ) return l.Hash() } +// AlertingRules returns the list of the group's alerting rules. +func (g *Group) AlertingRules() []*AlertingRule { + g.mtx.Lock() + defer g.mtx.Unlock() + + var alerts []*AlertingRule + for _, rule := range g.rules { + if alertingRule, ok := rule.(*AlertingRule); ok { + alerts = append(alerts, alertingRule) + } + } + sort.Slice(alerts, func(i, j int) bool { + return alerts[i].State() > alerts[j].State() || + (alerts[i].State() == alerts[j].State() && + alerts[i].Name() < alerts[j].Name()) + }) + return alerts +} + +// HasAlertingRules returns true if the group contains at least one AlertingRule. +func (g *Group) HasAlertingRules() bool { + g.mtx.Lock() + defer g.mtx.Unlock() + + for _, rule := range g.rules { + if _, ok := rule.(*AlertingRule); ok { + return true + } + } + return false +} + // GetEvaluationDuration returns the time in seconds it took to evaluate the rule group. func (g *Group) GetEvaluationDuration() time.Duration { g.mtx.Lock() @@ -751,11 +784,11 @@ func (m *Manager) Stop() { // Update the rule manager's state as the config requires. If // loading the new rules failed the old rule set is restored. 
-func (m *Manager) Update(interval time.Duration, files []string) error { +func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels) error { m.mtx.Lock() defer m.mtx.Unlock() - groups, errs := m.LoadGroups(interval, files...) + groups, errs := m.LoadGroups(interval, externalLabels, files...) if errs != nil { for _, e := range errs { level.Error(m.logger).Log("msg", "loading groups failed", "err", e) @@ -803,7 +836,9 @@ func (m *Manager) Update(interval time.Duration, files []string) error { } // LoadGroups reads groups from a list of files. -func (m *Manager) LoadGroups(interval time.Duration, filenames ...string) (map[string]*Group, []error) { +func (m *Manager) LoadGroups( + interval time.Duration, externalLabels labels.Labels, filenames ...string, +) (map[string]*Group, []error) { groups := make(map[string]*Group) shouldRestore := !m.restored @@ -834,6 +869,7 @@ func (m *Manager) LoadGroups(interval time.Duration, filenames ...string) (map[s time.Duration(r.For), labels.FromMap(r.Labels), labels.FromMap(r.Annotations), + externalLabels, m.restored, log.With(m.logger, "alert", r.Alert), )) @@ -868,7 +904,6 @@ func (m *Manager) RuleGroups() []*Group { rgs = append(rgs, g) } - // Sort rule groups by file, then by name. sort.Slice(rgs, func(i, j int) bool { if rgs[i].file != rgs[j].file { return rgs[i].file < rgs[j].file @@ -903,6 +938,7 @@ func (m *Manager) AlertingRules() []*AlertingRule { alerts = append(alerts, alertingRule) } } + return alerts } diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 81ae99dd6f..41ccff5a5f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -253,16 +253,28 @@ func (q *mergeQuerier) Select(params *SelectParams, matchers ...*labels.Matcher) } // LabelValues returns all potential values for a label name. 
-func (q *mergeQuerier) LabelValues(name string) ([]string, error) { +func (q *mergeQuerier) LabelValues(name string) ([]string, Warnings, error) { var results [][]string + var warnings Warnings for _, querier := range q.queriers { - values, err := querier.LabelValues(name) + values, wrn, err := querier.LabelValues(name) + + if wrn != nil { + warnings = append(warnings, wrn...) + } if err != nil { - return nil, err + q.failedQueriers[querier] = struct{}{} + // If the error source isn't the primary querier, return the error as a warning and continue. + if querier != q.primaryQuerier { + warnings = append(warnings, err) + continue + } else { + return nil, nil, err + } } results = append(results, values) } - return mergeStringSlices(results), nil + return mergeStringSlices(results), warnings, nil } func (q *mergeQuerier) IsFailedSet(set SeriesSet) bool { @@ -310,13 +322,25 @@ func mergeTwoStringSlices(a, b []string) []string { } // LabelNames returns all the unique label names present in the block in sorted order. -func (q *mergeQuerier) LabelNames() ([]string, error) { +func (q *mergeQuerier) LabelNames() ([]string, Warnings, error) { labelNamesMap := make(map[string]struct{}) + var warnings Warnings for _, b := range q.queriers { - names, err := b.LabelNames() + names, wrn, err := b.LabelNames() + if wrn != nil { + warnings = append(warnings, wrn...) + } + if err != nil { - return nil, errors.Wrap(err, "LabelNames() from Querier") + // If the error source isn't the primary querier, return the error as a warning and continue. + if b != q.primaryQuerier { + warnings = append(warnings, err) + continue + } else { + return nil, nil, errors.Wrap(err, "LabelNames() from Querier") + } } + for _, name := range names { labelNamesMap[name] = struct{}{} } @@ -328,7 +352,7 @@ func (q *mergeQuerier) LabelNames() ([]string, error) { } sort.Strings(labelNames) - return labelNames, nil + return labelNames, warnings, nil } // Close releases the resources of the Querier. 
diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index fea1c91668..688bad44c3 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -55,10 +55,10 @@ type Querier interface { Select(*SelectParams, ...*labels.Matcher) (SeriesSet, Warnings, error) // LabelValues returns all potential values for a label name. - LabelValues(name string) ([]string, error) + LabelValues(name string) ([]string, Warnings, error) // LabelNames returns all the unique label names present in the block in sorted order. - LabelNames() ([]string, error) + LabelNames() ([]string, Warnings, error) // Close releases the resources of the Querier. Close() error diff --git a/vendor/github.com/prometheus/prometheus/storage/noop.go b/vendor/github.com/prometheus/prometheus/storage/noop.go index 1c1fed4fed..acdd79f4b6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/noop.go +++ b/vendor/github.com/prometheus/prometheus/storage/noop.go @@ -30,12 +30,12 @@ func (noopQuerier) Select(*SelectParams, ...*labels.Matcher) (SeriesSet, Warning return NoopSeriesSet(), nil, nil } -func (noopQuerier) LabelValues(name string) ([]string, error) { - return nil, nil +func (noopQuerier) LabelValues(name string) ([]string, Warnings, error) { + return nil, nil, nil } -func (noopQuerier) LabelNames() ([]string, error) { - return nil, nil +func (noopQuerier) LabelNames() ([]string, Warnings, error) { + return nil, nil, nil } func (noopQuerier) Close() error { diff --git a/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go b/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go index 10d15f7067..42cb343536 100644 --- a/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go +++ b/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go @@ -130,6 +130,9 @@ type Options struct { // When true it disables the overlapping 
blocks check. // This in-turn enables vertical compaction and vertical query merge. AllowOverlappingBlocks bool + + // When true records in the WAL will be compressed. + WALCompression bool } var ( @@ -195,6 +198,7 @@ func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*t BlockRanges: rngs, NoLockfile: opts.NoLockfile, AllowOverlappingBlocks: opts.AllowOverlappingBlocks, + WALCompression: opts.WALCompression, }) if err != nil { return nil, err @@ -253,9 +257,15 @@ func (q querier) Select(_ *storage.SelectParams, oms ...*labels.Matcher) (storag return seriesSet{set: set}, nil, nil } -func (q querier) LabelValues(name string) ([]string, error) { return q.q.LabelValues(name) } -func (q querier) LabelNames() ([]string, error) { return q.q.LabelNames() } -func (q querier) Close() error { return q.q.Close() } +func (q querier) LabelValues(name string) ([]string, storage.Warnings, error) { + v, err := q.q.LabelValues(name) + return v, nil, err +} +func (q querier) LabelNames() ([]string, storage.Warnings, error) { + v, err := q.q.LabelNames() + return v, nil, err +} +func (q querier) Close() error { return q.q.Close() } type seriesSet struct { set tsdb.SeriesSet diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index ef6212780f..0eb0ed2be4 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -28,9 +28,9 @@ import ( text_template "text/template" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/strutil" ) @@ -243,6 +243,9 @@ func NewTemplateExpander( } return fmt.Sprintf("%.4g%ss", v, prefix) }, + "humanizePercentage": func(v float64) string { + return fmt.Sprintf("%.4g%%", v*100) + }, "humanizeTimestamp": func(v float64) string { 
if math.IsNaN(v) || math.IsInf(v, 0) { return fmt.Sprintf("%.4g", v) @@ -261,13 +264,15 @@ func NewTemplateExpander( } // AlertTemplateData returns the interface to be used in expanding the template. -func AlertTemplateData(labels map[string]string, value float64) interface{} { +func AlertTemplateData(labels map[string]string, externalLabels map[string]string, value float64) interface{} { return struct { - Labels map[string]string - Value float64 + Labels map[string]string + ExternalLabels map[string]string + Value float64 }{ - Labels: labels, - Value: value, + Labels: labels, + ExternalLabels: externalLabels, + Value: value, } } diff --git a/vendor/github.com/prometheus/tsdb/CHANGELOG.md b/vendor/github.com/prometheus/tsdb/CHANGELOG.md index 610899d72f..9d057a3bc3 100644 --- a/vendor/github.com/prometheus/tsdb/CHANGELOG.md +++ b/vendor/github.com/prometheus/tsdb/CHANGELOG.md @@ -1,7 +1,26 @@ -## master / unreleased +## Master / unreleased +## 0.9.1 + + - [CHANGE] LiveReader metrics are now injected rather than global. + +## 0.9.0 + + - [FEATURE] Provide option to compress WAL records using Snappy. [#609](https://github.com/prometheus/tsdb/pull/609) + - [BUGFIX] Re-calculate block size when calling `block.Delete`. + - [BUGFIX] Re-encode all head chunks at compaction that are open (being appended to) or outside the Maxt block range. This avoids writing out corrupt data. It happens when snapshotting with the head included. + - [BUGFIX] Improved handling of multiple refs for the same series in WAL reading. + - [BUGFIX] `prometheus_tsdb_compactions_failed_total` is now incremented on any compaction failure. + - [CHANGE] The meta file `BlockStats` no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size which was not included before. + - [CHANGE] Create new clean segment when starting the WAL. 
+ - [CHANGE] Renamed metric from `prometheus_tsdb_wal_reader_corruption_errors` to `prometheus_tsdb_wal_reader_corruption_errors_total`. + - [ENHANCEMENT] Improved atomicity of .tmp block replacement during compaction for usual case. + - [ENHANCEMENT] Improved postings intersection matching. + - [ENHANCEMENT] Reduced disk usage for WAL for small setups. + - [ENHANCEMENT] Optimize queries using regexp for set lookups. ## 0.8.0 + - [BUGFIX] Calling `Close` more than once on a querier returns an error instead of a panic. - [BUGFIX] Don't panic and recover nicely when running out of disk space. - [BUGFIX] Correctly handle empty labels. @@ -11,9 +30,11 @@ - [FEATURE] Added `currentSegment` metric for the current WAL segment it is being written to. ## 0.7.1 + - [ENHANCEMENT] Reduce memory usage in mergedPostings.Seek ## 0.7.0 + - [CHANGE] tsdb now requires golang 1.12 or higher. - [REMOVED] `chunks.NewReader` is removed as it wasn't used anywhere. - [REMOVED] `FromData` is considered unused so was removed. @@ -29,12 +50,15 @@ - [ENHANCEMENT] PostListings and NotMatcher now public. ## 0.6.1 + - [BUGFIX] Update `last` after appending a non-overlapping chunk in `chunks.MergeOverlappingChunks`. [#539](https://github.com/prometheus/tsdb/pull/539) ## 0.6.0 + - [CHANGE] `AllowOverlappingBlock` is now `AllowOverlappingBlocks`. ## 0.5.0 + - [FEATURE] Time-ovelapping blocks are now allowed. [#370](https://github.com/prometheus/tsdb/pull/370) - Disabled by default and can be enabled via `AllowOverlappingBlock` option. - Added `MergeChunks` function in `chunkenc/xor.go` to merge 2 time-overlapping chunks. @@ -50,6 +74,7 @@ - [BUGFIX] LiveReader can get into an infinite loop on corrupt WALs. ## 0.4.0 + - [CHANGE] New `WALSegmentSize` option to override the `DefaultOptions.WALSegmentSize`. Added to allow using smaller wal files. For example using tmpfs on a RPI to minimise the SD card wear out from the constant WAL writes. 
As part of this change the `DefaultOptions.WALSegmentSize` constant was also exposed. - [CHANGE] Empty blocks are not written during compaction [#374](https://github.com/prometheus/tsdb/pull/374) - [FEATURE] Size base retention through `Options.MaxBytes`. As part of this change: @@ -61,9 +86,11 @@ - [FEATURE] Add new `LiveReader` to WAL pacakge. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read. ## 0.3.1 + - [BUGFIX] Fixed most windows test and some actual bugs for unclosed file readers. ## 0.3.0 + - [CHANGE] `LastCheckpoint()` used to return just the segment name and now it returns the full relative path. - [CHANGE] `NewSegmentsRangeReader()` can now read over miltiple wal ranges by using the new `SegmentRange{}` struct. - [CHANGE] `CorruptionErr{}` now also exposes the Segment `Dir` which is added when displaying any errors. 
diff --git a/vendor/github.com/prometheus/tsdb/Makefile.common b/vendor/github.com/prometheus/tsdb/Makefile.common index c7f9ea64ff..48d2ff84e9 100644 --- a/vendor/github.com/prometheus/tsdb/Makefile.common +++ b/vendor/github.com/prometheus/tsdb/Makefile.common @@ -69,7 +69,7 @@ else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif -PROMU_VERSION ?= 0.4.0 +PROMU_VERSION ?= 0.5.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := @@ -86,6 +86,8 @@ endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 @@ -210,9 +212,10 @@ common-tarball: promu common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ - . 
+ $(DOCKERBUILD_CONTEXT) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) @@ -247,7 +250,9 @@ proto: ifdef GOLANGCI_LINT $(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif ifdef GOVENDOR diff --git a/vendor/github.com/prometheus/tsdb/block.go b/vendor/github.com/prometheus/tsdb/block.go index 1b6e79d9da..6a8237f1fc 100644 --- a/vendor/github.com/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/tsdb/block.go @@ -151,12 +151,6 @@ type Appendable interface { Appender() Appender } -// SizeReader returns the size of the object in bytes. -type SizeReader interface { - // Size returns the size in bytes. - Size() int64 -} - // BlockMeta provides meta information about a block. type BlockMeta struct { // Unique identifier for the block and its contents. Changes on compaction. @@ -183,7 +177,6 @@ type BlockStats struct { NumSeries uint64 `json:"numSeries,omitempty"` NumChunks uint64 `json:"numChunks,omitempty"` NumTombstones uint64 `json:"numTombstones,omitempty"` - NumBytes int64 `json:"numBytes,omitempty"` } // BlockDesc describes a block by ULID and time range. 
@@ -214,24 +207,24 @@ const metaFilename = "meta.json" func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } -func readMetaFile(dir string) (*BlockMeta, error) { +func readMetaFile(dir string) (*BlockMeta, int64, error) { b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename)) if err != nil { - return nil, err + return nil, 0, err } var m BlockMeta if err := json.Unmarshal(b, &m); err != nil { - return nil, err + return nil, 0, err } if m.Version != 1 { - return nil, errors.Errorf("unexpected meta file version %d", m.Version) + return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version) } - return &m, nil + return &m, int64(len(b)), nil } -func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) error { +func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) { meta.Version = 1 // Make any changes to the file appear atomic. @@ -245,26 +238,32 @@ func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) error { f, err := os.Create(tmp) if err != nil { - return err + return 0, err } - enc := json.NewEncoder(f) - enc.SetIndent("", "\t") + jsonMeta, err := json.MarshalIndent(meta, "", "\t") + if err != nil { + return 0, err + } var merr tsdb_errors.MultiError - if merr.Add(enc.Encode(meta)); merr.Err() != nil { + n, err := f.Write(jsonMeta) + if err != nil { + merr.Add(err) merr.Add(f.Close()) - return merr.Err() + return 0, merr.Err() } + // Force the kernel to persist the file on disk to avoid data loss if the host crashes. - if merr.Add(f.Sync()); merr.Err() != nil { + if err := f.Sync(); err != nil { + merr.Add(err) merr.Add(f.Close()) - return merr.Err() + return 0, merr.Err() } if err := f.Close(); err != nil { - return err + return 0, err } - return fileutil.Replace(tmp, path) + return int64(n), fileutil.Replace(tmp, path) } // Block represents a directory of time series data covering a continuous time range. 
@@ -285,6 +284,11 @@ type Block struct { tombstones TombstoneReader logger log.Logger + + numBytesChunks int64 + numBytesIndex int64 + numBytesTombstone int64 + numBytesMeta int64 } // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used @@ -302,7 +306,7 @@ func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, er err = merr.Err() } }() - meta, err := readMetaFile(dir) + meta, sizeMeta, err := readMetaFile(dir) if err != nil { return nil, err } @@ -319,43 +323,28 @@ func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, er } closers = append(closers, ir) - tr, tsr, err := readTombstones(dir) + tr, sizeTomb, err := readTombstones(dir) if err != nil { return nil, err } closers = append(closers, tr) - // TODO refactor to set this at block creation time as - // that would be the logical place for a block size to be calculated. - bs := blockSize(cr, ir, tsr) - meta.Stats.NumBytes = bs - err = writeMetaFile(logger, dir, meta) - if err != nil { - level.Warn(logger).Log("msg", "couldn't write the meta file for the block size", "block", dir, "err", err) - } - pb = &Block{ - dir: dir, - meta: *meta, - chunkr: cr, - indexr: ir, - tombstones: tr, - symbolTableSize: ir.SymbolTableSize(), - logger: logger, + dir: dir, + meta: *meta, + chunkr: cr, + indexr: ir, + tombstones: tr, + symbolTableSize: ir.SymbolTableSize(), + logger: logger, + numBytesChunks: cr.Size(), + numBytesIndex: ir.Size(), + numBytesTombstone: sizeTomb, + numBytesMeta: sizeMeta, } return pb, nil } -func blockSize(rr ...SizeReader) int64 { - var total int64 - for _, r := range rr { - if r != nil { - total += r.Size() - } - } - return total -} - // Close closes the on-disk block. It blocks as long as there are readers reading from the block. 
func (pb *Block) Close() error { pb.mtx.Lock() @@ -390,7 +379,9 @@ func (pb *Block) MinTime() int64 { return pb.meta.MinTime } func (pb *Block) MaxTime() int64 { return pb.meta.MaxTime } // Size returns the number of bytes that the block takes up. -func (pb *Block) Size() int64 { return pb.meta.Stats.NumBytes } +func (pb *Block) Size() int64 { + return pb.numBytesChunks + pb.numBytesIndex + pb.numBytesTombstone + pb.numBytesMeta +} // ErrClosing is returned when a block is in the process of being closed. var ErrClosing = errors.New("block is closing") @@ -437,7 +428,12 @@ func (pb *Block) GetSymbolTableSize() uint64 { func (pb *Block) setCompactionFailed() error { pb.meta.Compaction.Failed = true - return writeMetaFile(pb.logger, pb.dir, &pb.meta) + n, err := writeMetaFile(pb.logger, pb.dir, &pb.meta) + if err != nil { + return err + } + pb.numBytesMeta = n + return nil } type blockIndexReader struct { @@ -457,7 +453,10 @@ func (r blockIndexReader) LabelValues(names ...string) (index.StringTuples, erro func (r blockIndexReader) Postings(name, value string) (index.Postings, error) { p, err := r.ir.Postings(name, value) - return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + if err != nil { + return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + } + return p, nil } func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { @@ -465,11 +464,10 @@ func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { } func (r blockIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { - return errors.Wrapf( - r.ir.Series(ref, lset, chks), - "block: %s", - r.b.Meta().ULID, - ) + if err := r.ir.Series(ref, lset, chks); err != nil { + return errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + } + return nil } func (r blockIndexReader) LabelIndices() ([][]string, error) { @@ -561,10 +559,17 @@ Outer: pb.tombstones = stones pb.meta.Stats.NumTombstones = pb.tombstones.Total() - if err := writeTombstoneFile(pb.logger, 
pb.dir, pb.tombstones); err != nil { + n, err := writeTombstoneFile(pb.logger, pb.dir, pb.tombstones) + if err != nil { return err } - return writeMetaFile(pb.logger, pb.dir, &pb.meta) + pb.numBytesTombstone = n + n, err = writeMetaFile(pb.logger, pb.dir, &pb.meta) + if err != nil { + return err + } + pb.numBytesMeta = n + return nil } // CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones). diff --git a/vendor/github.com/prometheus/tsdb/checkpoint.go b/vendor/github.com/prometheus/tsdb/checkpoint.go index d8dee28aa8..eccfa62be3 100644 --- a/vendor/github.com/prometheus/tsdb/checkpoint.go +++ b/vendor/github.com/prometheus/tsdb/checkpoint.go @@ -135,7 +135,7 @@ func Checkpoint(w *wal.WAL, from, to int, keep func(id uint64) bool, mint int64) if err := os.MkdirAll(cpdirtmp, 0777); err != nil { return nil, errors.Wrap(err, "create checkpoint dir") } - cp, err := wal.New(nil, nil, cpdirtmp) + cp, err := wal.New(nil, nil, cpdirtmp, w.CompressionEnabled()) if err != nil { return nil, errors.Wrap(err, "open checkpoint") } diff --git a/vendor/github.com/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/tsdb/chunks/chunks.go index 70cb119c52..9ce8c57dae 100644 --- a/vendor/github.com/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/tsdb/chunks/chunks.go @@ -51,7 +51,9 @@ type Meta struct { Ref uint64 Chunk chunkenc.Chunk - MinTime, MaxTime int64 // time range the data covers + // Time range the data covers. + // When MaxTime == math.MaxInt64 the chunk is still open and being appended to. + MinTime, MaxTime int64 } // writeHash writes the chunk encoding and raw data into the provided hash. @@ -218,7 +220,7 @@ func MergeOverlappingChunks(chks []Meta) ([]Meta, error) { // So never overlaps with newChks[last-1] or anything before that. 
if c.MinTime > newChks[last].MaxTime { newChks = append(newChks, c) - last += 1 + last++ continue } nc := &newChks[last] diff --git a/vendor/github.com/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/tsdb/compact.go index c0948bbf3b..e19b7ed769 100644 --- a/vendor/github.com/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/tsdb/compact.go @@ -84,7 +84,6 @@ type LeveledCompactor struct { type compactorMetrics struct { ran prometheus.Counter populatingBlocks prometheus.Gauge - failed prometheus.Counter overlappingBlocks prometheus.Counter duration prometheus.Histogram chunkSize prometheus.Histogram @@ -103,10 +102,6 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { Name: "prometheus_tsdb_compaction_populating_block", Help: "Set to 1 when a block is currently being written to the disk.", }) - m.failed = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_compactions_failed_total", - Help: "Total number of compactions that failed for the partition.", - }) m.overlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_vertical_compactions_total", Help: "Total number of compactions done on overlapping blocks.", @@ -136,7 +131,6 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { r.MustRegister( m.ran, m.populatingBlocks, - m.failed, m.overlappingBlocks, m.duration, m.chunkRange, @@ -184,7 +178,7 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) { var dms []dirMeta for _, dir := range dirs { - meta, err := readMetaFile(dir) + meta, _, err := readMetaFile(dir) if err != nil { return nil, err } @@ -386,7 +380,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u start := time.Now() for _, d := range dirs { - meta, err := readMetaFile(d) + meta, _, err := readMetaFile(d) if err != nil { return uid, err } @@ -426,12 +420,14 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u if 
meta.Stats.NumSamples == 0 { for _, b := range bs { b.meta.Compaction.Deletable = true - if err = writeMetaFile(c.logger, b.dir, &b.meta); err != nil { + n, err := writeMetaFile(c.logger, b.dir, &b.meta) + if err != nil { level.Error(c.logger).Log( "msg", "Failed to write 'Deletable' to meta file after compaction", "ulid", b.meta.ULID, ) } + b.numBytesMeta = n } uid = ulid.ULID{} level.Info(c.logger).Log( @@ -541,9 +537,6 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe if err := os.RemoveAll(tmp); err != nil { level.Error(c.logger).Log("msg", "removed tmp folder after failed compaction", "err", err.Error()) } - if err != nil { - c.metrics.failed.Inc() - } c.metrics.ran.Inc() c.metrics.duration.Observe(time.Since(t).Seconds()) }(time.Now()) @@ -609,12 +602,12 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe return nil } - if err = writeMetaFile(c.logger, tmp, meta); err != nil { + if _, err = writeMetaFile(c.logger, tmp, meta); err != nil { return errors.Wrap(err, "write merged meta") } // Create an empty tombstones file. - if err := writeTombstoneFile(c.logger, tmp, newMemTombstones()); err != nil { + if _, err := writeTombstoneFile(c.logger, tmp, newMemTombstones()); err != nil { return errors.Wrap(err, "write new tombstones file") } @@ -764,6 +757,21 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } for i, chk := range chks { + // Re-encode head chunks that are still open (being appended to) or + // outside the compacted MaxTime range. + // The chunk.Bytes() method is not safe for open chunks hence the re-encoding. + // This happens when snapshotting the head block. + // + // Block time range is half-open: [meta.MinTime, meta.MaxTime) and + // chunks are closed hence the chk.MaxTime >= meta.MaxTime check. + // + // TODO think how to avoid the typecasting to verify when it is head block. 
+ if _, isHeadChunk := chk.Chunk.(*safeChunk); isHeadChunk && chk.MaxTime >= meta.MaxTime { + dranges = append(dranges, Interval{Mint: meta.MaxTime, Maxt: math.MaxInt64}) + + } else + // Sanity check for disk blocks. + // chk.MaxTime == meta.MaxTime shouldn't happen as well, but will brake many users so not checking for that. if chk.MinTime < meta.MinTime || chk.MaxTime > meta.MaxTime { return errors.Errorf("found chunk with minTime: %d maxTime: %d outside of compacted minTime: %d maxTime: %d", chk.MinTime, chk.MaxTime, meta.MinTime, meta.MaxTime) @@ -781,12 +789,21 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } it := &deletedIterator{it: chk.Chunk.Iterator(), intervals: dranges} + + var ( + t int64 + v float64 + ) for it.Next() { - ts, v := it.At() - app.Append(ts, v) + t, v = it.At() + app.Append(t, v) + } + if err := it.Err(); err != nil { + return errors.Wrap(err, "iterate chunk while re-encoding") } chks[i].Chunk = newChunk + chks[i].MaxTime = t } } diff --git a/vendor/github.com/prometheus/tsdb/db.go b/vendor/github.com/prometheus/tsdb/db.go index 52b21c2fd1..e07f7d3e74 100644 --- a/vendor/github.com/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/tsdb/db.go @@ -51,6 +51,7 @@ var DefaultOptions = &Options{ BlockRanges: ExponentialBlockRanges(int64(2*time.Hour)/1e6, 3, 5), NoLockfile: false, AllowOverlappingBlocks: false, + WALCompression: false, } // Options of the DB storage. @@ -80,6 +81,9 @@ type Options struct { // Overlapping blocks are allowed if AllowOverlappingBlocks is true. // This in-turn enables vertical compaction and vertical query merge. AllowOverlappingBlocks bool + + // WALCompression will turn on Snappy compression for records on the WAL. + WALCompression bool } // Appender allows appending a batch of data. 
It must be completed with a @@ -147,6 +151,7 @@ type dbMetrics struct { reloads prometheus.Counter reloadsFailed prometheus.Counter compactionsTriggered prometheus.Counter + compactionsFailed prometheus.Counter timeRetentionCount prometheus.Counter compactionsSkipped prometheus.Counter startTime prometheus.GaugeFunc @@ -191,6 +196,10 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { Name: "prometheus_tsdb_compactions_triggered_total", Help: "Total number of triggered compactions for the partition.", }) + m.compactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_compactions_failed_total", + Help: "Total number of compactions that failed for the partition.", + }) m.timeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_time_retentions_total", Help: "The number of times that blocks were deleted because the maximum time limit was exceeded.", @@ -231,6 +240,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { m.reloadsFailed, m.timeRetentionCount, m.compactionsTriggered, + m.compactionsFailed, m.startTime, m.tombCleanTimer, m.blocksBytes, @@ -300,7 +310,7 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db if opts.WALSegmentSize > 0 { segmentSize = opts.WALSegmentSize } - wlog, err = wal.NewSize(l, r, filepath.Join(dir, "wal"), segmentSize) + wlog, err = wal.NewSize(l, r, filepath.Join(dir, "wal"), segmentSize, opts.WALCompression) if err != nil { return nil, err } @@ -322,8 +332,12 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db minValidTime = blocks[len(blocks)-1].Meta().MaxTime } - if err := db.head.Init(minValidTime); err != nil { - return nil, errors.Wrap(err, "read WAL") + if initErr := db.head.Init(minValidTime); initErr != nil { + db.head.metrics.walCorruptionsTotal.Inc() + level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) + if err := 
wlog.Repair(initErr); err != nil { + return nil, errors.Wrap(err, "repair corrupted WAL") + } } go db.run() @@ -411,6 +425,11 @@ func (a dbAppender) Commit() error { func (db *DB) compact() (err error) { db.cmtx.Lock() defer db.cmtx.Unlock() + defer func() { + if err != nil { + db.metrics.compactionsFailed.Inc() + } + }() // Check whether we have pending head blocks that are ready to be persisted. // They have the highest priority. for { @@ -610,7 +629,7 @@ func (db *DB) openBlocks() (blocks []*Block, corrupted map[ulid.ULID]error, err corrupted = make(map[ulid.ULID]error) for _, dir := range dirs { - meta, err := readMetaFile(dir) + meta, _, err := readMetaFile(dir) if err != nil { level.Error(db.logger).Log("msg", "not a block dir", "dir", dir) continue @@ -924,8 +943,20 @@ func (db *DB) Snapshot(dir string, withHead bool) error { if !withHead { return nil } - _, err := db.compactor.Write(dir, db.head, db.head.MinTime(), db.head.MaxTime(), nil) - return errors.Wrap(err, "snapshot head block") + + mint := db.head.MinTime() + maxt := db.head.MaxTime() + head := &rangeHead{ + head: db.head, + mint: mint, + maxt: maxt, + } + // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). + // Because of this block intervals are always +1 than the total samples it includes. + if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil { + return errors.Wrap(err, "snapshot head block") + } + return nil } // Querier returns a new querier over the data partition for the given time range. diff --git a/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go b/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go index c55a2b81d4..4088f522ae 100644 --- a/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go +++ b/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go @@ -128,9 +128,19 @@ func Rename(from, to string) error { // Replace moves a file or directory to a new location and deletes any previous data. 
// It is not atomic. func Replace(from, to string) error { - if err := os.RemoveAll(to); err != nil { - return err + // Remove destination only if it is a dir otherwise leave it to os.Rename + // as it replaces the destination file and is atomic. + { + f, err := os.Stat(to) + if !os.IsNotExist(err) { + if err == nil && f.IsDir() { + if err := os.RemoveAll(to); err != nil { + return err + } + } + } } + if err := os.Rename(from, to); err != nil { return err } diff --git a/vendor/github.com/prometheus/tsdb/go.mod b/vendor/github.com/prometheus/tsdb/go.mod index 02f3cf9e78..ccdd437247 100644 --- a/vendor/github.com/prometheus/tsdb/go.mod +++ b/vendor/github.com/prometheus/tsdb/go.mod @@ -1,28 +1,14 @@ module github.com/prometheus/tsdb require ( - github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect - github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect github.com/cespare/xxhash v1.1.0 - github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 github.com/go-kit/kit v0.8.0 - github.com/go-logfmt/logfmt v0.3.0 // indirect - github.com/go-stack/stack v1.8.0 // indirect - github.com/gogo/protobuf v1.1.1 // indirect - github.com/golang/protobuf v1.2.0 // indirect - github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/golang/snappy v0.0.1 github.com/oklog/ulid v1.3.1 github.com/pkg/errors v0.8.0 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v0.9.1 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect - github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce // indirect - github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect - github.com/stretchr/testify v1.2.2 // indirect - golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f - golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8 + github.com/prometheus/client_golang v1.0.0 + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 + golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 gopkg.in/alecthomas/kingpin.v2 v2.2.6 ) diff --git a/vendor/github.com/prometheus/tsdb/go.sum b/vendor/github.com/prometheus/tsdb/go.sum index 266fbe96b0..365fa5ecf4 100644 --- a/vendor/github.com/prometheus/tsdb/go.sum +++ b/vendor/github.com/prometheus/tsdb/go.sum @@ -6,8 +6,11 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZq github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4= @@ -22,10 +25,20 @@ github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= @@ -34,19 +47,37 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce h1:X0jFYGnHemYDIW6jlc+fSI8f9Cg+jqCnClYP2WgZT/A= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8 h1:YoY1wS6JYVRpIfFngRf2HHo9R9dAne3xbkGOQ5rJXjU= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/prometheus/tsdb/head.go b/vendor/github.com/prometheus/tsdb/head.go index 92619a6401..5e2eae8581 100644 --- a/vendor/github.com/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/tsdb/head.go @@ -14,6 +14,7 @@ package tsdb import ( + "fmt" "math" "runtime" "sort" @@ -140,8 +141,9 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Help: "Total number of chunks removed in the head", }) m.gcDuration = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "prometheus_tsdb_head_gc_duration_seconds", - Help: "Runtime of garbage collection in the head block.", + Name: "prometheus_tsdb_head_gc_duration_seconds", + Help: "Runtime of garbage collection in the head block.", + Objectives: map[float64]float64{}, }) m.maxTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "prometheus_tsdb_head_max_time", @@ -156,8 +158,9 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { return float64(h.MinTime()) }) m.walTruncateDuration = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "prometheus_tsdb_wal_truncate_duration_seconds", - Help: "Duration of WAL truncation.", + Name: "prometheus_tsdb_wal_truncate_duration_seconds", + Help: "Duration of WAL truncation.", + Objectives: map[float64]float64{}, }) m.walCorruptionsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_corruptions_total", @@ -312,7 +315,7 @@ func (h *Head) updateMinMaxTime(mint, maxt int64) { } } -func (h *Head) loadWAL(r *wal.Reader) error { +func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64) (err error) { // Track number of samples that referenced a series we don't know about // for error reporting. var unknownRefs uint64 @@ -321,13 +324,26 @@ func (h *Head) loadWAL(r *wal.Reader) error { // They are connected through a ring of channels which ensures that all sample batches // read from the WAL are processed in order. 
var ( - wg sync.WaitGroup - n = runtime.GOMAXPROCS(0) - inputs = make([]chan []RefSample, n) - outputs = make([]chan []RefSample, n) + wg sync.WaitGroup + multiRefLock sync.Mutex + n = runtime.GOMAXPROCS(0) + inputs = make([]chan []RefSample, n) + outputs = make([]chan []RefSample, n) ) wg.Add(n) + defer func() { + // For CorruptionErr ensure to terminate all workers before exiting. + if _, ok := err.(*wal.CorruptionErr); ok { + for i := 0; i < n; i++ { + close(inputs[i]) + for range outputs[i] { + } + } + wg.Wait() + } + }() + for i := 0; i < n; i++ { outputs[i] = make(chan []RefSample, 300) inputs[i] = make(chan []RefSample, 300) @@ -345,9 +361,12 @@ func (h *Head) loadWAL(r *wal.Reader) error { samples []RefSample tstones []Stone allStones = newMemTombstones() - err error ) - defer allStones.Close() + defer func() { + if err := allStones.Close(); err != nil { + level.Warn(h.logger).Log("msg", "closing memTombstones during wal read", "err", err) + } + }() for r.Next() { series, samples, tstones = series[:0], samples[:0], tstones[:0] rec := r.Record() @@ -363,7 +382,14 @@ func (h *Head) loadWAL(r *wal.Reader) error { } } for _, s := range series { - h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels) + series, created := h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels) + + if !created { + // There's already a different ref for this series. + multiRefLock.Lock() + multiRef[s.Ref] = series.ref + multiRefLock.Unlock() + } if h.lastSeriesID < s.Ref { h.lastSeriesID = s.Ref @@ -398,6 +424,9 @@ func (h *Head) loadWAL(r *wal.Reader) error { shards[i] = buf[:0] } for _, sam := range samples[:m] { + if r, ok := multiRef[sam.Ref]; ok { + sam.Ref = r + } mod := sam.Ref % uint64(n) shards[mod] = append(shards[mod], sam) } @@ -436,9 +465,6 @@ func (h *Head) loadWAL(r *wal.Reader) error { } } } - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") - } // Signal termination to each worker and wait for it to close its output channel. 
for i := 0; i < n; i++ { @@ -448,6 +474,10 @@ func (h *Head) loadWAL(r *wal.Reader) error { } wg.Wait() + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } + if err := allStones.Iter(func(ref uint64, dranges Intervals) error { return h.chunkRewrite(ref, dranges) }); err != nil { @@ -477,37 +507,49 @@ func (h *Head) Init(minValidTime int64) error { if err != nil && err != ErrNotFound { return errors.Wrap(err, "find last checkpoint") } + multiRef := map[uint64]uint64{} if err == nil { sr, err := wal.NewSegmentsReader(dir) if err != nil { return errors.Wrap(err, "open checkpoint") } - defer sr.Close() + defer func() { + if err := sr.Close(); err != nil { + level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + }() // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. - if err := h.loadWAL(wal.NewReader(sr)); err != nil { + if err := h.loadWAL(wal.NewReader(sr), multiRef); err != nil { return errors.Wrap(err, "backfill checkpoint") } startFrom++ } - // Backfill segments from the last checkpoint onwards - sr, err := wal.NewSegmentsRangeReader(wal.SegmentRange{Dir: h.wal.Dir(), First: startFrom, Last: -1}) + // Find the last segment. + _, last, err := h.wal.Segments() if err != nil { - return errors.Wrap(err, "open WAL segments") + return errors.Wrap(err, "finding WAL segments") } - err = h.loadWAL(wal.NewReader(sr)) - sr.Close() // Close the reader so that if there was an error the repair can remove the corrupted file under Windows. - if err == nil { - return nil - } - level.Warn(h.logger).Log("msg", "encountered WAL error, attempting repair", "err", err) - h.metrics.walCorruptionsTotal.Inc() - if err := h.wal.Repair(err); err != nil { - return errors.Wrap(err, "repair corrupted WAL") + // Backfill segments from the most recent checkpoint onwards. 
+ for i := startFrom; i <= last; i++ { + s, err := wal.OpenReadSegment(wal.SegmentName(h.wal.Dir(), i)) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + } + + sr := wal.NewSegmentBufReader(s) + err = h.loadWAL(wal.NewReader(sr), multiRef) + if err := sr.Close(); err != nil { + level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + if err != nil { + return err + } } + return nil } @@ -553,6 +595,12 @@ func (h *Head) Truncate(mint int64) (err error) { if err != nil { return errors.Wrap(err, "get segment range") } + // Start a new segment, so low ingestion volume TSDB don't have more WAL than + // needed. + err = h.wal.NextSegment() + if err != nil { + return errors.Wrap(err, "next segment") + } last-- // Never consider last segment for checkpoint. if last < 0 { return nil // no segments yet. @@ -1250,9 +1298,15 @@ func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks if !c.OverlapsClosedInterval(h.mint, h.maxt) { continue } + // Set the head chunks as open (being appended to). + maxTime := c.maxTime + if s.headChunk == c { + maxTime = math.MaxInt64 + } + *chks = append(*chks, chunks.Meta{ MinTime: c.minTime, - MaxTime: c.maxTime, + MaxTime: maxTime, Ref: packChunkID(s.ref, uint64(s.chunkID(i))), }) } diff --git a/vendor/github.com/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/tsdb/index/postings.go index bb7b5837af..cef2d886e0 100644 --- a/vendor/github.com/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/tsdb/index/postings.go @@ -303,68 +303,68 @@ func Intersect(its ...Postings) Postings { if len(its) == 1 { return its[0] } - - l := len(its) / 2 - a := Intersect(its[:l]...) - b := Intersect(its[l:]...) 
- - if a == EmptyPostings() || b == EmptyPostings() { - return EmptyPostings() + for _, p := range its { + if p == EmptyPostings() { + return EmptyPostings() + } } - return newIntersectPostings(a, b) + + return newIntersectPostings(its...) } type intersectPostings struct { - a, b Postings - cur uint64 + arr []Postings + cur uint64 } -func newIntersectPostings(a, b Postings) *intersectPostings { - return &intersectPostings{a: a, b: b} +func newIntersectPostings(its ...Postings) *intersectPostings { + return &intersectPostings{arr: its} } func (it *intersectPostings) At() uint64 { return it.cur } -func (it *intersectPostings) doNext(id uint64) bool { +func (it *intersectPostings) doNext() bool { +Loop: for { - if !it.b.Seek(id) { - return false - } - if vb := it.b.At(); vb != id { - if !it.a.Seek(vb) { + for _, p := range it.arr { + if !p.Seek(it.cur) { return false } - id = it.a.At() - if vb != id { - continue + if p.At() > it.cur { + it.cur = p.At() + continue Loop } } - it.cur = id return true } } func (it *intersectPostings) Next() bool { - if !it.a.Next() { - return false + for _, p := range it.arr { + if !p.Next() { + return false + } + if p.At() > it.cur { + it.cur = p.At() + } } - return it.doNext(it.a.At()) + return it.doNext() } func (it *intersectPostings) Seek(id uint64) bool { - if !it.a.Seek(id) { - return false - } - return it.doNext(it.a.At()) + it.cur = id + return it.doNext() } func (it *intersectPostings) Err() error { - if it.a.Err() != nil { - return it.a.Err() + for _, p := range it.arr { + if p.Err() != nil { + return p.Err() + } } - return it.b.Err() + return nil } // Merge returns a new iterator over the union of the input iterators. 
diff --git a/vendor/github.com/prometheus/tsdb/labels/selector.go b/vendor/github.com/prometheus/tsdb/labels/selector.go index a0565f57e3..c94ebb3321 100644 --- a/vendor/github.com/prometheus/tsdb/labels/selector.go +++ b/vendor/github.com/prometheus/tsdb/labels/selector.go @@ -63,14 +63,15 @@ func NewEqualMatcher(name, value string) Matcher { return &EqualMatcher{name: name, value: value} } -type regexpMatcher struct { +type RegexpMatcher struct { name string re *regexp.Regexp } -func (m regexpMatcher) Name() string { return m.name } -func (m regexpMatcher) Matches(v string) bool { return m.re.MatchString(v) } -func (m regexpMatcher) String() string { return fmt.Sprintf("%s=~%q", m.name, m.re.String()) } +func (m RegexpMatcher) Name() string { return m.name } +func (m RegexpMatcher) Matches(v string) bool { return m.re.MatchString(v) } +func (m RegexpMatcher) String() string { return fmt.Sprintf("%s=~%q", m.name, m.re.String()) } +func (m RegexpMatcher) Value() string { return m.re.String() } // NewRegexpMatcher returns a new matcher verifying that a value matches // the regular expression pattern. 
@@ -79,7 +80,7 @@ func NewRegexpMatcher(name, pattern string) (Matcher, error) { if err != nil { return nil, err } - return ®expMatcher{name: name, re: re}, nil + return &RegexpMatcher{name: name, re: re}, nil } // NewMustRegexpMatcher returns a new matcher verifying that a value matches @@ -90,7 +91,7 @@ func NewMustRegexpMatcher(name, pattern string) Matcher { if err != nil { panic(err) } - return ®expMatcher{name: name, re: re} + return &RegexpMatcher{name: name, re: re} } diff --git a/vendor/github.com/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/tsdb/querier.go index 9d99de0837..253102b0ed 100644 --- a/vendor/github.com/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/tsdb/querier.go @@ -17,6 +17,7 @@ import ( "fmt" "sort" "strings" + "unicode/utf8" "github.com/pkg/errors" "github.com/prometheus/tsdb/chunkenc" @@ -266,6 +267,62 @@ func (q *blockQuerier) Close() error { return merr.Err() } +// Bitmap used by func isRegexMetaCharacter to check whether a character needs to be escaped. +var regexMetaCharacterBytes [16]byte + +// isRegexMetaCharacter reports whether byte b needs to be escaped. +func isRegexMetaCharacter(b byte) bool { + return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0 +} + +func init() { + for _, b := range []byte(`.+*?()|[]{}^$`) { + regexMetaCharacterBytes[b%16] |= 1 << (b / 16) + } +} + +func findSetMatches(pattern string) []string { + // Return empty matches if the wrapper from Prometheus is missing. 
+ if len(pattern) < 6 || pattern[:4] != "^(?:" || pattern[len(pattern)-2:] != ")$" { + return nil + } + escaped := false + sets := []*strings.Builder{&strings.Builder{}} + for i := 4; i < len(pattern)-2; i++ { + if escaped { + switch { + case isRegexMetaCharacter(pattern[i]): + sets[len(sets)-1].WriteByte(pattern[i]) + case pattern[i] == '\\': + sets[len(sets)-1].WriteByte('\\') + default: + return nil + } + escaped = false + } else { + switch { + case isRegexMetaCharacter(pattern[i]): + if pattern[i] == '|' { + sets = append(sets, &strings.Builder{}) + } else { + return nil + } + case pattern[i] == '\\': + escaped = true + default: + sets[len(sets)-1].WriteByte(pattern[i]) + } + } + } + matches := make([]string, 0, len(sets)) + for _, s := range sets { + if s.Len() > 0 { + matches = append(matches, s.String()) + } + } + return matches +} + // PostingsForMatchers assembles a single postings iterator against the index reader // based on the given matchers. func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, error) { @@ -346,6 +403,14 @@ func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error return ix.Postings(em.Name(), em.Value()) } + // Fast-path for set matching. 
+ if em, ok := m.(*labels.RegexpMatcher); ok { + setMatches := findSetMatches(em.Value()) + if len(setMatches) > 0 { + return postingsForSetMatcher(ix, em.Name(), setMatches) + } + } + tpls, err := ix.LabelValues(m.Name()) if err != nil { return nil, err @@ -411,6 +476,18 @@ func inversePostingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings return index.Merge(rit...), nil } +func postingsForSetMatcher(ix IndexReader, name string, matches []string) (index.Postings, error) { + var its []index.Postings + for _, match := range matches { + if it, err := ix.Postings(name, match); err == nil { + its = append(its, it) + } else { + return nil, err + } + } + return index.Merge(its...), nil +} + func mergeStrings(a, b []string) []string { maxl := len(a) if len(b) > len(a) { diff --git a/vendor/github.com/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/tsdb/repair.go index 38138b12a5..1d299047a0 100644 --- a/vendor/github.com/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/tsdb/repair.go @@ -109,7 +109,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { } // Reset version of meta.json to 1. meta.Version = 1 - if err := writeMetaFile(logger, d, meta); err != nil { + if _, err := writeMetaFile(logger, d, meta); err != nil { return wrapErr(err, d) } } diff --git a/vendor/github.com/prometheus/tsdb/testutil/directory.go b/vendor/github.com/prometheus/tsdb/testutil/directory.go index d3c9c926f1..e74b342b07 100644 --- a/vendor/github.com/prometheus/tsdb/testutil/directory.go +++ b/vendor/github.com/prometheus/tsdb/testutil/directory.go @@ -16,6 +16,7 @@ package testutil import ( "io/ioutil" "os" + "path/filepath" ) const ( @@ -127,3 +128,18 @@ func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) { return } + +// DirSize returns the size in bytes of all files in a directory. 
+func DirSize(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return nil + }) + return size, err +} diff --git a/vendor/github.com/prometheus/tsdb/tombstones.go b/vendor/github.com/prometheus/tsdb/tombstones.go index 220af49007..d7b76230c0 100644 --- a/vendor/github.com/prometheus/tsdb/tombstones.go +++ b/vendor/github.com/prometheus/tsdb/tombstones.go @@ -54,14 +54,15 @@ type TombstoneReader interface { Close() error } -func writeTombstoneFile(logger log.Logger, dir string, tr TombstoneReader) error { +func writeTombstoneFile(logger log.Logger, dir string, tr TombstoneReader) (int64, error) { path := filepath.Join(dir, tombstoneFilename) tmp := path + ".tmp" hash := newCRC32() + var size int f, err := os.Create(tmp) if err != nil { - return err + return 0, err } defer func() { if f != nil { @@ -79,10 +80,11 @@ func writeTombstoneFile(logger log.Logger, dir string, tr TombstoneReader) error // Write the meta. 
buf.PutBE32(MagicTombstone) buf.PutByte(tombstoneFormatV1) - _, err = f.Write(buf.Get()) + n, err := f.Write(buf.Get()) if err != nil { - return err + return 0, err } + size += n mw := io.MultiWriter(f, hash) @@ -94,32 +96,34 @@ func writeTombstoneFile(logger log.Logger, dir string, tr TombstoneReader) error buf.PutVarint64(iv.Mint) buf.PutVarint64(iv.Maxt) - _, err = mw.Write(buf.Get()) + n, err = mw.Write(buf.Get()) if err != nil { return err } + size += n } return nil }); err != nil { - return fmt.Errorf("error writing tombstones: %v", err) + return 0, fmt.Errorf("error writing tombstones: %v", err) } - _, err = f.Write(hash.Sum(nil)) + n, err = f.Write(hash.Sum(nil)) if err != nil { - return err + return 0, err } + size += n var merr tsdb_errors.MultiError if merr.Add(f.Sync()); merr.Err() != nil { merr.Add(f.Close()) - return merr.Err() + return 0, merr.Err() } if err = f.Close(); err != nil { - return err + return 0, err } f = nil - return fileutil.Replace(tmp, path) + return int64(size), fileutil.Replace(tmp, path) } // Stone holds the information on the posting and time-range @@ -129,41 +133,37 @@ type Stone struct { intervals Intervals } -func readTombstones(dir string) (TombstoneReader, SizeReader, error) { +func readTombstones(dir string) (TombstoneReader, int64, error) { b, err := ioutil.ReadFile(filepath.Join(dir, tombstoneFilename)) if os.IsNotExist(err) { - return newMemTombstones(), nil, nil + return newMemTombstones(), 0, nil } else if err != nil { - return nil, nil, err - } - - sr := &TombstoneFile{ - size: int64(len(b)), + return nil, 0, err } if len(b) < 5 { - return nil, sr, errors.Wrap(encoding.ErrInvalidSize, "tombstones header") + return nil, 0, errors.Wrap(encoding.ErrInvalidSize, "tombstones header") } d := &encoding.Decbuf{B: b[:len(b)-4]} // 4 for the checksum. 
if mg := d.Be32(); mg != MagicTombstone { - return nil, sr, fmt.Errorf("invalid magic number %x", mg) + return nil, 0, fmt.Errorf("invalid magic number %x", mg) } if flag := d.Byte(); flag != tombstoneFormatV1 { - return nil, sr, fmt.Errorf("invalid tombstone format %x", flag) + return nil, 0, fmt.Errorf("invalid tombstone format %x", flag) } if d.Err() != nil { - return nil, sr, d.Err() + return nil, 0, d.Err() } // Verify checksum. hash := newCRC32() if _, err := hash.Write(d.Get()); err != nil { - return nil, sr, errors.Wrap(err, "write to hash") + return nil, 0, errors.Wrap(err, "write to hash") } if binary.BigEndian.Uint32(b[len(b)-4:]) != hash.Sum32() { - return nil, sr, errors.New("checksum did not match") + return nil, 0, errors.New("checksum did not match") } stonesMap := newMemTombstones() @@ -173,13 +173,13 @@ func readTombstones(dir string) (TombstoneReader, SizeReader, error) { mint := d.Varint64() maxt := d.Varint64() if d.Err() != nil { - return nil, sr, d.Err() + return nil, 0, d.Err() } stonesMap.addInterval(k, Interval{mint, maxt}) } - return stonesMap, sr, nil + return stonesMap, int64(len(b)), nil } type memTombstones struct { @@ -230,16 +230,6 @@ func (t *memTombstones) addInterval(ref uint64, itvs ...Interval) { } } -// TombstoneFile holds information about the tombstone file. -type TombstoneFile struct { - size int64 -} - -// Size returns the tombstone file size. 
-func (t *TombstoneFile) Size() int64 { - return t.size -} - func (*memTombstones) Close() error { return nil } diff --git a/vendor/github.com/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/tsdb/wal.go index 86b3bf79cd..49f55fe404 100644 --- a/vendor/github.com/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/tsdb/wal.go @@ -65,8 +65,9 @@ func newWalMetrics(wal *SegmentWAL, r prometheus.Registerer) *walMetrics { m := &walMetrics{} m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "prometheus_tsdb_wal_fsync_duration_seconds", - Help: "Duration of WAL fsync.", + Name: "prometheus_tsdb_wal_fsync_duration_seconds", + Help: "Duration of WAL fsync.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) m.corruptions = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_corruptions_total", @@ -1245,7 +1246,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { if err := os.RemoveAll(tmpdir); err != nil { return errors.Wrap(err, "cleanup replacement dir") } - repl, err := wal.New(logger, nil, tmpdir) + repl, err := wal.New(logger, nil, tmpdir, false) if err != nil { return errors.Wrap(err, "open new WAL") } diff --git a/vendor/github.com/prometheus/tsdb/wal/live_reader.go b/vendor/github.com/prometheus/tsdb/wal/live_reader.go index 8394bfd08d..94175e7917 100644 --- a/vendor/github.com/prometheus/tsdb/wal/live_reader.go +++ b/vendor/github.com/prometheus/tsdb/wal/live_reader.go @@ -22,28 +22,46 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" ) -var ( - readerCorruptionErrors = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_reader_corruption_errors", - Help: "Errors encountered when reading the WAL.", - }, []string{"error"}) -) +// liveReaderMetrics holds all metrics 
exposed by the LiveReader. +type liveReaderMetrics struct { + readerCorruptionErrors *prometheus.CounterVec +} + +// NewLiveReaderMetrics instantiates, registers and returns metrics to be injected +// at LiveReader instantiation. +func NewLiveReaderMetrics(reg prometheus.Registerer) *liveReaderMetrics { + m := &liveReaderMetrics{ + readerCorruptionErrors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_reader_corruption_errors_total", + Help: "Errors encountered when reading the WAL.", + }, []string{"error"}), + } + + if reg != nil { + reg.Register(m.readerCorruptionErrors) + } + + return m +} // NewLiveReader returns a new live reader. -func NewLiveReader(logger log.Logger, r io.Reader) *LiveReader { - return &LiveReader{ - logger: logger, - rdr: r, +func NewLiveReader(logger log.Logger, metrics *liveReaderMetrics, r io.Reader) *LiveReader { + lr := &LiveReader{ + logger: logger, + rdr: r, + metrics: metrics, // Until we understand how they come about, make readers permissive // to records spanning pages. permissive: true, } + + return lr } // LiveReader reads WAL records from an io.Reader. It allows reading of WALs @@ -54,6 +72,7 @@ type LiveReader struct { rdr io.Reader err error rec []byte + snappyBuf []byte hdr [recordHeaderSize]byte buf [pageSize]byte readIndex int // Index in buf to start at for next read. @@ -68,6 +87,8 @@ type LiveReader struct { // does. Until we track down why, set permissive to true to tolerate it. // NB the non-ive Reader implementation allows for this. permissive bool + + metrics *liveReaderMetrics } // Err returns any errors encountered reading the WAL.
io.EOFs are not terminal @@ -166,11 +187,18 @@ func (r *LiveReader) buildRecord() (bool, error) { return false, nil } - rt := recType(r.hdr[0]) + rt := recTypeFromHeader(r.hdr[0]) if rt == recFirst || rt == recFull { r.rec = r.rec[:0] + r.snappyBuf = r.snappyBuf[:0] + } + + compressed := r.hdr[0]&snappyMask != 0 + if compressed { + r.snappyBuf = append(r.snappyBuf, temp...) + } else { + r.rec = append(r.rec, temp...) } - r.rec = append(r.rec, temp...) if err := validateRecord(rt, r.index); err != nil { r.index = 0 @@ -178,6 +206,16 @@ func (r *LiveReader) buildRecord() (bool, error) { } if rt == recLast || rt == recFull { r.index = 0 + if compressed && len(r.snappyBuf) > 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. + r.rec = r.rec[:cap(r.rec)] + r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + if err != nil { + return false, err + } + } return true, nil } // Only increment i for non-zero records since we use it @@ -258,7 +296,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) { if !r.permissive { return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize) } - readerCorruptionErrors.WithLabelValues("record_span_page").Inc() + r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc() level.Warn(r.logger).Log("msg", "record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) } if recordHeaderSize+length > pageSize { diff --git a/vendor/github.com/prometheus/tsdb/wal/reader.go b/vendor/github.com/prometheus/tsdb/wal/reader.go index 297463b001..7612f8775f 100644 --- a/vendor/github.com/prometheus/tsdb/wal/reader.go +++ b/vendor/github.com/prometheus/tsdb/wal/reader.go @@ -19,6 +19,7 @@ import ( "hash/crc32" "io" + "github.com/golang/snappy" "github.com/pkg/errors" ) @@ -27,6 +28,7 @@ type Reader struct { rdr 
io.Reader err error rec []byte + snappyBuf []byte buf [pageSize]byte total int64 // Total bytes processed. curRecTyp recType // Used for checking that the last record is not torn. @@ -45,7 +47,7 @@ func (r *Reader) Next() bool { // The last WAL segment record shouldn't be torn(should be full or last). // The last record would be torn after a crash just before // the last record part could be persisted to disk. - if recType(r.curRecTyp) == recFirst || recType(r.curRecTyp) == recMiddle { + if r.curRecTyp == recFirst || r.curRecTyp == recMiddle { r.err = errors.New("last record is torn") } return false @@ -61,6 +63,7 @@ func (r *Reader) next() (err error) { buf := r.buf[recordHeaderSize:] r.rec = r.rec[:0] + r.snappyBuf = r.snappyBuf[:0] i := 0 for { @@ -68,7 +71,8 @@ func (r *Reader) next() (err error) { return errors.Wrap(err, "read first header byte") } r.total++ - r.curRecTyp = recType(hdr[0]) + r.curRecTyp = recTypeFromHeader(hdr[0]) + compressed := hdr[0]&snappyMask != 0 // Gobble up zero bytes. if r.curRecTyp == recPageTerm { @@ -123,12 +127,25 @@ func (r *Reader) next() (err error) { if c := crc32.Checksum(buf[:length], castagnoliTable); c != crc { return errors.Errorf("unexpected checksum %x, expected %x", c, crc) } - r.rec = append(r.rec, buf[:length]...) + + if compressed { + r.snappyBuf = append(r.snappyBuf, buf[:length]...) + } else { + r.rec = append(r.rec, buf[:length]...) + } if err := validateRecord(r.curRecTyp, i); err != nil { return err } if r.curRecTyp == recLast || r.curRecTyp == recFull { + if compressed && len(r.snappyBuf) > 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. 
+ r.rec = r.rec[:cap(r.rec)] + r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + return err + } return nil } diff --git a/vendor/github.com/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/tsdb/wal/wal.go index 46504f0d97..39daba975e 100644 --- a/vendor/github.com/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/tsdb/wal/wal.go @@ -29,6 +29,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/tsdb/fileutil" @@ -165,6 +166,8 @@ type WAL struct { stopc chan chan struct{} actorc chan func() closed bool // To allow calling Close() more than once without blocking. + compress bool + snappyBuf []byte fsyncDuration prometheus.Summary pageFlushes prometheus.Counter @@ -175,13 +178,13 @@ type WAL struct { } // New returns a new WAL over the given directory. -func New(logger log.Logger, reg prometheus.Registerer, dir string) (*WAL, error) { - return NewSize(logger, reg, dir, DefaultSegmentSize) +func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WAL, error) { + return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new WAL over the given directory. // New segments are created with the specified size. 
-func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int) (*WAL, error) { +func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WAL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -198,10 +201,12 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi page: &page{}, actorc: make(chan func(), 100), stopc: make(chan chan struct{}), + compress: compress, } w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "prometheus_tsdb_wal_fsync_duration_seconds", - Help: "Duration of WAL fsync.", + Name: "prometheus_tsdb_wal_fsync_duration_seconds", + Help: "Duration of WAL fsync.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_page_flushes_total", @@ -228,34 +233,35 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } _, j, err := w.Segments() + // Index of the Segment we want to open and write to. + writeSegmentIndex := 0 if err != nil { return nil, errors.Wrap(err, "get segment range") } - // Fresh dir, no segments yet. - if j == -1 { - segment, err := CreateSegment(w.dir, 0) - if err != nil { - return nil, err - } + // If some segments already exist create one with a higher index than the last segment. + if j != -1 { + writeSegmentIndex = j + 1 + } - if err := w.setSegment(segment); err != nil { - return nil, err - } - } else { - segment, err := OpenWriteSegment(logger, w.dir, j) - if err != nil { - return nil, err - } + segment, err := CreateSegment(w.dir, writeSegmentIndex) + if err != nil { + return nil, err + } - if err := w.setSegment(segment); err != nil { - return nil, err - } + if err := w.setSegment(segment); err != nil { + return nil, err } + go w.run() return w, nil } +// CompressionEnabled returns if compression is enabled on this WAL. 
+func (w *WAL) CompressionEnabled() bool { + return w.compress +} + // Dir returns the directory of the WAL. func (w *WAL) Dir() string { return w.dir @@ -363,6 +369,9 @@ func (w *WAL) Repair(origErr error) error { } // We expect an error here from r.Err(), so nothing to handle. + // We need to pad to the end of the last page in the repaired segment + w.flushPage(true) + // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. @@ -372,6 +381,20 @@ if err := os.Remove(tmpfn); err != nil { return errors.Wrap(err, "delete corrupted segment") } + + // Explicitly close the segment we just repaired to avoid issues with Windows. + s.Close() + + // We always want to start writing to a new Segment rather than an existing + // Segment, which is handled by NewSize, but earlier in Repair we're deleting + // all segments that come after the corrupted Segment. Recreate a new Segment here. + s, err = CreateSegment(w.dir, cerr.Segment+1) + if err != nil { + return err + } + if err := w.setSegment(s); err != nil { + return err + } return nil } @@ -380,6 +403,13 @@ func SegmentName(dir string, i int) string { return filepath.Join(dir, fmt.Sprintf("%08d", i)) } +// NextSegment creates the next segment and closes the previous one. +func (w *WAL) NextSegment() error { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.nextSegment() +} + // nextSegment creates the next segment and closes the previous one. func (w *WAL) nextSegment() error { // Only flush the current page if it actually holds data.
@@ -455,6 +485,14 @@ func (w *WAL) flushPage(clear bool) error { return nil } +// First Byte of header format: +// [ 4 bits unallocated] [1 bit snappy compression flag] [ 3 bit record type ] + +const ( + snappyMask = 1 << 3 + recTypeMask = snappyMask - 1 +) + type recType uint8 const ( @@ -465,6 +503,10 @@ const ( recLast recType = 4 // Final fragment of a record. ) +func recTypeFromHeader(header byte) recType { + return recType(header & recTypeMask) +} + func (t recType) String() string { switch t { case recPageTerm: @@ -525,6 +567,19 @@ func (w *WAL) log(rec []byte, final bool) error { } } + compressed := false + if w.compress && len(rec) > 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. + w.snappyBuf = w.snappyBuf[:cap(w.snappyBuf)] + w.snappyBuf = snappy.Encode(w.snappyBuf, rec) + if len(w.snappyBuf) < len(rec) { + rec = w.snappyBuf + compressed = true + } + } + // Populate as many pages as necessary to fit the record. // Be careful to always do one pass to ensure we write zero-length records. for i := 0; i == 0 || len(rec) > 0; i++ { @@ -548,6 +603,9 @@ func (w *WAL) log(rec []byte, final bool) error { default: typ = recMiddle } + if compressed { + typ |= snappyMask + } buf[0] = byte(typ) crc := crc32.Checksum(part, castagnoliTable) @@ -710,7 +768,7 @@ func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { segs = append(segs, s) } } - return newSegmentBufReader(segs...), nil + return NewSegmentBufReader(segs...), nil } // segmentBufReader is a buffered reader that reads in multiples of pages. @@ -725,7 +783,7 @@ type segmentBufReader struct { off int // Offset of read data into current segment. 
} -func newSegmentBufReader(segs ...*Segment) *segmentBufReader { +func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { return &segmentBufReader{ buf: bufio.NewReaderSize(segs[0], 16*pageSize), segs: segs, diff --git a/vendor/modules.txt b/vendor/modules.txt index f4eab947e4..2aa007fb73 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -13,7 +13,7 @@ github.com/Azure/azure-pipeline-go/pipeline github.com/Azure/azure-storage-blob-go/azblob # github.com/NYTimes/gziphandler v1.1.1 github.com/NYTimes/gziphandler -# github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc +# github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 github.com/alecthomas/template github.com/alecthomas/template/parse # github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf @@ -132,7 +132,6 @@ github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/testutil -github.com/prometheus/client_golang/prometheus/promauto # github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/client_model/go # github.com/prometheus/common v0.6.0 @@ -144,7 +143,7 @@ github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg # github.com/prometheus/procfs v0.0.2 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs -# github.com/prometheus/prometheus v2.9.2+incompatible +# github.com/prometheus/prometheus v0.0.0-20190710134608-e5b22494857d github.com/prometheus/prometheus/discovery/file github.com/prometheus/prometheus/discovery/targetgroup github.com/prometheus/prometheus/pkg/labels @@ -161,7 +160,7 @@ github.com/prometheus/prometheus/pkg/gate github.com/prometheus/prometheus/template github.com/prometheus/prometheus/util/stats github.com/prometheus/prometheus/util/testutil -# github.com/prometheus/tsdb v0.8.0 +# github.com/prometheus/tsdb v0.9.1 
github.com/prometheus/tsdb github.com/prometheus/tsdb/chunkenc github.com/prometheus/tsdb/errors