diff --git a/CHANGELOG.md b/CHANGELOG.md index 2383be3121..87f4b571f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re * [#5654](https://github.com/thanos-io/thanos/pull/5654) Query: add `--grpc-compression` flag that controls the compression used in gRPC client. With the flag it is now possible to compress the traffic between Query and StoreAPI nodes - you get lower network usage in exchange for a bit higher CPU/RAM usage. - [#5650](https://github.com/thanos-io/thanos/pull/5650) Query Frontend: Add sharded queries metrics. - [#5658](https://github.com/thanos-io/thanos/pull/5658) Query Frontend: Introduce new optional parameters (`query-range.min-split-interval`, `query-range.max-split-interval`, `query-range.horizontal-shards`) to implement more dynamic horizontal query splitting. +- [#5721](https://github.com/thanos-io/thanos/pull/5721) Store: Add metric `thanos_bucket_store_empty_postings_total` for number of empty postings when fetching series. 
### Changed diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 2d7f270feb..1fd7494e01 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -120,6 +120,7 @@ type bucketStoreMetrics struct { chunkSizeBytes prometheus.Histogram queriesDropped *prometheus.CounterVec seriesRefetches prometheus.Counter + emptyPostingCount prometheus.Counter cachedPostingsCompressions *prometheus.CounterVec cachedPostingsCompressionErrors *prometheus.CounterVec @@ -255,6 +256,11 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.emptyPostingCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "thanos_bucket_store_empty_postings_total", + Help: "Total number of empty postings when fetching block series.", + }) + return &m } @@ -789,6 +795,7 @@ func blockSeries( minTime, maxTime int64, // Series must have data in this time range to be returned. loadAggregates []storepb.Aggr, // List of aggregates to load when loading chunks. 
shardMatcher *storepb.ShardMatcher, + emptyPostingsCount prometheus.Counter, ) (storepb.SeriesSet, *queryStats, error) { ps, err := indexr.ExpandedPostings(ctx, matchers) if err != nil { @@ -796,6 +803,7 @@ func blockSeries( } if len(ps) == 0 { + emptyPostingsCount.Inc() return storepb.EmptySeriesSet(), indexr.stats, nil } @@ -1086,6 +1094,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie req.MinTime, req.MaxTime, req.Aggregates, shardMatcher, + s.metrics.emptyPostingCount, ) if err != nil { return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) @@ -1293,7 +1302,21 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq result = strutil.MergeSlices(res, extRes) } else { - seriesSet, _, err := blockSeries(newCtx, b.extLset, indexr, nil, reqSeriesMatchersNoExtLabels, nil, seriesLimiter, true, req.Start, req.End, nil, nil) + seriesSet, _, err := blockSeries( + newCtx, + b.extLset, + indexr, + nil, + reqSeriesMatchersNoExtLabels, + nil, + seriesLimiter, + true, + req.Start, + req.End, + nil, + nil, + s.metrics.emptyPostingCount, + ) if err != nil { return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) } @@ -1447,7 +1470,21 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR } result = res } else { - seriesSet, _, err := blockSeries(newCtx, b.extLset, indexr, nil, reqSeriesMatchersNoExtLabels, nil, seriesLimiter, true, req.Start, req.End, nil, nil) + seriesSet, _, err := blockSeries( + newCtx, + b.extLset, + indexr, + nil, + reqSeriesMatchersNoExtLabels, + nil, + seriesLimiter, + true, + req.Start, + req.End, + nil, + nil, + s.metrics.emptyPostingCount, + ) if err != nil { return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) } diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 6a54f76b8e..4da878ff4a 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -29,6 +29,8 @@ import ( 
"github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" @@ -36,10 +38,10 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/encoding" - "github.com/thanos-io/objstore/providers/filesystem" "go.uber.org/atomic" "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/providers/filesystem" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/indexheader" @@ -2286,6 +2288,11 @@ func benchmarkBlockSeriesWithConcurrency(b *testing.B, concurrency int, blockMet // No limits. chunksLimiter := NewChunksLimiterFactory(0)(nil) seriesLimiter := NewSeriesLimiterFactory(0)(nil) + dummyCounter := promauto.With(nil).NewCounter(prometheus.CounterOpts{ + Name: "dummy", + Help: "dummy help", + }) + ctx := context.Background() // Run multiple workers to execute the queries. wg := sync.WaitGroup{} @@ -2319,7 +2326,7 @@ func benchmarkBlockSeriesWithConcurrency(b *testing.B, concurrency int, blockMet indexReader := blk.indexReader() chunkReader := blk.chunkReader() - seriesSet, _, err := blockSeries(context.Background(), nil, indexReader, chunkReader, matchers, chunksLimiter, seriesLimiter, req.SkipChunks, req.MinTime, req.MaxTime, req.Aggregates, nil) + seriesSet, _, err := blockSeries(ctx, nil, indexReader, chunkReader, matchers, chunksLimiter, seriesLimiter, req.SkipChunks, req.MinTime, req.MaxTime, req.Aggregates, nil, dummyCounter) testutil.Ok(b, err) // Ensure at least 1 series has been returned (as expected).