From 202fb4d46f35b91577bbc977d46c27f0e6f15e90 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Sat, 19 Dec 2020 06:41:38 +0000
Subject: [PATCH] query: Added PromQL test framework supporting multiple stores. First step towards Query pushdown! (#3631)

* store: Added inprocess server to client.
Signed-off-by: Bartlomiej Plotka
* Added initial PromQL acceptance tests.
* Improved logging.
* Improved debug matching info.
* Fixed important matching bug.
* Found reset bug, added issue https://github.com/thanos-io/thanos/issues/3644 and commented code.
Signed-off-by: Bartlomiej Plotka
---
 cmd/thanos/rule.go | 2 +-
 pkg/api/query/v1_test.go | 6 +-
 pkg/compact/downsample/downsample.go | 3 +-
 pkg/promclient/promclient.go | 4 +-
 pkg/query/iter.go | 1 +
 pkg/query/querier.go | 3 +-
 pkg/query/querier_test.go | 11 +-
 pkg/query/query_test.go | 121 +++
 pkg/query/storeset_test.go | 22 +-
 pkg/query/test.go | 621 ++++++++++++
 .../promql/prometheus/aggregators.test | 499 ++++++++++
 .../testdata/promql/prometheus/collision.test | 22 +
 .../testdata/promql/prometheus/functions.test | 892 ++++++++++++++++++
 .../promql/prometheus/histograms.test | 193 ++++
 .../testdata/promql/prometheus/literals.test | 59 ++
 .../testdata/promql/prometheus/operators.test | 469 +++++++++
 .../testdata/promql/prometheus/selectors.test | 201 ++++
 .../testdata/promql/prometheus/staleness.test | 51 +
 .../testdata/promql/prometheus/subquery.test | 117 +++
 .../testdata/promql/thanos/aggregators.test | 220 +++++
 pkg/receive/multitsdb.go | 2 +-
 pkg/store/bucket.go | 8 +-
 pkg/store/local.go | 9 +-
 pkg/store/prometheus.go | 47 +-
 pkg/store/prometheus_test.go | 2 -
 pkg/store/proxy.go | 132 ++-
 pkg/store/proxy_test.go | 93 +-
 pkg/store/storepb/custom.go | 9 +-
 pkg/store/storepb/custom_test.go | 4 +-
 pkg/store/storepb/inprocess.go | 97 ++
 pkg/store/storepb/inprocess_test.go | 265 ++++++
 pkg/store/tsdb.go | 20 +-
 pkg/store/tsdb_test.go | 16 +-
 test/e2e/query_frontend_test.go | 5 +-
 test/e2e/query_test.go | 5 +-
 35 files changed, 4023 insertions(+), 208 deletions(-)
 create mode 100644 pkg/query/test.go
 create mode 100644 pkg/query/testdata/promql/prometheus/aggregators.test
 create mode 100644 pkg/query/testdata/promql/prometheus/collision.test
 create mode 100644 pkg/query/testdata/promql/prometheus/functions.test
 create mode 100644 pkg/query/testdata/promql/prometheus/histograms.test
 create mode 100644 pkg/query/testdata/promql/prometheus/literals.test
 create mode 100644 pkg/query/testdata/promql/prometheus/operators.test
 create mode 100644 pkg/query/testdata/promql/prometheus/selectors.test
 create mode 100644 pkg/query/testdata/promql/prometheus/staleness.test
 create mode 100644 pkg/query/testdata/promql/prometheus/subquery.test
 create mode 100644 pkg/query/testdata/promql/thanos/aggregators.test
 create mode 100644 pkg/store/storepb/inprocess.go
 create mode 100644 pkg/store/storepb/inprocess_test.go

diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go
index 13240b966c..6d5c840b63 100644
--- a/cmd/thanos/rule.go
+++ b/cmd/thanos/rule.go
@@ -548,7 +548,7 @@ func runRule(
 	// Start gRPC server.
{ - tsdbStore := store.NewTSDBStore(logger, reg, db, component.Rule, lset) + tsdbStore := store.NewTSDBStore(logger, db, component.Rule, lset) tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), grpcCert, grpcKey, grpcClientCA) if err != nil { diff --git a/pkg/api/query/v1_test.go b/pkg/api/query/v1_test.go index b5b12f3960..9273d5111f 100644 --- a/pkg/api/query/v1_test.go +++ b/pkg/api/query/v1_test.go @@ -180,7 +180,7 @@ func TestQueryEndpoints(t *testing.T) { baseAPI: &baseAPI.BaseAPI{ Now: func() time.Time { return now }, }, - queryableCreate: query.NewQueryableCreator(nil, nil, store.NewTSDBStore(nil, nil, db, component.Query, nil), 2, timeout), + queryableCreate: query.NewQueryableCreator(nil, nil, store.NewTSDBStore(nil, db, component.Query, nil), 2, timeout), queryEngine: func(int64) *promql.Engine { return qe }, @@ -674,7 +674,7 @@ func TestMetadataEndpoints(t *testing.T) { baseAPI: &baseAPI.BaseAPI{ Now: func() time.Time { return now }, }, - queryableCreate: query.NewQueryableCreator(nil, nil, store.NewTSDBStore(nil, nil, db, component.Query, nil), 2, timeout), + queryableCreate: query.NewQueryableCreator(nil, nil, store.NewTSDBStore(nil, db, component.Query, nil), 2, timeout), queryEngine: func(int64) *promql.Engine { return qe }, @@ -684,7 +684,7 @@ func TestMetadataEndpoints(t *testing.T) { baseAPI: &baseAPI.BaseAPI{ Now: func() time.Time { return now }, }, - queryableCreate: query.NewQueryableCreator(nil, nil, store.NewTSDBStore(nil, nil, db, component.Query, nil), 2, timeout), + queryableCreate: query.NewQueryableCreator(nil, nil, store.NewTSDBStore(nil, db, component.Query, nil), 2, timeout), queryEngine: func(int64) *promql.Engine { return qe }, diff --git a/pkg/compact/downsample/downsample.go b/pkg/compact/downsample/downsample.go index 909252aee9..8d271b3ee6 100644 --- a/pkg/compact/downsample/downsample.go +++ b/pkg/compact/downsample/downsample.go @@ -513,7 +513,7 @@ func downsampleAggrBatch(chks []*AggrChunk, buf *[]sample, resolution int64) (ch return chk, err } - // Handle counters by reading them properly. + // Handle counters by applying resets directly. acs := make([]chunkenc.Iterator, 0, len(chks)) for _, achk := range chks { c, err := achk.Get(AggrCounter) @@ -580,6 +580,7 @@ type sample struct { // It handles overlapped chunks (removes overlaps). // NOTE: It is important to deduplicate with care ensuring that you don't hit // issue https://github.com/thanos-io/thanos/issues/2401#issuecomment-621958839. +// NOTE(bwplotka): This hides resets from PromQL engine. This means it will not work for PromQL resets function. type ApplyCounterResetsSeriesIterator struct { chks []chunkenc.Iterator i int // Current chunk. diff --git a/pkg/promclient/promclient.go b/pkg/promclient/promclient.go index 89d1673e4b..4a4cc52557 100644 --- a/pkg/promclient/promclient.go +++ b/pkg/promclient/promclient.go @@ -653,12 +653,12 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string // SeriesInGRPC returns the labels from Prometheus series API. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. 
-func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []storepb.LabelMatcher, startTime, endTime int64) ([]map[string]string, error) { +func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]map[string]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/series") q := u.Query() - q.Add("match[]", storepb.MatchersToString(matchers...)) + q.Add("match[]", storepb.PromMatchersToString(matchers...)) q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) u.RawQuery = q.Encode() diff --git a/pkg/query/iter.go b/pkg/query/iter.go index 48189e6a7d..3c641c360f 100644 --- a/pkg/query/iter.go +++ b/pkg/query/iter.go @@ -185,6 +185,7 @@ func (s *chunkSeries) Iterator() chunkenc.Iterator { for _, c := range s.chunks { its = append(its, getFirstIterator(c.Counter, c.Raw)) } + // TODO(bwplotka): This breaks resets function. See https://github.com/thanos-io/thanos/issues/3644 sit = downsample.NewApplyCounterResetsIterator(its...) default: return errSeriesIterator{err: errors.Errorf("unexpected result aggregate type %v", s.aggrs)} diff --git a/pkg/query/querier.go b/pkg/query/querier.go index 00e1c1d612..c054dd2774 100644 --- a/pkg/query/querier.go +++ b/pkg/query/querier.go @@ -255,7 +255,7 @@ func (q *querier) Select(_ bool, hints *storage.SelectHints, ms ...*labels.Match } func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, error) { - sms, err := storepb.TranslatePromMatchers(ms...) + sms, err := storepb.PromMatchersToMatchers(ms...) if err != nil { return nil, errors.Wrap(err, "convert matchers") } @@ -265,6 +265,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . // TODO(bwplotka): Pass it using the SeriesRequest instead of relying on context. ctx = context.WithValue(ctx, store.StoreMatcherKey, q.storeDebugMatchers) + // TODO(bwplotka): Use inprocess gRPC. resp := &seriesServer{ctx: ctx} if err := q.proxy.Series(&storepb.SeriesRequest{ MinTime: hints.Start, diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index c93ab57cfc..b9437a59bb 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -27,7 +27,6 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/store/labelpb" @@ -41,7 +40,7 @@ type sample struct { } func TestQueryableCreator_MaxResolution(t *testing.T) { - testProxy := &storeServer{resps: []*storepb.SeriesResponse{}} + testProxy := &testStoreServer{resps: []*storepb.SeriesResponse{}} queryableCreator := NewQueryableCreator(nil, nil, testProxy, 2, 5*time.Second) oneHourMillis := int64(1*time.Hour) / int64(time.Millisecond) @@ -60,7 +59,7 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { // Tests E2E how PromQL works with downsampled data. func TestQuerier_DownsampledData(t *testing.T) { - testProxy := &storeServer{ + testProxy := &testStoreServer{ resps: []*storepb.SeriesResponse{ storeSeriesResponse(t, labels.FromStrings("__name__", "a", "zzz", "a", "aaa", "bbb"), []sample{{99, 1}, {199, 5}}), // Downsampled chunk from Store. storeSeriesResponse(t, labels.FromStrings("__name__", "a", "zzz", "b", "bbbb", "eee"), []sample{{99, 3}, {199, 8}}), // Downsampled chunk from Store. 
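An aside on the reset caveat flagged in the downsample.go and iter.go changes above (issue https://github.com/thanos-io/thanos/issues/3644): ApplyCounterResetsSeriesIterator re-accumulates counter chunks so that rate() stays correct across chunk boundaries, but the adjustment erases the reset itself, so PromQL's resets() sees a monotone series. Below is a minimal sketch of that adjustment over raw float samples; applyResets is a hypothetical helper for illustration, not code from this patch.

// applyResets mimics counter-reset adjustment: whenever the value drops, the
// running total so far is carried forward, producing a monotone series.
// Raw:      3, 5, 2, 4  (the 5 -> 2 drop is a counter reset)
// Adjusted: 3, 5, 7, 9  (rates over the window are preserved, the reset is gone)
func applyResets(vals []float64) []float64 {
	out := make([]float64, 0, len(vals))
	var shift, prev float64
	for i, v := range vals {
		if i > 0 && v < prev {
			shift += prev // Reset detected: carry the accumulated total forward.
		}
		prev = v
		out = append(out, v+shift)
	}
	return out
}
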
@@ -411,7 +410,7 @@ func TestQuerier_Select(t *testing.T) { }{ { name: "select overlapping data with partial error", - storeAPI: &storeServer{ + storeAPI: &testStoreServer{ resps: []*storepb.SeriesResponse{ storeSeriesResponse(t, labels.FromStrings("a", "a"), []sample{{0, 0}, {2, 1}, {3, 2}}), storepb.NewWarnSeriesResponse(errors.New("partial error")), @@ -1468,14 +1467,14 @@ func BenchmarkDedupSeriesIterator(b *testing.B) { }) } -type storeServer struct { +type testStoreServer struct { // This field just exist to pseudo-implement the unused methods of the interface. storepb.StoreServer resps []*storepb.SeriesResponse } -func (s *storeServer) Series(_ *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { +func (s *testStoreServer) Series(_ *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { for _, resp := range s.resps { err := srv.Send(resp) if err != nil { diff --git a/pkg/query/query_test.go b/pkg/query/query_test.go index d72a6d72dd..792236f2e7 100644 --- a/pkg/query/query_test.go +++ b/pkg/query/query_test.go @@ -4,11 +4,132 @@ package query import ( + "context" + "fmt" + "os" + "path/filepath" "testing" + "time" + "github.com/go-kit/kit/log" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" + "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/store" + "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/testutil" ) func TestMain(m *testing.M) { testutil.TolerantVerifyLeakMain(m) } + +type inProcessClient struct { + t testing.TB + + name string + + storepb.StoreClient + extLset labels.Labels +} + +func (i inProcessClient) LabelSets() []labels.Labels { + return []labels.Labels{i.extLset} +} + +func (i inProcessClient) TimeRange() (mint int64, maxt int64) { + r, err := i.Info(context.TODO(), &storepb.InfoRequest{}) + testutil.Ok(i.t, err) + return r.MinTime, r.MaxTime +} + +func (i inProcessClient) String() string { return i.name } +func (i inProcessClient) Addr() string { return i.name } + +func TestQuerier_Proxy(t *testing.T) { + files, err := filepath.Glob("testdata/promql/**/*.test") + testutil.Ok(t, err) + testutil.Equals(t, 10, len(files), "%v", files) + + logger := log.NewLogfmtLogger(os.Stderr) + t.Run("proxy", func(t *testing.T) { + var clients []store.Client + q := NewQueryableCreator( + logger, + nil, + store.NewProxyStore(logger, nil, func() []store.Client { return clients }, + component.Debug, nil, 5*time.Minute), + 1000000, + 5*time.Minute, + ) + + createQueryableFn := func(stores []*testStore) storage.Queryable { + clients = clients[:0] + for i, st := range stores { + m, err := storepb.PromMatchersToMatchers(st.matchers...) + testutil.Ok(t, err) + + // TODO(bwplotka): Parse external labels. + clients = append(clients, inProcessClient{ + t: t, + StoreClient: storepb.ServerAsClient(SelectedStore(store.NewTSDBStore(logger, st.storage.DB, component.Debug, nil), m, st.mint, st.maxt), 0), + name: fmt.Sprintf("store number %v", i), + }) + } + return q(true, nil, nil, 0, false, false) + } + + for _, fn := range files { + t.Run(fn, func(t *testing.T) { + te, err := newTestFromFile(t, fn) + testutil.Ok(t, err) + testutil.Ok(t, te.run(createQueryableFn)) + te.close() + }) + } + }) +} + +// SelectStore allows wrapping another storeAPI with additional time and matcher selection. +type SelectStore struct { + matchers []storepb.LabelMatcher + + storepb.StoreServer + mint, maxt int64 +} + +// SelectedStore wraps given store with SelectStore. 
+func SelectedStore(wrapped storepb.StoreServer, matchers []storepb.LabelMatcher, mint, maxt int64) *SelectStore { + return &SelectStore{ + StoreServer: wrapped, + matchers: matchers, + mint: mint, + maxt: maxt, + } +} + +func (s *SelectStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) { + resp, err := s.StoreServer.Info(ctx, r) + if err != nil { + return nil, err + } + if resp.MinTime < s.mint { + resp.MinTime = s.mint + } + if resp.MaxTime > s.maxt { + resp.MaxTime = s.maxt + } + // TODO(bwplotka): Match labelsets and expose only those? + return resp, nil +} + +func (s *SelectStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { + if r.MinTime < s.mint { + r.MinTime = s.mint + } + if r.MaxTime > s.maxt { + r.MaxTime = s.maxt + } + r.Matchers = append(r.Matchers, s.matchers...) + return s.StoreServer.Series(r, srv) +} diff --git a/pkg/query/storeset_test.go b/pkg/query/storeset_test.go index e94230e100..9429a6e44c 100644 --- a/pkg/query/storeset_test.go +++ b/pkg/query/storeset_test.go @@ -29,29 +29,29 @@ var testGRPCOpts = []grpc.DialOption{ grpc.WithInsecure(), } -type testStore struct { +type mockedStore struct { infoDelay time.Duration info storepb.InfoResponse } -func (s *testStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) { +func (s *mockedStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) { if s.infoDelay > 0 { time.Sleep(s.infoDelay) } return &s.info, nil } -func (s *testStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { +func (s *mockedStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { return status.Error(codes.Unimplemented, "not implemented") } -func (s *testStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) ( +func (s *mockedStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) ( *storepb.LabelNamesResponse, error, ) { return nil, status.Error(codes.Unimplemented, "not implemented") } -func (s *testStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) ( +func (s *mockedStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) ( *storepb.LabelValuesResponse, error, ) { return nil, status.Error(codes.Unimplemented, "not implemented") @@ -84,7 +84,7 @@ func startTestStores(storeMetas []testStoreMeta) (*testStores, error) { srv := grpc.NewServer() - storeSrv := &testStore{ + storeSrv := &mockedStore{ info: storepb.InfoResponse{ LabelSets: meta.extlsetFn(listener.Addr().String()), MaxTime: meta.maxTime, @@ -1012,12 +1012,12 @@ func TestUpdateStoreStateLastError(t *testing.T) { storeStatuses: map[string]*StoreStatus{}, } mockStoreRef := &storeRef{ - addr: "testStore", + addr: "mockedStore", } mockStoreSet.updateStoreStatus(mockStoreRef, tc.InputError) - b, err := json.Marshal(mockStoreSet.storeStatuses["testStore"].LastError) + b, err := json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) testutil.Ok(t, err) testutil.Equals(t, tc.ExpectedLastErr, string(b)) } @@ -1028,19 +1028,19 @@ func TestUpdateStoreStateForgetsPreviousErrors(t *testing.T) { storeStatuses: map[string]*StoreStatus{}, } mockStoreRef := &storeRef{ - addr: "testStore", + addr: "mockedStore", } mockStoreSet.updateStoreStatus(mockStoreRef, errors.New("test err")) - b, err := json.Marshal(mockStoreSet.storeStatuses["testStore"].LastError) + b, err := json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) testutil.Ok(t, err) 
 	testutil.Equals(t, `"test err"`, string(b))

 	// updating status without an error should clear the previous one.
 	mockStoreSet.updateStoreStatus(mockStoreRef, nil)

-	b, err = json.Marshal(mockStoreSet.storeStatuses["testStore"].LastError)
+	b, err = json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError)
 	testutil.Ok(t, err)
 	testutil.Equals(t, `null`, string(b))
 }
diff --git a/pkg/query/test.go b/pkg/query/test.go
new file mode 100644
index 0000000000..3871094b98
--- /dev/null
+++ b/pkg/query/test.go
@@ -0,0 +1,621 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+package query
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/teststorage"
+)
+
+var (
+	minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
+
+	patSpace = regexp.MustCompile("[\t ]+")
+	// TODO(bwplotka): Parse external labels.
+	patStore       = regexp.MustCompile(`^store\s+([{}=_"a-zA-Z0-9]+)\s+([0-9mds]+)\s+([0-9mds]+)$`)
+	patLoad        = regexp.MustCompile(`^load\s+(.+?)$`)
+	patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
+)
+
+const (
+	epsilon = 0.000001 // Relative error allowed for sample values.
+)
+
+var testStartTime = time.Unix(0, 0).UTC()
+
+func durationMilliseconds(d time.Duration) int64 {
+	return int64(d / (time.Millisecond / time.Nanosecond))
+}
+
+func timeMilliseconds(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+type test struct {
+	testing.TB
+
+	cmds       []interface{}
+	rootEngine *promql.Engine
+	stores     []*testStore
+
+	ctx       context.Context
+	cancelCtx context.CancelFunc
+}
+
+type testStore struct {
+	storeCmd
+
+	storage *teststorage.TestStorage
+
+	ctx       context.Context
+	cancelCtx context.CancelFunc
+}
+
+func newTestStore(t testing.TB, cmd *storeCmd) *testStore {
+	s := &testStore{
+		storeCmd: *cmd,
+		storage:  teststorage.New(t),
+	}
+	s.ctx, s.cancelCtx = context.WithCancel(context.Background())
+	return s
+}
+
+// close closes resources associated with the testStore.
+func (s *testStore) close(t testing.TB) {
+	s.cancelCtx()
+
+	if err := s.storage.Close(); err != nil {
+		t.Fatalf("closing test storage: %s", err)
+	}
+}
+
+// newTest returns an initialized empty test. It is compatible with promql.Test,
+// but additionally allows multiple StoreAPIs, to test query pushdown.
+// TODO(bwplotka): Move to unittest and add support for multi-store upstream. See: https://github.com/prometheus/prometheus/pull/8300
+func newTest(t testing.TB, input string) (*test, error) {
+	cmds, err := parse(input)
+	if err != nil {
+		return nil, err
+	}
+
+	te := &test{TB: t, cmds: cmds}
+	te.reset()
+	return te, err
+}
+
+func newTestFromFile(t testing.TB, filename string) (*test, error) {
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return newTest(t, string(content))
+}
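
For orientation, here is a minimal sketch of the script dialect newTest accepts. It is illustrative only: the series names, matchers, offsets, and the test function are invented, and createQueryableFn stands in for a queryable constructor such as the one built in TestQuerier_Proxy above. Each store directive opens a fresh TestStorage scoped to the given matchers and time range; subsequent load commands append samples to the most recently declared store, and eval then runs against all declared stores at once.

// TestMultiStoreScriptSketch is a hypothetical test, not part of this change.
func TestMultiStoreScriptSketch(t *testing.T) {
	input := `
store {region="eu"} 0m 30m
load 5m
  http_requests{region="eu"} 0+10x6

store {region="us"} 0m 30m
load 5m
  http_requests{region="us"} 0+20x6

eval instant at 25m sum(http_requests)
  {} 150
`
	te, err := newTest(t, input)
	testutil.Ok(t, err)
	defer te.close()
	// createQueryableFn must merge all stores, e.g. via a proxy StoreAPI as in TestQuerier_Proxy.
	testutil.Ok(t, te.run(createQueryableFn))
}

At 25m the eu series reads 50 and the us series 100 (5m scrape gap, increments of 10 and 20), so the merged sum is 150.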
+// reset clears the current test storage of all inserted samples and
+// recreates the engine and stores.
+func (t *test) reset() {
+	if t.cancelCtx != nil {
+		t.cancelCtx()
+	}
+	t.ctx, t.cancelCtx = context.WithCancel(context.Background())
+
+	opts := promql.EngineOpts{
+		Logger:                   nil,
+		Reg:                      nil,
+		MaxSamples:               10000,
+		Timeout:                  100 * time.Second,
+		NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
+	}
+	t.rootEngine = promql.NewEngine(opts)
+
+	for _, s := range t.stores {
+		s.close(t.TB)
+	}
+	t.stores = t.stores[:0]
+}
+
+// close closes resources associated with the Test.
+func (t *test) close() {
+	t.cancelCtx()
+	for _, s := range t.stores {
+		s.close(t.TB)
+	}
+}
+
+// getLines returns trimmed lines after removing the comments.
+func getLines(input string) []string {
+	lines := strings.Split(input, "\n")
+	for i, l := range lines {
+		l = strings.TrimSpace(l)
+		if strings.HasPrefix(l, "#") {
+			l = ""
+		}
+		lines[i] = l
+	}
+	return lines
+}
+
+// parse parses the given input and returns command sequence.
+func parse(input string) (cmds []interface{}, err error) {
+	lines := getLines(input)
+
+	// Scan for steps line by line.
+	for i := 0; i < len(lines); i++ {
+		l := lines[i]
+		if len(l) == 0 {
+			continue
+		}
+		var cmd interface{}
+
+		switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
+		case c == "clear":
+			cmd = &clearCmd{}
+		case c == "load":
+			i, cmd, err = ParseLoad(lines, i)
+		case strings.HasPrefix(c, "eval"):
+			i, cmd, err = ParseEval(lines, i)
+		case c == "store":
+			i, cmd, err = ParseStore(lines, i)
+		default:
+			return nil, raise(i, "invalid command %q", l)
+		}
+		if err != nil {
+			return nil, err
+		}
+		cmds = append(cmds, cmd)
+	}
+	return cmds, nil
+}
+
+func raise(line int, format string, v ...interface{}) error {
+	return &parser.ParseErr{
+		LineOffset: line,
+		Err:        errors.Errorf(format, v...),
+	}
+}
+
+// run executes the command sequence of the test, stopping at the first error.
+func (t *test) run(createQueryableFn func([]*testStore) storage.Queryable) error {
+	for _, cmd := range t.cmds {
+		if err := t.exec(cmd, createQueryableFn); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// exec processes a single step of the test.
+func (t *test) exec(tc interface{}, createQueryableFn func([]*testStore) storage.Queryable) error {
+	switch cmd := tc.(type) {
+	case *clearCmd:
+		t.reset()
+	case *storeCmd:
+		t.stores = append(t.stores, newTestStore(t.TB, cmd))
+
+	case *loadCmd:
+		if len(t.stores) == 0 {
+			t.stores = append(t.stores, newTestStore(t.TB, newStoreCmd(nil, math.MinInt64, math.MaxInt64)))
+		}
+
+		app := t.stores[len(t.stores)-1].storage.Appender(t.ctx)
+		if err := cmd.Append(app); err != nil {
+			_ = app.Rollback()
+			return err
+		}
+		if err := app.Commit(); err != nil {
+			return err
+		}
+
+	case *evalCmd:
+		if err := cmd.Eval(t.ctx, t.rootEngine, createQueryableFn(t.stores)); err != nil {
+			return err
+		}
+
+	default:
+		return errors.Errorf("pkg/query.Test.exec: unknown test command type %v", cmd)
+	}
+	return nil
+}
+
+// storeCmd is a command that registers a new storage, optionally filtered by
+// matchers and a time range.
+type storeCmd struct {
+	matchers   []*labels.Matcher
+	mint, maxt int64
+}
+
+func newStoreCmd(matchers []*labels.Matcher, mint, maxt int64) *storeCmd {
+	return &storeCmd{
+		matchers: matchers,
+		mint:     mint,
+		maxt:     maxt,
+	}
+}
+
+func (cmd storeCmd) String() string {
+	return "store"
+}
+
+// ParseStore parses store statements.
+func ParseStore(lines []string, i int) (int, *storeCmd, error) {
+	if !patStore.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid store command. (store <matchers> <mint offset> <maxt offset>)")
+	}
+	parts := patStore.FindStringSubmatch(lines[i])
+
+	m, err := parser.ParseMetricSelector(parts[1])
+	if err != nil {
+		return i, nil, raise(i, "invalid matcher definition %q: %s", parts[1], err)
+	}
+
+	offset, err := model.ParseDuration(parts[2])
+	if err != nil {
+		return i, nil, raise(i, "invalid mint definition %q: %s", parts[2], err)
+	}
+	mint := testStartTime.Add(time.Duration(offset))
+
+	offset, err = model.ParseDuration(parts[3])
+	if err != nil {
+		return i, nil, raise(i, "invalid maxt definition %q: %s", parts[3], err)
+	}
+	maxt := testStartTime.Add(time.Duration(offset))
+	return i, newStoreCmd(m, timestamp.FromTime(mint), timestamp.FromTime(maxt)), nil
+}
+
+// ParseLoad parses load statements.
+func ParseLoad(lines []string, i int) (int, *loadCmd, error) {
+	if !patLoad.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid load command. (load <step:duration>)")
+	}
+	parts := patLoad.FindStringSubmatch(lines[i])
+
+	gap, err := model.ParseDuration(parts[1])
+	if err != nil {
+		return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
+	}
+	cmd := newLoadCmd(time.Duration(gap))
+	for i+1 < len(lines) {
+		i++
+		defLine := lines[i]
+		if len(defLine) == 0 {
+			i--
+			break
+		}
+		metric, vals, err := parser.ParseSeriesDesc(defLine)
+		if err != nil {
+			if perr, ok := err.(*parser.ParseErr); ok {
+				perr.LineOffset = i
+			}
+			return i, nil, err
+		}
+		cmd.set(metric, vals...)
+	}
+	return i, cmd, nil
+}
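
ParseEval below accepts three evaluation forms. A short, made-up sketch of each (the metrics and values are invented; the syntax follows patEvalInstant and the testdata files further down):

const evalFormsSketch = `
eval instant at 50m sum(http_requests) by (job)
  {job="api"} 100

eval_ordered instant at 50m topk(2, http_requests)
  http_requests{job="api"} 100
  http_requests{job="app"} 50

eval_fail instant at 0m ceil({__name__=~"a|b"})
`

eval_ordered additionally asserts the result order (useful for topk/bottomk), while eval_fail expects the query to return an error.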
+// ParseEval parses eval statements.
+func ParseEval(lines []string, i int) (int, *evalCmd, error) {
+	if !patEvalInstant.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>)")
+	}
+	parts := patEvalInstant.FindStringSubmatch(lines[i])
+	var (
+		mod  = parts[1]
+		at   = parts[2]
+		expr = parts[3]
+	)
+	_, err := parser.ParseExpr(expr)
+	if err != nil {
+		if perr, ok := err.(*parser.ParseErr); ok {
+			perr.LineOffset = i
+			posOffset := parser.Pos(strings.Index(lines[i], expr))
+			perr.PositionRange.Start += posOffset
+			perr.PositionRange.End += posOffset
+			perr.Query = lines[i]
+		}
+		return i, nil, err
+	}
+
+	offset, err := model.ParseDuration(at)
+	if err != nil {
+		return i, nil, raise(i, "invalid timestamp definition %q: %s", at, err)
+	}
+	ts := testStartTime.Add(time.Duration(offset))
+
+	cmd := newEvalCmd(expr, ts, i+1)
+	switch mod {
+	case "ordered":
+		cmd.ordered = true
+	case "fail":
+		cmd.fail = true
+	}
+
+	for j := 1; i+1 < len(lines); j++ {
+		i++
+		defLine := lines[i]
+		if len(defLine) == 0 {
+			i--
+			break
+		}
+		if f, err := parseNumber(defLine); err == nil {
+			cmd.expect(0, nil, parser.SequenceValue{Value: f})
+			break
+		}
+		metric, vals, err := parser.ParseSeriesDesc(defLine)
+		if err != nil {
+			if perr, ok := err.(*parser.ParseErr); ok {
+				perr.LineOffset = i
+			}
+			return i, nil, err
+		}
+
+		// Currently, we are not expecting any matrices.
+		if len(vals) > 1 {
+			return i, nil, raise(i, "expecting multiple values in instant evaluation is not allowed")
+		}
+		cmd.expect(j, metric, vals...)
+	}
+	return i, cmd, nil
+}
+
+func parseNumber(s string) (float64, error) {
+	n, err := strconv.ParseInt(s, 0, 64)
+	f := float64(n)
+	if err != nil {
+		f, err = strconv.ParseFloat(s, 64)
+	}
+	if err != nil {
+		return 0, errors.Wrap(err, "error parsing number")
+	}
+	return f, nil
+}
+
+// loadCmd is a command that loads sequences of sample values for specific
+// metrics into the storage.
+type loadCmd struct {
+	gap     time.Duration
+	metrics map[uint64]labels.Labels
+	defs    map[uint64][]promql.Point
+}
+
+func newLoadCmd(gap time.Duration) *loadCmd {
+	return &loadCmd{
+		gap:     gap,
+		metrics: map[uint64]labels.Labels{},
+		defs:    map[uint64][]promql.Point{},
+	}
+}
+
+func (cmd loadCmd) String() string {
+	return "load"
+}
+
+// set a sequence of sample values for the given metric.
+func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
+	h := m.Hash()
+
+	samples := make([]promql.Point, 0, len(vals))
+	ts := testStartTime
+	for _, v := range vals {
+		if !v.Omitted {
+			samples = append(samples, promql.Point{
+				T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
+				V: v.Value,
+			})
+		}
+		ts = ts.Add(cmd.gap)
+	}
+	cmd.defs[h] = samples
+	cmd.metrics[h] = m
+}
+
+// Append the defined time series to the storage.
+func (cmd *loadCmd) Append(a storage.Appender) error {
+	for h, smpls := range cmd.defs {
+		m := cmd.metrics[h]
+
+		for _, s := range smpls {
+			if _, err := a.Add(m, s.T, s.V); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// evalCmd is a command that evaluates an expression for the given time (range)
+// and expects a specific result.
+type evalCmd struct {
+	expr  string
+	start time.Time
+	line  int
+
+	fail, ordered bool
+
+	metrics  map[uint64]labels.Labels
+	expected map[uint64]entry
+}
+
+type entry struct {
+	pos  int
+	vals []parser.SequenceValue
+}
+
+func (e entry) String() string {
+	return fmt.Sprintf("%d: %s", e.pos, e.vals)
+}
+
+func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
+	return &evalCmd{
+		expr:  expr,
+		start: start,
+		line:  line,
+
+		metrics:  map[uint64]labels.Labels{},
+		expected: map[uint64]entry{},
+	}
+}
+
+func (ev *evalCmd) String() string {
+	return "eval"
+}
+
+// expect adds a new metric with a sequence of values to the set of expected
+// results for the query.
+func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...parser.SequenceValue) {
+	if m == nil {
+		ev.expected[0] = entry{pos: pos, vals: vals}
+		return
+	}
+	h := m.Hash()
+	ev.metrics[h] = m
+	ev.expected[h] = entry{pos: pos, vals: vals}
+}
+
+// almostEqual returns true if the two sample values only differ by a
+// small relative error.
+func almostEqual(a, b float64) bool {
+	// NaN has no equality but for testing we still want to know whether both values
+	// are NaN.
+	if math.IsNaN(a) && math.IsNaN(b) {
+		return true
+	}
+
+	// Cf. http://floating-point-gui.de/errors/comparison/
+	if a == b {
+		return true
+	}
+
+	diff := math.Abs(a - b)
+
+	if a == 0 || b == 0 || diff < minNormal {
+		return diff < epsilon*minNormal
+	}
+	return diff/(math.Abs(a)+math.Abs(b)) < epsilon
+}
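
To make the tolerance concrete, a few illustrative comparisons under the constants above (the test function name is hypothetical, for illustration only):

func TestAlmostEqualSketch(t *testing.T) {
	if !almostEqual(0.1+0.2, 0.3) { // Relative error ~9e-17, well under epsilon.
		t.Fatal("expected near-equality for float rounding noise")
	}
	if almostEqual(100, 100.1) { // Relative error ~5e-4, over epsilon.
		t.Fatal("expected inequality for a real difference")
	}
	if !almostEqual(math.NaN(), math.NaN()) { // NaN deliberately compares equal here.
		t.Fatal("expected NaN to match NaN")
	}
}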
+// compareResult compares the result value with the defined expectation.
+func (ev *evalCmd) compareResult(result parser.Value) error {
+	switch val := result.(type) {
+	case promql.Matrix:
+		return errors.New("received range result on instant evaluation")
+
+	case promql.Vector:
+		seen := map[uint64]bool{}
+		for pos, v := range val {
+			fp := v.Metric.Hash()
+			if _, ok := ev.metrics[fp]; !ok {
+				return errors.Errorf("unexpected metric %s in result", v.Metric)
+			}
+			exp := ev.expected[fp]
+			if ev.ordered && exp.pos != pos+1 {
+				return errors.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
+			}
+			if !almostEqual(exp.vals[0].Value, v.V) {
+				return errors.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.V)
+			}
+
+			seen[fp] = true
+		}
+		for fp, expVals := range ev.expected {
+			if !seen[fp] {
+				details := fmt.Sprintln("vector result", len(val), ev.expr)
+				for _, ss := range val {
+					details += fmt.Sprintln("    ", ss.Metric, ss.Point)
+				}
+				return errors.Errorf("expected metric %s with %v not found; details: %v", ev.metrics[fp], expVals, details)
+			}
+		}
+
+	case promql.Scalar:
+		if !almostEqual(ev.expected[0].vals[0].Value, val.V) {
+			return errors.Errorf("expected Scalar %v but got %v", ev.expected[0].vals[0].Value, val.V)
+		}
+
+	default:
+		panic(errors.Errorf("promql.Test.compareResult: unexpected result type %T", result))
+	}
+	return nil
+}
+
+func (ev *evalCmd) Eval(ctx context.Context, queryEngine *promql.Engine, queryable storage.Queryable) error {
+	q, err := queryEngine.NewInstantQuery(queryable, ev.expr, ev.start)
+	if err != nil {
+		return err
+	}
+	defer q.Close()
+
+	res := q.Exec(ctx)
+	if res.Err != nil {
+		if ev.fail {
+			return nil
+		}
+		return errors.Wrapf(res.Err, "error evaluating query %q (line %d)", ev.expr, ev.line)
+	}
+	if res.Err == nil && ev.fail {
+		return errors.Errorf("expected error evaluating query %q (line %d) but got none", ev.expr, ev.line)
+	}
+
+	err = ev.compareResult(res.Value)
+	if err != nil {
+		return errors.Wrapf(err, "error in %s %s", ev, ev.expr)
+	}
+
+	// Check query returns same result in range mode,
+	// by checking against the middle step.
+	q, err = queryEngine.NewRangeQuery(queryable, ev.expr, ev.start.Add(-time.Minute), ev.start.Add(time.Minute), time.Minute)
+	if err != nil {
+		return err
+	}
+	rangeRes := q.Exec(ctx)
+	if rangeRes.Err != nil {
+		return errors.Wrapf(rangeRes.Err, "error evaluating query %q (line %d) in range mode", ev.expr, ev.line)
+	}
+	defer q.Close()
+	if ev.ordered {
+		// Ordering isn't defined for range queries.
+		return nil
+	}
+	mat := rangeRes.Value.(promql.Matrix)
+	vec := make(promql.Vector, 0, len(mat))
+	for _, series := range mat {
+		for _, point := range series.Points {
+			if point.T == timeMilliseconds(ev.start) {
+				vec = append(vec, promql.Sample{Metric: series.Metric, Point: point})
+				break
+			}
+		}
+	}
+	if _, ok := res.Value.(promql.Scalar); ok {
+		err = ev.compareResult(promql.Scalar{V: vec[0].Point.V})
+	} else {
+		err = ev.compareResult(vec)
+	}
+	if err != nil {
+		return errors.Wrapf(err, "error in %s %s (line %d) range mode", ev, ev.expr, ev.line)
+	}
+	return nil
+}
+
+// clearCmd is a command that wipes the test's storage state.
+type clearCmd struct{} + +func (cmd clearCmd) String() string { + return "clear" +} diff --git a/pkg/query/testdata/promql/prometheus/aggregators.test b/pkg/query/testdata/promql/prometheus/aggregators.test new file mode 100644 index 0000000000..cda2e7f4e0 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/aggregators.test @@ -0,0 +1,499 @@ +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + +load 5m + foo{job="api-server", instance="0", region="europe"} 0+90x10 + foo{job="api-server"} 0+100x10 + +# Simple sum. +eval instant at 50m SUM BY (group) (http_requests{job="api-server"}) + {group="canary"} 700 + {group="production"} 300 + +eval instant at 50m SUM BY (group) (((http_requests{job="api-server"}))) + {group="canary"} 700 + {group="production"} 300 + +# Test alternative "by"-clause order. +eval instant at 50m sum by (group) (http_requests{job="api-server"}) + {group="canary"} 700 + {group="production"} 300 + +# Simple average. +eval instant at 50m avg by (group) (http_requests{job="api-server"}) + {group="canary"} 350 + {group="production"} 150 + +# Simple count. +eval instant at 50m count by (group) (http_requests{job="api-server"}) + {group="canary"} 2 + {group="production"} 2 + +# Simple without. +eval instant at 50m sum without (instance) (http_requests{job="api-server"}) + {group="canary",job="api-server"} 700 + {group="production",job="api-server"} 300 + +# Empty by. +eval instant at 50m sum by () (http_requests{job="api-server"}) + {} 1000 + +# No by/without. +eval instant at 50m sum(http_requests{job="api-server"}) + {} 1000 + +# Empty without. +eval instant at 50m sum without () (http_requests{job="api-server",group="production"}) + {group="production",job="api-server",instance="0"} 100 + {group="production",job="api-server",instance="1"} 200 + +# Without with mismatched and missing labels. Do not do this. +eval instant at 50m sum without (instance) (http_requests{job="api-server"} or foo) + {group="canary",job="api-server"} 700 + {group="production",job="api-server"} 300 + {region="europe",job="api-server"} 900 + {job="api-server"} 1000 + +# Lower-cased aggregation operators should work too. +eval instant at 50m sum(http_requests) by (job) + min(http_requests) by (job) + max(http_requests) by (job) + avg(http_requests) by (job) + {job="app-server"} 4550 + {job="api-server"} 1750 + +# Test alternative "by"-clause order. +eval instant at 50m sum by (group) (http_requests{job="api-server"}) + {group="canary"} 700 + {group="production"} 300 + +# Test both alternative "by"-clause orders in one expression. +# Public health warning: stick to one form within an expression (or even +# in an organization), or risk serious user confusion. 
+eval instant at 50m sum(sum by (group) (http_requests{job="api-server"})) by (job) + {} 1000 + +eval instant at 50m SUM(http_requests) + {} 3600 + +eval instant at 50m SUM(http_requests{instance="0"}) BY(job) + {job="api-server"} 400 + {job="app-server"} 1200 + +eval instant at 50m SUM(http_requests) BY (job) + {job="api-server"} 1000 + {job="app-server"} 2600 + +# Non-existent labels mentioned in BY-clauses shouldn't propagate to output. +eval instant at 50m SUM(http_requests) BY (job, nonexistent) + {job="api-server"} 1000 + {job="app-server"} 2600 + +eval instant at 50m COUNT(http_requests) BY (job) + {job="api-server"} 4 + {job="app-server"} 4 + +eval instant at 50m SUM(http_requests) BY (job, group) + {group="canary", job="api-server"} 700 + {group="canary", job="app-server"} 1500 + {group="production", job="api-server"} 300 + {group="production", job="app-server"} 1100 + +eval instant at 50m AVG(http_requests) BY (job) + {job="api-server"} 250 + {job="app-server"} 650 + +eval instant at 50m MIN(http_requests) BY (job) + {job="api-server"} 100 + {job="app-server"} 500 + +eval instant at 50m MAX(http_requests) BY (job) + {job="api-server"} 400 + {job="app-server"} 800 + +eval instant at 50m abs(-1 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 100 + {group="production", instance="1", job="api-server"} 200 + +eval instant at 50m floor(0.004 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} 0 + +eval instant at 50m ceil(0.004 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 1 + {group="production", instance="1", job="api-server"} 1 + +eval instant at 50m round(0.004 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} 1 + +# Round should correctly handle negative numbers. +eval instant at 50m round(-1 * (0.004 * http_requests{group="production",job="api-server"})) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} -1 + +# Round should round half up. +eval instant at 50m round(0.005 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 1 + {group="production", instance="1", job="api-server"} 1 + +eval instant at 50m round(-1 * (0.005 * http_requests{group="production",job="api-server"})) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} -1 + +eval instant at 50m round(1 + 0.005 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 2 + {group="production", instance="1", job="api-server"} 2 + +eval instant at 50m round(-1 * (1 + 0.005 * http_requests{group="production",job="api-server"})) + {group="production", instance="0", job="api-server"} -1 + {group="production", instance="1", job="api-server"} -2 + +# Round should accept the number to round nearest to. 
+eval instant at 50m round(0.0005 * http_requests{group="production",job="api-server"}, 0.1) + {group="production", instance="0", job="api-server"} 0.1 + {group="production", instance="1", job="api-server"} 0.1 + +eval instant at 50m round(2.1 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1) + {group="production", instance="0", job="api-server"} 2.2 + {group="production", instance="1", job="api-server"} 2.2 + +eval instant at 50m round(5.2 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1) + {group="production", instance="0", job="api-server"} 5.3 + {group="production", instance="1", job="api-server"} 5.3 + +# Round should work correctly with negative numbers and multiple decimal places. +eval instant at 50m round(-1 * (5.2 + 0.0005 * http_requests{group="production",job="api-server"}), 0.1) + {group="production", instance="0", job="api-server"} -5.2 + {group="production", instance="1", job="api-server"} -5.3 + +# Round should work correctly with big toNearests. +eval instant at 50m round(0.025 * http_requests{group="production",job="api-server"}, 5) + {group="production", instance="0", job="api-server"} 5 + {group="production", instance="1", job="api-server"} 5 + +eval instant at 50m round(0.045 * http_requests{group="production",job="api-server"}, 5) + {group="production", instance="0", job="api-server"} 5 + {group="production", instance="1", job="api-server"} 10 + +# Standard deviation and variance. +eval instant at 50m stddev(http_requests) + {} 229.12878474779 + +eval instant at 50m stddev by (instance)(http_requests) + {instance="0"} 223.60679774998 + {instance="1"} 223.60679774998 + +eval instant at 50m stdvar(http_requests) + {} 52500 + +eval instant at 50m stdvar by (instance)(http_requests) + {instance="0"} 50000 + {instance="1"} 50000 + +# Float precision test for standard deviation and variance +clear +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+1.33x10 + http_requests{job="api-server", instance="1", group="production"} 0+1.33x10 + http_requests{job="api-server", instance="0", group="canary"} 0+1.33x10 + +eval instant at 50m stddev(http_requests) + {} 0.0 + +eval instant at 50m stdvar(http_requests) + {} 0.0 + + +# Regression test for missing separator byte in labelsToGroupingKey. +clear +load 5m + label_grouping_test{a="aa", b="bb"} 0+10x10 + label_grouping_test{a="a", b="abb"} 0+20x10 + +eval instant at 50m sum(label_grouping_test) by (a, b) + {a="a", b="abb"} 200 + {a="aa", b="bb"} 100 + + + +# Tests for min/max. +clear +load 5m + http_requests{job="api-server", instance="0", group="production"} 1 + http_requests{job="api-server", instance="1", group="production"} 2 + http_requests{job="api-server", instance="0", group="canary"} NaN + http_requests{job="api-server", instance="1", group="canary"} 3 + http_requests{job="api-server", instance="2", group="canary"} 4 + +eval instant at 0m max(http_requests) + {} 4 + +eval instant at 0m min(http_requests) + {} 1 + +eval instant at 0m max by (group) (http_requests) + {group="production"} 2 + {group="canary"} 4 + +eval instant at 0m min by (group) (http_requests) + {group="production"} 1 + {group="canary"} 3 + +clear + +# Tests for topk/bottomk. 
+load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + foo 3+0x10 + +eval_ordered instant at 50m topk(3, http_requests) + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="1", job="app-server"} 600 + +eval_ordered instant at 50m topk((3), (http_requests)) + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="1", job="app-server"} 600 + +eval_ordered instant at 50m topk(5, http_requests{group="canary",job="app-server"}) + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="0", job="app-server"} 700 + +eval_ordered instant at 50m bottomk(3, http_requests) + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="canary", instance="0", job="api-server"} 300 + +eval_ordered instant at 50m bottomk(5, http_requests{group="canary",job="app-server"}) + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="app-server"} 800 + +eval instant at 50m topk by (group) (1, http_requests) + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="canary", instance="1", job="app-server"} 800 + +eval instant at 50m bottomk by (group) (2, http_requests) + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="1", job="api-server"} 200 + +eval_ordered instant at 50m bottomk by (group) (2, http_requests{group="production"}) + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="1", job="api-server"} 200 + +# Test NaN is sorted away from the top/bottom. 
+eval_ordered instant at 50m topk(3, http_requests{job="api-server",group="production"}) + http_requests{job="api-server", instance="1", group="production"} 200 + http_requests{job="api-server", instance="0", group="production"} 100 + http_requests{job="api-server", instance="2", group="production"} NaN + +eval_ordered instant at 50m bottomk(3, http_requests{job="api-server",group="production"}) + http_requests{job="api-server", instance="0", group="production"} 100 + http_requests{job="api-server", instance="1", group="production"} 200 + http_requests{job="api-server", instance="2", group="production"} NaN + +# Test topk and bottomk allocate min(k, input_vector) for results vector +eval_ordered instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"}) + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="app-server"} 800 + +eval_ordered instant at 50m topk(9999999999, http_requests{job="api-server",group="production"}) + http_requests{job="api-server", instance="1", group="production"} 200 + http_requests{job="api-server", instance="0", group="production"} 100 + http_requests{job="api-server", instance="2", group="production"} NaN + +# Bug #5276. +eval_ordered instant at 50m topk(scalar(foo), http_requests) + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="1", job="app-server"} 600 + +clear + +# Tests for count_values. +load 5m + version{job="api-server", instance="0", group="production"} 6 + version{job="api-server", instance="1", group="production"} 6 + version{job="api-server", instance="2", group="production"} 6 + version{job="api-server", instance="0", group="canary"} 8 + version{job="api-server", instance="1", group="canary"} 8 + version{job="app-server", instance="0", group="production"} 6 + version{job="app-server", instance="1", group="production"} 6 + version{job="app-server", instance="0", group="canary"} 7 + version{job="app-server", instance="1", group="canary"} 7 + +eval instant at 5m count_values("version", version) + {version="6"} 5 + {version="7"} 2 + {version="8"} 2 + + +eval instant at 5m count_values(((("version"))), version) + {version="6"} 5 + {version="7"} 2 + {version="8"} 2 + + +eval instant at 5m count_values without (instance)("version", version) + {job="api-server", group="production", version="6"} 3 + {job="api-server", group="canary", version="8"} 2 + {job="app-server", group="production", version="6"} 2 + {job="app-server", group="canary", version="7"} 2 + +# Overwrite label with output. Don't do this. +eval instant at 5m count_values without (instance)("job", version) + {job="6", group="production"} 5 + {job="8", group="canary"} 2 + {job="7", group="canary"} 2 + +# Overwrite label with output. Don't do this. +eval instant at 5m count_values by (job, group)("job", version) + {job="6", group="production"} 5 + {job="8", group="canary"} 2 + {job="7", group="canary"} 2 + + +# Tests for quantile. 
+clear + +load 10s + data{test="two samples",point="a"} 0 + data{test="two samples",point="b"} 1 + data{test="three samples",point="a"} 0 + data{test="three samples",point="b"} 1 + data{test="three samples",point="c"} 2 + data{test="uneven samples",point="a"} 0 + data{test="uneven samples",point="b"} 1 + data{test="uneven samples",point="c"} 4 + foo .8 + +eval instant at 1m quantile without(point)(0.8, data) + {test="two samples"} 0.8 + {test="three samples"} 1.6 + {test="uneven samples"} 2.8 + +# Bug #5276. +eval instant at 1m quantile without(point)(scalar(foo), data) + {test="two samples"} 0.8 + {test="three samples"} 1.6 + {test="uneven samples"} 2.8 + + +eval instant at 1m quantile without(point)((scalar(foo)), data) + {test="two samples"} 0.8 + {test="three samples"} 1.6 + {test="uneven samples"} 2.8 + +# Tests for group. +clear + +load 10s + data{test="two samples",point="a"} 0 + data{test="two samples",point="b"} 1 + data{test="three samples",point="a"} 0 + data{test="three samples",point="b"} 1 + data{test="three samples",point="c"} 2 + data{test="uneven samples",point="a"} 0 + data{test="uneven samples",point="b"} 1 + data{test="uneven samples",point="c"} 4 + foo .8 + +eval instant at 1m group without(point)(data) + {test="two samples"} 1 + {test="three samples"} 1 + {test="uneven samples"} 1 + +eval instant at 1m group(foo) + {} 1 + +# Tests for avg. +clear + +load 10s + data{test="ten",point="a"} 8 + data{test="ten",point="b"} 10 + data{test="ten",point="c"} 12 + data{test="inf",point="a"} 0 + data{test="inf",point="b"} Inf + data{test="inf",point="d"} Inf + data{test="inf",point="c"} 0 + data{test="-inf",point="a"} -Inf + data{test="-inf",point="b"} -Inf + data{test="-inf",point="c"} 0 + data{test="inf2",point="a"} Inf + data{test="inf2",point="b"} 0 + data{test="inf2",point="c"} Inf + data{test="-inf2",point="a"} -Inf + data{test="-inf2",point="b"} 0 + data{test="-inf2",point="c"} -Inf + data{test="inf3",point="b"} Inf + data{test="inf3",point="d"} Inf + data{test="inf3",point="c"} Inf + data{test="inf3",point="d"} -Inf + data{test="-inf3",point="b"} -Inf + data{test="-inf3",point="d"} -Inf + data{test="-inf3",point="c"} -Inf + data{test="-inf3",point="c"} Inf + data{test="nan",point="a"} -Inf + data{test="nan",point="b"} 0 + data{test="nan",point="c"} Inf + data{test="big",point="a"} 9.988465674311579e+307 + data{test="big",point="b"} 9.988465674311579e+307 + data{test="big",point="c"} 9.988465674311579e+307 + data{test="big",point="d"} 9.988465674311579e+307 + data{test="-big",point="a"} -9.988465674311579e+307 + data{test="-big",point="b"} -9.988465674311579e+307 + data{test="-big",point="c"} -9.988465674311579e+307 + data{test="-big",point="d"} -9.988465674311579e+307 + data{test="bigzero",point="a"} -9.988465674311579e+307 + data{test="bigzero",point="b"} -9.988465674311579e+307 + data{test="bigzero",point="c"} 9.988465674311579e+307 + data{test="bigzero",point="d"} 9.988465674311579e+307 + +eval instant at 1m avg(data{test="ten"}) + {} 10 + +eval instant at 1m avg(data{test="inf"}) + {} Inf + +eval instant at 1m avg(data{test="inf2"}) + {} Inf + +eval instant at 1m avg(data{test="inf3"}) + {} NaN + +eval instant at 1m avg(data{test="-inf"}) + {} -Inf + +eval instant at 1m avg(data{test="-inf2"}) + {} -Inf + +eval instant at 1m avg(data{test="-inf3"}) + {} NaN + +eval instant at 1m avg(data{test="nan"}) + {} NaN + +eval instant at 1m avg(data{test="big"}) + {} 9.988465674311579e+307 + +eval instant at 1m avg(data{test="-big"}) + {} -9.988465674311579e+307 + +eval instant 
at 1m avg(data{test="bigzero"}) + {} 0 diff --git a/pkg/query/testdata/promql/prometheus/collision.test b/pkg/query/testdata/promql/prometheus/collision.test new file mode 100644 index 0000000000..4dcdfa4ddf --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/collision.test @@ -0,0 +1,22 @@ + +load 1s + node_namespace_pod:kube_pod_info:{namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 + node_cpu_seconds_total{cpu="10",endpoint="https",instance="10.253.57.87:9100",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v",service="node-exporter"} 449 + node_cpu_seconds_total{cpu="35",endpoint="https",instance="10.253.57.87:9100",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v",service="node-exporter"} 449 + node_cpu_seconds_total{cpu="89",endpoint="https",instance="10.253.57.87:9100",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v",service="node-exporter"} 449 + +eval instant at 4s count by(namespace, pod, cpu) (node_cpu_seconds_total{cpu=~".*",job="node-exporter",mode="idle",namespace="observability",pod="node-exporter-l454v"}) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{namespace="observability",pod="node-exporter-l454v"} + {cpu="10",namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 + {cpu="35",namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 + {cpu="89",namespace="observability",node="gke-search-infra-custom-96-253440-fli-d135b119-jx00",pod="node-exporter-l454v"} 1 + +clear + +# Test duplicate labelset in promql output. +load 5m + testmetric1{src="a",dst="b"} 0 + testmetric2{src="a",dst="b"} 1 + +eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'}) + +clear diff --git a/pkg/query/testdata/promql/prometheus/functions.test b/pkg/query/testdata/promql/prometheus/functions.test new file mode 100644 index 0000000000..c2c2a9a272 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/functions.test @@ -0,0 +1,892 @@ +# Testdata for resets() and changes(). +load 5m + http_requests{path="/foo"} 1 2 3 0 1 0 0 1 2 0 + http_requests{path="/bar"} 1 2 3 4 5 1 2 3 4 5 + http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1 + +# TODO(bwplotka): Tests for resets() were removed for now. See https://github.com/thanos-io/thanos/issues/3644 + +# Tests for changes(). +eval instant at 50m changes(http_requests[5m]) + {path="/foo"} 0 + {path="/bar"} 0 + {path="/biz"} 0 + +eval instant at 50m changes(http_requests[20m]) + {path="/foo"} 3 + {path="/bar"} 3 + {path="/biz"} 0 + +eval instant at 50m changes(http_requests[30m]) + {path="/foo"} 4 + {path="/bar"} 5 + {path="/biz"} 1 + +eval instant at 50m changes(http_requests[50m]) + {path="/foo"} 8 + {path="/bar"} 9 + {path="/biz"} 1 + +eval instant at 50m changes((http_requests[50m])) + {path="/foo"} 8 + {path="/bar"} 9 + {path="/biz"} 1 + +eval instant at 50m changes(nonexistent_metric[50m]) + +clear + +load 5m + x{a="b"} NaN NaN NaN + x{a="c"} 0 NaN 0 + +eval instant at 15m changes(x[15m]) + {a="b"} 0 + {a="c"} 2 + +clear + +# Tests for increase(). +load 5m + http_requests{path="/foo"} 0+10x10 + http_requests{path="/bar"} 0+10x5 0+10x5 + +# Tests for increase(). 
+eval instant at 50m increase(http_requests[50m]) + {path="/foo"} 100 + {path="/bar"} 90 + +eval instant at 50m increase(http_requests[100m]) + {path="/foo"} 100 + {path="/bar"} 90 + +clear + +# Test for increase() with counter reset. +# When the counter is reset, it always starts at 0. +# So the sequence 3 2 (decreasing counter = reset) is interpreted the same as 3 0 1 2. +# Prometheus assumes it missed the intermediate values 0 and 1. +load 5m + http_requests{path="/foo"} 0 1 2 3 2 3 4 + +eval instant at 30m increase(http_requests[30m]) + {path="/foo"} 7 + +clear + +# Tests for rate(). +load 5m + testcounter_reset_middle 0+10x4 0+10x5 + testcounter_reset_end 0+10x9 0 10 + +# Counter resets at in the middle of range are handled correctly by rate(). +eval instant at 50m rate(testcounter_reset_middle[50m]) + {} 0.03 + +# Counter resets at end of range are ignored by rate(). +eval instant at 50m rate(testcounter_reset_end[5m]) + {} 0 + +clear + +load 5m + calculate_rate_offset{x="a"} 0+10x10 + calculate_rate_offset{x="b"} 0+20x10 + calculate_rate_window 0+80x10 + +# Rates should calculate per-second rates. +eval instant at 50m rate(calculate_rate_window[50m]) + {} 0.26666666666666666 + +eval instant at 50m rate(calculate_rate_offset[10m] offset 5m) + {x="a"} 0.03333333333333333 + {x="b"} 0.06666666666666667 + +clear + +load 4m + testcounter_zero_cutoff{start="0m"} 0+240x10 + testcounter_zero_cutoff{start="1m"} 60+240x10 + testcounter_zero_cutoff{start="2m"} 120+240x10 + testcounter_zero_cutoff{start="3m"} 180+240x10 + testcounter_zero_cutoff{start="4m"} 240+240x10 + testcounter_zero_cutoff{start="5m"} 300+240x10 + +# Zero cutoff for left-side extrapolation. +eval instant at 10m rate(testcounter_zero_cutoff[20m]) + {start="0m"} 0.5 + {start="1m"} 0.55 + {start="2m"} 0.6 + {start="3m"} 0.65 + {start="4m"} 0.7 + {start="5m"} 0.6 + +# Normal half-interval cutoff for left-side extrapolation. +eval instant at 50m rate(testcounter_zero_cutoff[20m]) + {start="0m"} 0.6 + {start="1m"} 0.6 + {start="2m"} 0.6 + {start="3m"} 0.6 + {start="4m"} 0.6 + {start="5m"} 0.6 + +clear + +# Tests for irate(). +load 5m + http_requests{path="/foo"} 0+10x10 + http_requests{path="/bar"} 0+10x5 0+10x5 + +eval instant at 50m irate(http_requests[50m]) + {path="/foo"} .03333333333333333333 + {path="/bar"} .03333333333333333333 + +# Counter reset. +eval instant at 30m irate(http_requests[50m]) + {path="/foo"} .03333333333333333333 + {path="/bar"} 0 + +clear + +# Tests for delta(). +load 5m + http_requests{path="/foo"} 0 50 100 150 200 + http_requests{path="/bar"} 200 150 100 50 0 + +eval instant at 20m delta(http_requests[20m]) + {path="/foo"} 200 + {path="/bar"} -200 + +clear + +# Tests for idelta(). +load 5m + http_requests{path="/foo"} 0 50 100 150 + http_requests{path="/bar"} 0 50 100 50 + +eval instant at 20m idelta(http_requests[20m]) + {path="/foo"} 50 + {path="/bar"} -50 + +clear + +# Tests for deriv() and predict_linear(). +load 5m + testcounter_reset_middle 0+10x4 0+10x5 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + +# deriv should return the same as rate in simple cases. +eval instant at 50m rate(http_requests{group="canary", instance="1", job="app-server"}[50m]) + {group="canary", instance="1", job="app-server"} 0.26666666666666666 + +eval instant at 50m deriv(http_requests{group="canary", instance="1", job="app-server"}[50m]) + {group="canary", instance="1", job="app-server"} 0.26666666666666666 + +# deriv should return correct result. 
+eval instant at 50m deriv(testcounter_reset_middle[100m]) + {} 0.010606060606060607 + +# predict_linear should return correct result. +# X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000] +# Y = [ 0, 10, 20, 30, 40, 0, 10, 20, 30, 40, 50] +# sumX = 16500 +# sumY = 250 +# sumXY = 480000 +# sumX2 = 34650000 +# n = 11 +# covXY = 105000 +# varX = 9900000 +# slope = 0.010606060606060607 +# intercept at t=0: 6.818181818181818 +# intercept at t=3000: 38.63636363636364 +# intercept at t=3000+3600: 76.81818181818181 +eval instant at 50m predict_linear(testcounter_reset_middle[100m], 3600) + {} 76.81818181818181 + +# With http_requests, there is a sample value exactly at the end of +# the range, and it has exactly the predicted value, so predict_linear +# can be emulated with deriv. +eval instant at 50m predict_linear(http_requests[50m], 3600) - (http_requests + deriv(http_requests[50m]) * 3600) + {group="canary", instance="1", job="app-server"} 0 + +clear + +# Tests for label_replace. +load 5m + testmetric{src="source-value-10",dst="original-destination-value"} 0 + testmetric{src="source-value-20",dst="original-destination-value"} 1 + +# label_replace does a full-string match and replace. +eval instant at 0m label_replace(testmetric, "dst", "destination-value-$1", "src", "source-value-(.*)") + testmetric{src="source-value-10",dst="destination-value-10"} 0 + testmetric{src="source-value-20",dst="destination-value-20"} 1 + +# label_replace does not do a sub-string match. +eval instant at 0m label_replace(testmetric, "dst", "destination-value-$1", "src", "value-(.*)") + testmetric{src="source-value-10",dst="original-destination-value"} 0 + testmetric{src="source-value-20",dst="original-destination-value"} 1 + +# label_replace works with multiple capture groups. +eval instant at 0m label_replace(testmetric, "dst", "$1-value-$2", "src", "(.*)-value-(.*)") + testmetric{src="source-value-10",dst="source-value-10"} 0 + testmetric{src="source-value-20",dst="source-value-20"} 1 + +# label_replace does not overwrite the destination label if the source label +# does not exist. +eval instant at 0m label_replace(testmetric, "dst", "value-$1", "nonexistent-src", "source-value-(.*)") + testmetric{src="source-value-10",dst="original-destination-value"} 0 + testmetric{src="source-value-20",dst="original-destination-value"} 1 + +# label_replace overwrites the destination label if the source label is empty, +# but matched. +eval instant at 0m label_replace(testmetric, "dst", "value-$1", "nonexistent-src", "(.*)") + testmetric{src="source-value-10",dst="value-"} 0 + testmetric{src="source-value-20",dst="value-"} 1 + +# label_replace does not overwrite the destination label if the source label +# is not matched. +eval instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "non-matching-regex") + testmetric{src="source-value-10",dst="original-destination-value"} 0 + testmetric{src="source-value-20",dst="original-destination-value"} 1 + +eval instant at 0m label_replace((((testmetric))), (("dst")), (("value-$1")), (("src")), (("non-matching-regex"))) + testmetric{src="source-value-10",dst="original-destination-value"} 0 + testmetric{src="source-value-20",dst="original-destination-value"} 1 + +# label_replace drops labels that are set to empty values. +eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*") + testmetric{src="source-value-10"} 0 + testmetric{src="source-value-20"} 1 + +# label_replace fails when the regex is invalid. 
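+# ("(.*" has an unbalanced parenthesis, so the RE2 engine rejects it at query
+# evaluation time.)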
+eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*")
+
+# label_replace fails when the destination label name is not a valid Prometheus label name.
+eval_fail instant at 0m label_replace(testmetric, "invalid-label-name", "", "src", "(.*)")
+
+# label_replace fails when there would be duplicated identical output label sets.
+eval_fail instant at 0m label_replace(testmetric, "src", "", "", "")
+
+clear
+
+# Tests for vector, time and timestamp.
+load 10s
+ metric 1 1
+
+eval instant at 0s timestamp(metric)
+ {} 0
+
+eval instant at 5s timestamp(metric)
+ {} 0
+
+eval instant at 5s timestamp(((metric)))
+ {} 0
+
+eval instant at 10s timestamp(metric)
+ {} 10
+
+eval instant at 10s timestamp(((metric)))
+ {} 10
+
+# Tests for label_join.
+load 5m
+ testmetric{src="a",src1="b",src2="c",dst="original-destination-value"} 0
+ testmetric{src="d",src1="e",src2="f",dst="original-destination-value"} 1
+
+# label_join joins all src values in order.
+eval instant at 0m label_join(testmetric, "dst", "-", "src", "src1", "src2")
+ testmetric{src="a",src1="b",src2="c",dst="a-b-c"} 0
+ testmetric{src="d",src1="e",src2="f",dst="d-e-f"} 1
+
+# label_join treats nonexistent src labels as empty strings.
+eval instant at 0m label_join(testmetric, "dst", "-", "src", "src3", "src1")
+ testmetric{src="a",src1="b",src2="c",dst="a--b"} 0
+ testmetric{src="d",src1="e",src2="f",dst="d--e"} 1
+
+# label_join overwrites the destination label even if the resulting dst label is an empty string.
+eval instant at 0m label_join(testmetric, "dst", "", "emptysrc", "emptysrc1", "emptysrc2")
+ testmetric{src="a",src1="b",src2="c"} 0
+ testmetric{src="d",src1="e",src2="f"} 1
+
+# Test label_join without any src labels.
+eval instant at 0m label_join(testmetric, "dst", ", ")
+ testmetric{src="a",src1="b",src2="c"} 0
+ testmetric{src="d",src1="e",src2="f"} 1
+
+# Test label_join when the dst label is not present on the input series.
+load 5m
+ testmetric1{src="foo",src1="bar",src2="foobar"} 0
+ testmetric1{src="fizz",src1="buzz",src2="fizzbuzz"} 1
+
+# label_join creates dst label if not present.
+eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2")
+ testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0
+ testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1
+
+clear
+
+# Tests for vector.
+eval instant at 0m vector(1)
+ {} 1
+
+eval instant at 0s vector(time())
+ {} 0
+
+eval instant at 5s vector(time())
+ {} 5
+
+eval instant at 60m vector(time())
+ {} 3600
+
+
+# Tests for clamp_max and clamp_min().
+load 5m
+ test_clamp{src="clamp-a"} -50
+ test_clamp{src="clamp-b"} 0
+ test_clamp{src="clamp-c"} 100
+
+eval instant at 0m clamp_max(test_clamp, 75)
+ {src="clamp-a"} -50
+ {src="clamp-b"} 0
+ {src="clamp-c"} 75
+
+eval instant at 0m clamp_min(test_clamp, -25)
+ {src="clamp-a"} -25
+ {src="clamp-b"} 0
+ {src="clamp-c"} 100
+
+eval instant at 0m clamp_max(clamp_min(test_clamp, -20), 70)
+ {src="clamp-a"} -20
+ {src="clamp-b"} 0
+ {src="clamp-c"} 70
+
+eval instant at 0m clamp_max((clamp_min(test_clamp, (-20))), (70))
+ {src="clamp-a"} -20
+ {src="clamp-b"} 0
+ {src="clamp-c"} 70
+
+
+# Tests for sort/sort_desc.
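+# NOTE: sort() orders series by ascending sample value and sort_desc() by
+# descending value. NaN compares as neither smaller nor larger than any number,
+# so the NaN series is expected at the end of both orderings below.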
+clear +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + +eval_ordered instant at 50m sort(http_requests) + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="2", job="api-server"} NaN + +eval_ordered instant at 50m sort_desc(http_requests) + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="canary", instance="2", job="api-server"} NaN + +# Tests for holt_winters +clear + +# positive trends +load 10s + http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + +eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) + {job="api-server", instance="0", group="production"} 8000 + {job="api-server", instance="1", group="production"} 16000 + {job="api-server", instance="0", group="canary"} 24000 + {job="api-server", instance="1", group="canary"} 32000 + +# negative trends +clear +load 10s + http_requests{job="api-server", instance="0", group="production"} 8000-10x1000 + http_requests{job="api-server", instance="1", group="production"} 0-20x1000 + http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000 + http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000 + +eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) + {job="api-server", instance="0", group="production"} 0 + {job="api-server", instance="1", group="production"} -16000 + {job="api-server", instance="0", group="canary"} 24000 + {job="api-server", instance="1", group="canary"} -32000 + +# Tests for avg_over_time +clear +load 10s + metric 1 2 3 4 5 + metric2 1 2 3 4 Inf + metric3 1 2 
3 4 -Inf + metric4 1 2 3 Inf -Inf + metric5 Inf 0 Inf + metric5b Inf 0 Inf + metric5c Inf Inf Inf -Inf + metric6 1 2 3 -Inf -Inf + metric6b -Inf 0 -Inf + metric6c -Inf -Inf -Inf Inf + metric7 1 2 -Inf -Inf Inf + metric8 9.988465674311579e+307 9.988465674311579e+307 + metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307 + metric10 -9.988465674311579e+307 9.988465674311579e+307 + +eval instant at 1m avg_over_time(metric[1m]) + {} 3 + +eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m]) + {} 3 + +eval instant at 1m avg_over_time(metric2[1m]) + {} Inf + +eval instant at 1m sum_over_time(metric2[1m])/count_over_time(metric2[1m]) + {} Inf + +eval instant at 1m avg_over_time(metric3[1m]) + {} -Inf + +eval instant at 1m sum_over_time(metric3[1m])/count_over_time(metric3[1m]) + {} -Inf + +eval instant at 1m avg_over_time(metric4[1m]) + {} NaN + +eval instant at 1m sum_over_time(metric4[1m])/count_over_time(metric4[1m]) + {} NaN + +eval instant at 1m avg_over_time(metric5[1m]) + {} Inf + +eval instant at 1m sum_over_time(metric5[1m])/count_over_time(metric5[1m]) + {} Inf + +eval instant at 1m avg_over_time(metric5b[1m]) + {} Inf + +eval instant at 1m sum_over_time(metric5b[1m])/count_over_time(metric5b[1m]) + {} Inf + +eval instant at 1m avg_over_time(metric5c[1m]) + {} NaN + +eval instant at 1m sum_over_time(metric5c[1m])/count_over_time(metric5c[1m]) + {} NaN + +eval instant at 1m avg_over_time(metric6[1m]) + {} -Inf + +eval instant at 1m sum_over_time(metric6[1m])/count_over_time(metric6[1m]) + {} -Inf + +eval instant at 1m avg_over_time(metric6b[1m]) + {} -Inf + +eval instant at 1m sum_over_time(metric6b[1m])/count_over_time(metric6b[1m]) + {} -Inf + +eval instant at 1m avg_over_time(metric6c[1m]) + {} NaN + +eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m]) + {} NaN + + +eval instant at 1m avg_over_time(metric7[1m]) + {} NaN + +eval instant at 1m sum_over_time(metric7[1m])/count_over_time(metric7[1m]) + {} NaN + +eval instant at 1m avg_over_time(metric8[1m]) + {} 9.988465674311579e+307 + +# This overflows float64. +eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m]) + {} Inf + +eval instant at 1m avg_over_time(metric9[1m]) + {} -9.988465674311579e+307 + +# This overflows float64. +eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m]) + {} -Inf + +eval instant at 1m avg_over_time(metric10[1m]) + {} 0 + +eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m]) + {} 0 + +# Tests for stddev_over_time and stdvar_over_time. +clear +load 10s + metric 0 8 8 2 3 + +eval instant at 1m stdvar_over_time(metric[1m]) + {} 10.56 + +eval instant at 1m stddev_over_time(metric[1m]) + {} 3.249615 + +eval instant at 1m stddev_over_time((metric[1m])) + {} 3.249615 + +# Tests for stddev_over_time and stdvar_over_time #4927. 
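+# (Regression test: a constant series must report exactly 0 variance; a naive
+# E[X^2] - E[X]^2 computation can return a small non-zero artifact here.)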
+clear +load 10s + metric 1.5990505637277868 1.5990505637277868 1.5990505637277868 + +eval instant at 1m stdvar_over_time(metric[1m]) + {} 0 + +eval instant at 1m stddev_over_time(metric[1m]) + {} 0 + +# Tests for quantile_over_time +clear + +load 10s + data{test="two samples"} 0 1 + data{test="three samples"} 0 1 2 + data{test="uneven samples"} 0 1 4 + +eval instant at 1m quantile_over_time(0, data[1m]) + {test="two samples"} 0 + {test="three samples"} 0 + {test="uneven samples"} 0 + +eval instant at 1m quantile_over_time(0.5, data[1m]) + {test="two samples"} 0.5 + {test="three samples"} 1 + {test="uneven samples"} 1 + +eval instant at 1m quantile_over_time(0.75, data[1m]) + {test="two samples"} 0.75 + {test="three samples"} 1.5 + {test="uneven samples"} 2.5 + +eval instant at 1m quantile_over_time(0.8, data[1m]) + {test="two samples"} 0.8 + {test="three samples"} 1.6 + {test="uneven samples"} 2.8 + +eval instant at 1m quantile_over_time(1, data[1m]) + {test="two samples"} 1 + {test="three samples"} 2 + {test="uneven samples"} 4 + +eval instant at 1m quantile_over_time(-1, data[1m]) + {test="two samples"} -Inf + {test="three samples"} -Inf + {test="uneven samples"} -Inf + +eval instant at 1m quantile_over_time(2, data[1m]) + {test="two samples"} +Inf + {test="three samples"} +Inf + {test="uneven samples"} +Inf + +eval instant at 1m (quantile_over_time(2, (data[1m]))) + {test="two samples"} +Inf + {test="three samples"} +Inf + {test="uneven samples"} +Inf + +clear + +# Test time-related functions. +eval instant at 0m year() + {} 1970 + +eval instant at 1ms time() + 0.001 + +eval instant at 50m time() + 3000 + +eval instant at 0m year(vector(1136239445)) + {} 2006 + +eval instant at 0m month() + {} 1 + +eval instant at 0m month(vector(1136239445)) + {} 1 + +eval instant at 0m day_of_month() + {} 1 + +eval instant at 0m day_of_month(vector(1136239445)) + {} 2 + +# Thursday. +eval instant at 0m day_of_week() + {} 4 + +eval instant at 0m day_of_week(vector(1136239445)) + {} 1 + +eval instant at 0m hour() + {} 0 + +eval instant at 0m hour(vector(1136239445)) + {} 22 + +eval instant at 0m minute() + {} 0 + +eval instant at 0m minute(vector(1136239445)) + {} 4 + +# 2008-12-31 23:59:59 just before leap second. +eval instant at 0m year(vector(1230767999)) + {} 2008 + +# 2009-01-01 00:00:00 just after leap second. +eval instant at 0m year(vector(1230768000)) + {} 2009 + +# 2016-02-29 23:59:59 February 29th in leap year. +eval instant at 0m month(vector(1456790399)) + day_of_month(vector(1456790399)) / 100 + {} 2.29 + +# 2016-03-01 00:00:00 March 1st in leap year. +eval instant at 0m month(vector(1456790400)) + day_of_month(vector(1456790400)) / 100 + {} 3.01 + +# February 1st 2016 in leap year. +eval instant at 0m days_in_month(vector(1454284800)) + {} 29 + +# February 1st 2017 not in leap year. +eval instant at 0m days_in_month(vector(1485907200)) + {} 28 + +clear + +# Test duplicate labelset in promql output. 
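+# (changes() drops the metric name, so both series below collapse to the same
+# labelset {src="a",dst="b"} and the query must fail.)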
+load 5m
+ testmetric1{src="a",dst="b"} 0
+ testmetric2{src="a",dst="b"} 1
+
+eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m])
+
+# Tests for *_over_time
+clear
+
+load 10s
+ data{type="numbers"} 2 0 3
+ data{type="some_nan"} 2 0 NaN
+ data{type="some_nan2"} 2 NaN 1
+ data{type="some_nan3"} NaN 0 1
+ data{type="only_nan"} NaN NaN NaN
+
+eval instant at 1m min_over_time(data[1m])
+ {type="numbers"} 0
+ {type="some_nan"} 0
+ {type="some_nan2"} 1
+ {type="some_nan3"} 0
+ {type="only_nan"} NaN
+
+eval instant at 1m max_over_time(data[1m])
+ {type="numbers"} 3
+ {type="some_nan"} 2
+ {type="some_nan2"} 2
+ {type="some_nan3"} 1
+ {type="only_nan"} NaN
+
+clear
+
+# Test for absent()
+eval instant at 50m absent(nonexistent)
+ {} 1
+
+eval instant at 50m absent(nonexistent{job="testjob", instance="testinstance", method=~".x"})
+ {instance="testinstance", job="testjob"} 1
+
+eval instant at 50m absent(nonexistent{job="testjob",job="testjob2",foo="bar"})
+ {foo="bar"} 1
+
+eval instant at 50m absent(nonexistent{job="testjob",job="testjob2",job="three",foo="bar"})
+ {foo="bar"} 1
+
+eval instant at 50m absent(nonexistent{job="testjob",job=~"testjob2",foo="bar"})
+ {foo="bar"} 1
+
+clear
+
+# Don't return anything when there's something there.
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 0+10x10
+
+eval instant at 50m absent(http_requests)
+
+eval instant at 50m absent(sum(http_requests))
+
+clear
+
+eval instant at 50m absent(sum(nonexistent{job="testjob", instance="testinstance"}))
+ {} 1
+
+eval instant at 50m absent(max(nonexistant))
+ {} 1
+
+eval instant at 50m absent(nonexistant > 1)
+ {} 1
+
+eval instant at 50m absent(a + b)
+ {} 1
+
+eval instant at 50m absent(a and b)
+ {} 1
+
+eval instant at 50m absent(rate(nonexistant[5m]))
+ {} 1
+
+clear
+
+# Testdata for absent_over_time()
+eval instant at 1m absent_over_time(http_requests[5m])
+ {} 1
+
+eval instant at 1m absent_over_time(http_requests{handler="/foo"}[5m])
+ {handler="/foo"} 1
+
+eval instant at 1m absent_over_time(http_requests{handler!="/foo"}[5m])
+ {} 1
+
+eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m])
+ {} 1
+
+eval instant at 1m absent_over_time(rate(nonexistant[5m])[5m:])
+ {} 1
+
+eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m])
+ {instance="127.0.0.1"} 1
+
+load 1m
+ http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10
+ http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10
+ httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15
+ httpd_log_lines_total{instance="127.0.0.1",job="node"} 1
+ ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN
+
+eval instant at 5m absent_over_time(http_requests[5m])
+
+eval instant at 5m absent_over_time(rate(http_requests[5m])[5m:1m])
+
+eval instant at 0m absent_over_time(httpd_log_lines_total[30s])
+
+eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
+ {} 1
+
+eval instant at 15m absent_over_time(http_requests[5m])
+
+eval instant at 16m absent_over_time(http_requests[5m])
+ {} 1
+
+eval instant at 16m absent_over_time(http_requests[6m])
+
+eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
+
+eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])
+
+eval instant at 21m absent_over_time({instance="127.0.0.1"}[5m])
+ {instance="127.0.0.1"} 1
+
+eval 
instant at 21m absent_over_time({instance="127.0.0.1"}[20m]) + +eval instant at 21m absent_over_time({job="grok"}[20m]) + {job="grok"} 1 + +eval instant at 30m absent_over_time({instance="127.0.0.1"}[5m:5s]) + {} 1 + +eval instant at 5m absent_over_time({job="ingress"}[4m]) + +eval instant at 10m absent_over_time({job="ingress"}[4m]) + {job="ingress"} 1 + +clear + +# Testing exp() sqrt() log2() log10() ln() +load 5m + exp_root_log{l="x"} 10 + exp_root_log{l="y"} 20 + +eval instant at 5m exp(exp_root_log) + {l="x"} 22026.465794806718 + {l="y"} 485165195.4097903 + +eval instant at 5m exp(exp_root_log - 10) + {l="y"} 22026.465794806718 + {l="x"} 1 + +eval instant at 5m exp(exp_root_log - 20) + {l="x"} 4.5399929762484854e-05 + {l="y"} 1 + +eval instant at 5m ln(exp_root_log) + {l="x"} 2.302585092994046 + {l="y"} 2.995732273553991 + +eval instant at 5m ln(exp_root_log - 10) + {l="y"} 2.302585092994046 + {l="x"} -Inf + +eval instant at 5m ln(exp_root_log - 20) + {l="y"} -Inf + {l="x"} NaN + +eval instant at 5m exp(ln(exp_root_log)) + {l="y"} 20 + {l="x"} 10 + +eval instant at 5m sqrt(exp_root_log) + {l="x"} 3.1622776601683795 + {l="y"} 4.47213595499958 + +eval instant at 5m log2(exp_root_log) + {l="x"} 3.3219280948873626 + {l="y"} 4.321928094887363 + +eval instant at 5m log2(exp_root_log - 10) + {l="y"} 3.3219280948873626 + {l="x"} -Inf + +eval instant at 5m log2(exp_root_log - 20) + {l="x"} NaN + {l="y"} -Inf + +eval instant at 5m log10(exp_root_log) + {l="x"} 1 + {l="y"} 1.301029995663981 + +eval instant at 5m log10(exp_root_log - 10) + {l="y"} 1 + {l="x"} -Inf + +eval instant at 5m log10(exp_root_log - 20) + {l="x"} NaN + {l="y"} -Inf + +clear diff --git a/pkg/query/testdata/promql/prometheus/histograms.test b/pkg/query/testdata/promql/prometheus/histograms.test new file mode 100644 index 0000000000..ec236576b4 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/histograms.test @@ -0,0 +1,193 @@ +# Two histograms with 4 buckets each (x_sum and x_count not included, +# only buckets). Lowest bucket for one histogram < 0, for the other > +# 0. They have the same name, just separated by label. Not useful in +# practice, but can happen (if clients change bucketing), and the +# server has to cope with it. + +# Test histogram. +load 5m + testhistogram_bucket{le="0.1", start="positive"} 0+5x10 + testhistogram_bucket{le=".2", start="positive"} 0+7x10 + testhistogram_bucket{le="1e0", start="positive"} 0+11x10 + testhistogram_bucket{le="+Inf", start="positive"} 0+12x10 + testhistogram_bucket{le="-.2", start="negative"} 0+1x10 + testhistogram_bucket{le="-0.1", start="negative"} 0+2x10 + testhistogram_bucket{le="0.3", start="negative"} 0+2x10 + testhistogram_bucket{le="+Inf", start="negative"} 0+3x10 + + +# Now a more realistic histogram per job and instance to test aggregation. 
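+# NOTE: histograms are aggregated by summing the buckets per upper bound, i.e.
+# sum(rate(..._bucket[5m])) by (le, ...), before histogram_quantile() is applied.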
+load 5m + request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 + request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 + request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 + request_duration_seconds_bucket{job="job1", instance="ins2", le="0.1"} 0+2x10 + request_duration_seconds_bucket{job="job1", instance="ins2", le="0.2"} 0+5x10 + request_duration_seconds_bucket{job="job1", instance="ins2", le="+Inf"} 0+6x10 + request_duration_seconds_bucket{job="job2", instance="ins1", le="0.1"} 0+3x10 + request_duration_seconds_bucket{job="job2", instance="ins1", le="0.2"} 0+4x10 + request_duration_seconds_bucket{job="job2", instance="ins1", le="+Inf"} 0+6x10 + request_duration_seconds_bucket{job="job2", instance="ins2", le="0.1"} 0+4x10 + request_duration_seconds_bucket{job="job2", instance="ins2", le="0.2"} 0+7x10 + request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10 + +# Different le representations in one histogram. +load 5m + mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 + mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10 + mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10 + mixed_bucket{job="job1", instance="ins1", le="2.0e-1"} 0+1x10 + mixed_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 + mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10 + mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10 + +# Quantile too low. +eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket) + {start="positive"} -Inf + {start="negative"} -Inf + +# Quantile too high. +eval instant at 50m histogram_quantile(1.01, testhistogram_bucket) + {start="positive"} +Inf + {start="negative"} +Inf + +# Quantile value in lowest bucket, which is positive. +eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"}) + {start="positive"} 0 + +# Quantile value in lowest bucket, which is negative. +eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"}) + {start="negative"} -0.2 + +# Quantile value in highest bucket. +eval instant at 50m histogram_quantile(1, testhistogram_bucket) + {start="positive"} 1 + {start="negative"} 0.3 + +# Finally some useful quantiles. +eval instant at 50m histogram_quantile(0.2, testhistogram_bucket) + {start="positive"} 0.048 + {start="negative"} -0.2 + + +eval instant at 50m histogram_quantile(0.5, testhistogram_bucket) + {start="positive"} 0.15 + {start="negative"} -0.15 + +eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) + {start="positive"} 0.72 + {start="negative"} 0.3 + +# More realistic with rates. +eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) + {start="positive"} 0.048 + {start="negative"} -0.2 + +eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) + {start="positive"} 0.15 + {start="negative"} -0.15 + +eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) + {start="positive"} 0.72 + {start="negative"} 0.3 + +# Aggregated histogram: Everything in one. +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) + {} 0.075 + +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) + {} 0.1277777777777778 + +# Aggregated histogram: Everything in one. Now with avg, which does not change anything. 
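+# (avg by (le) divides every bucket of the summed histogram by the same series
+# count, so relative bucket proportions, and therefore quantiles, are unchanged.)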
+eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
+ {} 0.075
+
+eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
+ {} 0.12777777777777778
+
+# Aggregated histogram: By instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
+ {instance="ins1"} 0.075
+ {instance="ins2"} 0.075
+
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
+ {instance="ins1"} 0.1333333333
+ {instance="ins2"} 0.125
+
+# Aggregated histogram: By job.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
+ {job="job1"} 0.1
+ {job="job2"} 0.0642857142857143
+
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
+ {job="job1"} 0.14
+ {job="job2"} 0.1125
+
+# Aggregated histogram: By job and instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
+ {instance="ins1", job="job1"} 0.11
+ {instance="ins2", job="job1"} 0.09
+ {instance="ins1", job="job2"} 0.06
+ {instance="ins2", job="job2"} 0.0675
+
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
+ {instance="ins1", job="job1"} 0.15
+ {instance="ins2", job="job1"} 0.1333333333333333
+ {instance="ins1", job="job2"} 0.1
+ {instance="ins2", job="job2"} 0.1166666666666667
+
+# The unaggregated histogram for comparison. Same result as the previous one.
+eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
+ {instance="ins1", job="job1"} 0.11
+ {instance="ins2", job="job1"} 0.09
+ {instance="ins1", job="job2"} 0.06
+ {instance="ins2", job="job2"} 0.0675
+
+eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
+ {instance="ins1", job="job1"} 0.15
+ {instance="ins2", job="job1"} 0.13333333333333333
+ {instance="ins1", job="job2"} 0.1
+ {instance="ins2", job="job2"} 0.11666666666666667
+
+# A histogram with nonmonotonic bucket counts. This may happen when recording
+# rule evaluation or federation races scrape ingestion, causing some bucket
+# counts to be derived from fewer samples.
+
+load 5m
+ nonmonotonic_bucket{le="0.1"} 0+2x10
+ nonmonotonic_bucket{le="1"} 0+1x10
+ nonmonotonic_bucket{le="10"} 0+5x10
+ nonmonotonic_bucket{le="100"} 0+4x10
+ nonmonotonic_bucket{le="1000"} 0+9x10
+ nonmonotonic_bucket{le="+Inf"} 0+8x10
+
+# Nonmonotonic buckets.
+eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket)
+ {} 0.0045
+
+eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket)
+ {} 8.5
+
+eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
+ {} 979.75
+
+# Buckets with different representations of the same upper bound.
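+# (le="0.2", le="2e-1" and le="2.0e-1" parse to the same bound and their counts
+# are coalesced by summing; a histogram left with only +Inf buckets, as for
+# instance="ins2", yields NaN.)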
+eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m])) + {instance="ins1", job="job1"} 0.15 + {instance="ins2", job="job1"} NaN + +eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) + {instance="ins1", job="job1"} 0.2 + {instance="ins2", job="job1"} NaN + +eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) + {instance="ins1", job="job1"} 0.2 + {instance="ins2", job="job1"} NaN + +load 5m + empty_bucket{le="0.1", job="job1", instance="ins1"} 0x10 + empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 + empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 + +eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) + {instance="ins1", job="job1"} NaN \ No newline at end of file diff --git a/pkg/query/testdata/promql/prometheus/literals.test b/pkg/query/testdata/promql/prometheus/literals.test new file mode 100644 index 0000000000..0d86638429 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/literals.test @@ -0,0 +1,59 @@ +eval instant at 50m 12.34e6 + 12340000 + +eval instant at 50m 12.34e+6 + 12340000 + +eval instant at 50m 12.34e-6 + 0.00001234 + +eval instant at 50m 1+1 + 2 + +eval instant at 50m 1-1 + 0 + +eval instant at 50m 1 - -1 + 2 + +eval instant at 50m .2 + 0.2 + +eval instant at 50m +0.2 + 0.2 + +eval instant at 50m -0.2e-6 + -0.0000002 + +eval instant at 50m +Inf + +Inf + +eval instant at 50m inF + +Inf + +eval instant at 50m -inf + -Inf + +eval instant at 50m NaN + NaN + +eval instant at 50m nan + NaN + +eval instant at 50m 2. + 2 + +eval instant at 50m 1 / 0 + +Inf + +eval instant at 50m ((1) / (0)) + +Inf + +eval instant at 50m -1 / 0 + -Inf + +eval instant at 50m 0 / 0 + NaN + +eval instant at 50m 1 % 0 + NaN diff --git a/pkg/query/testdata/promql/prometheus/operators.test b/pkg/query/testdata/promql/prometheus/operators.test new file mode 100644 index 0000000000..d5a4d76f50 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/operators.test @@ -0,0 +1,469 @@ +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + +load 5m + vector_matching_a{l="x"} 0+1x100 + vector_matching_a{l="y"} 0+2x50 + vector_matching_b{l="x"} 0+4x25 + + +eval instant at 50m SUM(http_requests) BY (job) - COUNT(http_requests) BY (job) + {job="api-server"} 996 + {job="app-server"} 2596 + +eval instant at 50m 2 - SUM(http_requests) BY (job) + {job="api-server"} -998 + {job="app-server"} -2598 + +eval instant at 50m -http_requests{job="api-server",instance="0",group="production"} + {job="api-server",instance="0",group="production"} -100 + +eval instant at 50m +http_requests{job="api-server",instance="0",group="production"} + http_requests{job="api-server",instance="0",group="production"} 100 + +eval instant at 50m - - - SUM(http_requests) BY (job) + {job="api-server"} -1000 + {job="app-server"} -2600 + +eval instant at 50m - - - 1 + -1 + +eval instant at 50m -2^---1*3 + -1.5 + +eval instant at 50m 2/-2^---1*3+2 + -10 + +eval instant at 50m -10^3 * - SUM(http_requests) BY (job) ^ -1 + 
{job="api-server"} 1 + {job="app-server"} 0.38461538461538464 + +eval instant at 50m 1000 / SUM(http_requests) BY (job) + {job="api-server"} 1 + {job="app-server"} 0.38461538461538464 + +eval instant at 50m SUM(http_requests) BY (job) - 2 + {job="api-server"} 998 + {job="app-server"} 2598 + +eval instant at 50m SUM(http_requests) BY (job) % 3 + {job="api-server"} 1 + {job="app-server"} 2 + +eval instant at 50m SUM(http_requests) BY (job) % 0.3 + {job="api-server"} 0.1 + {job="app-server"} 0.2 + +eval instant at 50m SUM(http_requests) BY (job) ^ 2 + {job="api-server"} 1000000 + {job="app-server"} 6760000 + +eval instant at 50m SUM(http_requests) BY (job) % 3 ^ 2 + {job="api-server"} 1 + {job="app-server"} 8 + +eval instant at 50m SUM(http_requests) BY (job) % 2 ^ (3 ^ 2) + {job="api-server"} 488 + {job="app-server"} 40 + +eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 + {job="api-server"} 488 + {job="app-server"} 40 + +eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 ^ 2 + {job="api-server"} 1000 + {job="app-server"} 2600 + +eval instant at 50m COUNT(http_requests) BY (job) ^ COUNT(http_requests) BY (job) + {job="api-server"} 256 + {job="app-server"} 256 + +eval instant at 50m SUM(http_requests) BY (job) / 0 + {job="api-server"} +Inf + {job="app-server"} +Inf + +eval instant at 50m http_requests{group="canary", instance="0", job="api-server"} / 0 + {group="canary", instance="0", job="api-server"} +Inf + +eval instant at 50m -1 * http_requests{group="canary", instance="0", job="api-server"} / 0 + {group="canary", instance="0", job="api-server"} -Inf + +eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} / 0 + {group="canary", instance="0", job="api-server"} NaN + +eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} % 0 + {group="canary", instance="0", job="api-server"} NaN + +eval instant at 50m SUM(http_requests) BY (job) + SUM(http_requests) BY (job) + {job="api-server"} 2000 + {job="app-server"} 5200 + +eval instant at 50m (SUM((http_requests)) BY (job)) + SUM(http_requests) BY (job) + {job="api-server"} 2000 + {job="app-server"} 5200 + +eval instant at 50m http_requests{job="api-server", group="canary"} + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="1", job="api-server"} 400 + +eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60 + {group="canary", instance="0", job="api-server"} 330 + {group="canary", instance="1", job="api-server"} 440 + +eval instant at 50m rate(http_requests[25m]) * 25 * 60 + {group="canary", instance="0", job="api-server"} 150 + {group="canary", instance="0", job="app-server"} 350 + {group="canary", instance="1", job="api-server"} 200 + {group="canary", instance="1", job="app-server"} 400 + {group="production", instance="0", job="api-server"} 50 + {group="production", instance="0", job="app-server"} 249.99999999999997 + {group="production", instance="1", job="api-server"} 100 + {group="production", instance="1", job="app-server"} 300 + +eval instant at 50m (rate((http_requests[25m])) * 25) * 60 + {group="canary", instance="0", job="api-server"} 150 + {group="canary", instance="0", job="app-server"} 350 + {group="canary", instance="1", job="api-server"} 200 + {group="canary", instance="1", job="app-server"} 400 + {group="production", instance="0", job="api-server"} 50 + {group="production", instance="0", job="app-server"} 249.99999999999997 + 
{group="production", instance="1", job="api-server"} 100 + {group="production", instance="1", job="app-server"} 300 + + +eval instant at 50m http_requests{group="canary"} and http_requests{instance="0"} + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + +eval instant at 50m (http_requests{group="canary"} + 1) and http_requests{instance="0"} + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + +eval instant at 50m (http_requests{group="canary"} + 1) and on(instance, job) http_requests{instance="0", group="production"} + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + +eval instant at 50m (http_requests{group="canary"} + 1) and on(instance) http_requests{instance="0", group="production"} + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + +eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group) http_requests{instance="0", group="production"} + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + +eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group, job) http_requests{instance="0", group="production"} + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + +eval instant at 50m http_requests{group="canary"} or http_requests{group="production"} + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 + +# On overlap the rhs samples must be dropped. +eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"} + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + {group="canary", instance="1", job="api-server"} 401 + {group="canary", instance="1", job="app-server"} 801 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 + + +# Matching only on instance excludes everything that has instance=0/1 but includes +# entries without the instance label. 
+eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a) + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + {group="canary", instance="1", job="api-server"} 401 + {group="canary", instance="1", job="app-server"} 801 + vector_matching_a{l="x"} 10 + vector_matching_a{l="y"} 20 + +eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, job) (http_requests or cpu_count or vector_matching_a) + {group="canary", instance="0", job="api-server"} 301 + {group="canary", instance="0", job="app-server"} 701 + {group="canary", instance="1", job="api-server"} 401 + {group="canary", instance="1", job="app-server"} 801 + vector_matching_a{l="x"} 10 + vector_matching_a{l="y"} 20 + +eval instant at 50m http_requests{group="canary"} unless http_requests{instance="0"} + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + +eval instant at 50m http_requests{group="canary"} unless on(job) http_requests{instance="0"} + +eval instant at 50m http_requests{group="canary"} unless on(job, instance) http_requests{instance="0"} + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + +eval instant at 50m http_requests{group="canary"} / on(instance,job) http_requests{group="production"} + {instance="0", job="api-server"} 3 + {instance="0", job="app-server"} 1.4 + {instance="1", job="api-server"} 2 + {instance="1", job="app-server"} 1.3333333333333333 + +eval instant at 50m http_requests{group="canary"} unless ignoring(group, instance) http_requests{instance="0"} + +eval instant at 50m http_requests{group="canary"} unless ignoring(group) http_requests{instance="0"} + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + +eval instant at 50m http_requests{group="canary"} / ignoring(group) http_requests{group="production"} + {instance="0", job="api-server"} 3 + {instance="0", job="app-server"} 1.4 + {instance="1", job="api-server"} 2 + {instance="1", job="app-server"} 1.3333333333333333 + +# https://github.com/prometheus/prometheus/issues/1489 +eval instant at 50m http_requests AND ON (dummy) vector(1) + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 + +eval instant at 50m http_requests AND IGNORING (group, instance, job) vector(1) + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="api-server"} 200 + 
http_requests{group="production", instance="1", job="app-server"} 600 + + +# Comparisons. +eval instant at 50m SUM(http_requests) BY (job) > 1000 + {job="app-server"} 2600 + +eval instant at 50m 1000 < SUM(http_requests) BY (job) + {job="app-server"} 2600 + +eval instant at 50m SUM(http_requests) BY (job) <= 1000 + {job="api-server"} 1000 + +eval instant at 50m SUM(http_requests) BY (job) != 1000 + {job="app-server"} 2600 + +eval instant at 50m SUM(http_requests) BY (job) == 1000 + {job="api-server"} 1000 + +eval instant at 50m SUM(http_requests) BY (job) == bool 1000 + {job="api-server"} 1 + {job="app-server"} 0 + +eval instant at 50m SUM(http_requests) BY (job) == bool SUM(http_requests) BY (job) + {job="api-server"} 1 + {job="app-server"} 1 + +eval instant at 50m SUM(http_requests) BY (job) != bool SUM(http_requests) BY (job) + {job="api-server"} 0 + {job="app-server"} 0 + +eval instant at 50m 0 == bool 1 + 0 + +eval instant at 50m 1 == bool 1 + 1 + +eval instant at 50m http_requests{job="api-server", instance="0", group="production"} == bool 100 + {job="api-server", instance="0", group="production"} 1 + +# group_left/group_right. + +clear + +load 5m + node_var{instance="abc",job="node"} 2 + node_role{instance="abc",job="node",role="prometheus"} 1 + +load 5m + node_cpu{instance="abc",job="node",mode="idle"} 3 + node_cpu{instance="abc",job="node",mode="user"} 1 + node_cpu{instance="def",job="node",mode="idle"} 8 + node_cpu{instance="def",job="node",mode="user"} 2 + +load 5m + random{foo="bar"} 1 + +load 5m + threshold{instance="abc",job="node",target="a@b.com"} 0 + +# Copy machine role to node variable. +eval instant at 5m node_role * on (instance) group_right (role) node_var + {instance="abc",job="node",role="prometheus"} 2 + +eval instant at 5m node_var * on (instance) group_left (role) node_role + {instance="abc",job="node",role="prometheus"} 2 + +eval instant at 5m node_var * ignoring (role) group_left (role) node_role + {instance="abc",job="node",role="prometheus"} 2 + +eval instant at 5m node_role * ignoring (role) group_right (role) node_var + {instance="abc",job="node",role="prometheus"} 2 + +# Copy machine role to node variable with instrumentation labels. +eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role + {instance="abc",job="node",mode="idle",role="prometheus"} 3 + {instance="abc",job="node",mode="user",role="prometheus"} 1 + +eval instant at 5m node_cpu * on (instance) group_left (role) node_role + {instance="abc",job="node",mode="idle",role="prometheus"} 3 + {instance="abc",job="node",mode="user",role="prometheus"} 1 + + +# Ratio of total. 
+eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) + {instance="abc",job="node",mode="idle"} .75 + {instance="abc",job="node",mode="user"} .25 + {instance="def",job="node",mode="idle"} .80 + {instance="def",job="node",mode="user"} .20 + +eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) + {job="node",mode="idle"} 0.7857142857142857 + {job="node",mode="user"} 0.21428571428571427 + +eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) + {} 1.0 + + +eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) + {instance="abc",job="node",mode="idle"} .75 + {instance="abc",job="node",mode="user"} .25 + {instance="def",job="node",mode="idle"} .80 + {instance="def",job="node",mode="user"} .20 + +eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) + {instance="abc",job="node",mode="idle"} .75 + {instance="abc",job="node",mode="user"} .25 + {instance="def",job="node",mode="idle"} .80 + {instance="def",job="node",mode="user"} .20 + +eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) + {job="node",mode="idle"} 0.7857142857142857 + {job="node",mode="user"} 0.21428571428571427 + +eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) + {} 1.0 + + +# Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here). +eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 + {instance="abc",job="node",mode="idle",foo="bar"} 3 + {instance="abc",job="node",mode="user",foo="bar"} 1 + {instance="def",job="node",mode="idle",foo="bar"} 8 + {instance="def",job="node",mode="user",foo="bar"} 2 + + +# Use threshold from metric, and copy over target. +eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold + node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 + node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 + +# Use threshold from metric, and a default (1) if it's not present. +eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) + node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 + node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 + node_cpu{instance="def",job="node",mode="idle"} 8 + node_cpu{instance="def",job="node",mode="user"} 2 + + +# Check that binops drop the metric name. 
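+# (Filtering comparisons are the exception: they keep the metric name, as the
+# test_total > test_smaller cases at the end of this file show.)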
+eval instant at 5m node_cpu + 2 + {instance="abc",job="node",mode="idle"} 5 + {instance="abc",job="node",mode="user"} 3 + {instance="def",job="node",mode="idle"} 10 + {instance="def",job="node",mode="user"} 4 + +eval instant at 5m node_cpu - 2 + {instance="abc",job="node",mode="idle"} 1 + {instance="abc",job="node",mode="user"} -1 + {instance="def",job="node",mode="idle"} 6 + {instance="def",job="node",mode="user"} 0 + +eval instant at 5m node_cpu / 2 + {instance="abc",job="node",mode="idle"} 1.5 + {instance="abc",job="node",mode="user"} 0.5 + {instance="def",job="node",mode="idle"} 4 + {instance="def",job="node",mode="user"} 1 + +eval instant at 5m node_cpu * 2 + {instance="abc",job="node",mode="idle"} 6 + {instance="abc",job="node",mode="user"} 2 + {instance="def",job="node",mode="idle"} 16 + {instance="def",job="node",mode="user"} 4 + +eval instant at 5m node_cpu ^ 2 + {instance="abc",job="node",mode="idle"} 9 + {instance="abc",job="node",mode="user"} 1 + {instance="def",job="node",mode="idle"} 64 + {instance="def",job="node",mode="user"} 4 + +eval instant at 5m node_cpu % 2 + {instance="abc",job="node",mode="idle"} 1 + {instance="abc",job="node",mode="user"} 1 + {instance="def",job="node",mode="idle"} 0 + {instance="def",job="node",mode="user"} 0 + + +clear + +load 5m + random{foo="bar"} 2 + metricA{baz="meh"} 3 + metricB{baz="meh"} 4 + +# On with no labels, for metrics with no common labels. +eval instant at 5m random + on() metricA + {} 5 + +# Ignoring with no labels is the same as no ignoring. +eval instant at 5m metricA + ignoring() metricB + {baz="meh"} 7 + +eval instant at 5m metricA + metricB + {baz="meh"} 7 + +clear + +# Test duplicate labelset in promql output. +load 5m + testmetric1{src="a",dst="b"} 0 + testmetric2{src="a",dst="b"} 1 + +eval_fail instant at 0m -{__name__=~'testmetric1|testmetric2'} + +clear + +load 5m + test_total{instance="localhost"} 50 + test_smaller{instance="localhost"} 10 + +eval instant at 5m test_total > bool test_smaller + {instance="localhost"} 1 + +eval instant at 5m test_total > test_smaller + test_total{instance="localhost"} 50 + +eval instant at 5m test_total < bool test_smaller + {instance="localhost"} 0 + +eval instant at 5m test_total < test_smaller diff --git a/pkg/query/testdata/promql/prometheus/selectors.test b/pkg/query/testdata/promql/prometheus/selectors.test new file mode 100644 index 0000000000..c4309957b0 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/selectors.test @@ -0,0 +1,201 @@ +load 10s + http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + +eval instant at 8000s rate(http_requests[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + {job="api-server", instance="0", group="canary"} 3 + {job="api-server", instance="1", group="canary"} 4 + +eval instant at 18000s rate(http_requests[1m]) + {job="api-server", instance="0", group="production"} 3 + {job="api-server", instance="1", group="production"} 3 + {job="api-server", instance="0", group="canary"} 8 + {job="api-server", instance="1", group="canary"} 4 + +eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", 
group="production"} 2 + +eval instant at 18000s rate(http_requests{group=~".*ry", instance="1"}[1m]) + {job="api-server", instance="1", group="canary"} 4 + +eval instant at 18000s rate(http_requests{instance!="3"}[1m] offset 10000s) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + {job="api-server", instance="0", group="canary"} 3 + {job="api-server", instance="1", group="canary"} 4 + +eval instant at 18000s rate(http_requests[40s]) - rate(http_requests[1m] offset 10000s) + {job="api-server", instance="0", group="production"} 2 + {job="api-server", instance="1", group="production"} 1 + {job="api-server", instance="0", group="canary"} 5 + {job="api-server", instance="1", group="canary"} 0 + +# https://github.com/prometheus/prometheus/issues/3575 +eval instant at 0s http_requests{foo!="bar"} + http_requests{job="api-server", instance="0", group="production"} 0 + http_requests{job="api-server", instance="1", group="production"} 0 + http_requests{job="api-server", instance="0", group="canary"} 0 + http_requests{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests{foo!="bar", job="api-server"} + http_requests{job="api-server", instance="0", group="production"} 0 + http_requests{job="api-server", instance="1", group="production"} 0 + http_requests{job="api-server", instance="0", group="canary"} 0 + http_requests{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests{foo!~"bar", job="api-server"} + http_requests{job="api-server", instance="0", group="production"} 0 + http_requests{job="api-server", instance="1", group="production"} 0 + http_requests{job="api-server", instance="0", group="canary"} 0 + http_requests{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests{foo!~"bar", job="api-server", instance="1", x!="y", z="", group!=""} + http_requests{job="api-server", instance="1", group="production"} 0 + http_requests{job="api-server", instance="1", group="canary"} 0 + +# https://github.com/prometheus/prometheus/issues/7994 +eval instant at 8000s rate(http_requests{group=~"(?i:PRO).*"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 8000s rate(http_requests{group=~".*?(?i:PRO).*"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 8000s rate(http_requests{group=~".*(?i:DUC).*"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 8000s rate(http_requests{group=~".*(?i:TION)"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 8000s rate(http_requests{group=~".*(?i:TION).*?"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + + +eval instant at 8000s rate(http_requests{group=~"((?i)PRO).*"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 8000s rate(http_requests{group=~".*((?i)DUC).*"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 8000s rate(http_requests{group=~".*((?i)TION)"}[1m]) + {job="api-server", instance="0", group="production"} 1 + 
{job="api-server", instance="1", group="production"} 2 + + +eval instant at 8000s rate(http_requests{group=~"(?i:PRODUCTION)"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 8000s rate(http_requests{group=~".*(?i:C).*"}[1m]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + {job="api-server", instance="0", group="canary"} 3 + {job="api-server", instance="1", group="canary"} 4 + +clear +load 1m + metric1{a="a"} 0+1x100 + metric2{b="b"} 0+1x50 + +eval instant at 90m metric1 offset 15m or metric2 offset 45m + metric1{a="a"} 75 + metric2{b="b"} 45 + +clear + +load 5m + x{y="testvalue"} 0+10x10 + +load 5m + cpu_count{instance="0", type="numa"} 0+30x10 + cpu_count{instance="0", type="smp"} 0+10x20 + cpu_count{instance="1", type="smp"} 0+20x10 + +load 5m + label_grouping_test{a="aa", b="bb"} 0+10x10 + label_grouping_test{a="a", b="abb"} 0+20x10 + +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + +# Single-letter label names and values. +eval instant at 50m x{y="testvalue"} + x{y="testvalue"} 100 + +# Basic Regex +eval instant at 50m {__name__=~".+"} + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 + x{y="testvalue"} 100 + label_grouping_test{a="a", b="abb"} 200 + label_grouping_test{a="aa", b="bb"} 100 + cpu_count{instance="1", type="smp"} 200 + cpu_count{instance="0", type="smp"} 100 + cpu_count{instance="0", type="numa"} 300 + +eval instant at 50m {job=~".+-server", job!~"api-.+"} + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="app-server"} 600 + +eval instant at 50m http_requests{group!="canary"} + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="0", job="api-server"} 100 + +eval instant at 50m http_requests{job=~".+-server",group!="canary"} + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="api-server"} 200 + 
http_requests{group="production", instance="0", job="api-server"} 100 + +eval instant at 50m http_requests{job!~"api-.+",group!="canary"} + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="production", instance="0", job="app-server"} 500 + +eval instant at 50m http_requests{group="production",job=~"api-.+"} + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="1", job="api-server"} 200 + +eval instant at 50m http_requests{group="production",job="api-server"} offset 5m + http_requests{group="production", instance="0", job="api-server"} 90 + http_requests{group="production", instance="1", job="api-server"} 180 + +clear + +# Matrix tests. +load 1h + testmetric{aa="bb"} 1 + testmetric{a="abb"} 2 + +eval instant at 0h testmetric + testmetric{aa="bb"} 1 + testmetric{a="abb"} 2 + +clear diff --git a/pkg/query/testdata/promql/prometheus/staleness.test b/pkg/query/testdata/promql/prometheus/staleness.test new file mode 100644 index 0000000000..76ee2f2878 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/staleness.test @@ -0,0 +1,51 @@ +load 10s + metric 0 1 stale 2 + +# Instant vector doesn't return series when stale. +eval instant at 10s metric + {__name__="metric"} 1 + +eval instant at 20s metric + +eval instant at 30s metric + {__name__="metric"} 2 + +eval instant at 40s metric + {__name__="metric"} 2 + +# It goes stale 5 minutes after the last sample. +eval instant at 330s metric + {__name__="metric"} 2 + +eval instant at 331s metric + + +# Range vector ignores stale sample. +eval instant at 30s count_over_time(metric[1m]) + {} 3 + +eval instant at 10s count_over_time(metric[1s]) + {} 1 + +eval instant at 20s count_over_time(metric[1s]) + +eval instant at 20s count_over_time(metric[10s]) + {} 1 + + +clear + +load 10s + metric 0 + +# Series with single point goes stale after 5 minutes. +eval instant at 0s metric + {__name__="metric"} 0 + +eval instant at 150s metric + {__name__="metric"} 0 + +eval instant at 300s metric + {__name__="metric"} 0 + +eval instant at 301s metric diff --git a/pkg/query/testdata/promql/prometheus/subquery.test b/pkg/query/testdata/promql/prometheus/subquery.test new file mode 100644 index 0000000000..db85b16227 --- /dev/null +++ b/pkg/query/testdata/promql/prometheus/subquery.test @@ -0,0 +1,117 @@ +load 10s + metric 1 2 + +# Evaluation before 0s gets no sample. +eval instant at 10s sum_over_time(metric[50s:10s]) + {} 3 + +eval instant at 10s sum_over_time(metric[50s:5s]) + {} 4 + +# Every evaluation yields the last value, i.e. 2 +eval instant at 5m sum_over_time(metric[50s:10s]) + {} 12 + +# Series becomes stale at 5m10s (5m after last sample) +# Hence subquery gets a single sample at 6m-50s=5m10s. 
+eval instant at 6m sum_over_time(metric[50s:10s]) + {} 2 + +eval instant at 10s rate(metric[20s:10s]) + {} 0.1 + +eval instant at 20s rate(metric[20s:5s]) + {} 0.05 + +clear + +load 10s + http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + +eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m:10s]) + {job="api-server", instance="0", group="production"} 1 + {job="api-server", instance="1", group="production"} 2 + +eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s]) + {job="api-server", instance="0", group="canary"} 8 + {job="api-server", instance="1", group="canary"} 4 + {job="api-server", instance="1", group="production"} 3 + {job="api-server", instance="0", group="production"} 3 + +clear + +load 10s + metric1 0+1x1000 + metric2 0+2x1000 + metric3 0+3x1000 + +eval instant at 1000s sum_over_time(metric1[30s:10s]) + {} 394 + +# This is (394*2 - 100), because other than the last 100 at 1000s, +# everything else is repeated with the 5s step. +eval instant at 1000s sum_over_time(metric1[30s:5s]) + {} 688 + +# Offset is aligned with the step. +eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) + {} 394 + +# Same result for different offsets due to step alignment. +eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) + {} 297 + +eval instant at 1010s sum_over_time(metric1[30s:10s] offset 7s) + {} 297 + +eval instant at 1010s sum_over_time(metric1[30s:10s] offset 5s) + {} 297 + +eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s) + {} 297 + +eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s) + {} 297 + +# Nested subqueries +eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) + {} 0.4 + +eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) + {} 0.8 + +eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) + {} 1.2 + +eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) + {} 2.4 + +clear + +# Fibonacci sequence, to ensure the rate is not constant. +# Additional note: using subqueries unnecessarily is unwise. 
+load 7s + metric 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 4378519841510949178490918731459856482 7084593923980518516849609894969925639 
11463113765491467695340528626429782121 18547707689471986212190138521399707760 + +# Extrapolated from [3@21, 144@77]: (144 - 3) / (77 - 21) +eval instant at 80s rate(metric[1m]) + {} 2.517857143 + +# No extrapolation, [2@20, 144@80]: (144 - 2) / 60 +eval instant at 80s rate(metric[1m:10s]) + {} 2.366666667 + +# Only one value between 10s and 20s, 2@14 +eval instant at 20s min_over_time(metric[10s]) + {} 2 + +# min(1@10, 2@20) +eval instant at 20s min_over_time(metric[10s:10s]) + {} 1 + +eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) + {} 0.12119047619047618 + diff --git a/pkg/query/testdata/promql/thanos/aggregators.test b/pkg/query/testdata/promql/thanos/aggregators.test new file mode 100644 index 0000000000..075a3f9c3e --- /dev/null +++ b/pkg/query/testdata/promql/thanos/aggregators.test @@ -0,0 +1,220 @@ +store {} 0 30m + +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + +store {} 30m 10d + +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + +store {} 0 30m + +load 5m + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + +store {} 30m 10d + +load 5m + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + +store {} 0 10d + +load 5m + foo{job="api-server", instance="0", region="europe"} 0+90x10 + foo{job="api-server"} 0+100x10 + +# Simple sum. +eval instant at 50m SUM BY (group) (http_requests{job="api-server"}) + {group="canary"} 700 + {group="production"} 300 + +eval instant at 50m SUM BY (group) (((http_requests{job="api-server"}))) + {group="canary"} 700 + {group="production"} 300 + +# Test alternative "by"-clause order. +eval instant at 50m sum by (group) (http_requests{job="api-server"}) + {group="canary"} 700 + {group="production"} 300 + +# Simple average. +eval instant at 50m avg by (group) (http_requests{job="api-server"}) + {group="canary"} 350 + {group="production"} 150 + +# Simple count. +eval instant at 50m count by (group) (http_requests{job="api-server"}) + {group="canary"} 2 + {group="production"} 2 + +# Simple without. +eval instant at 50m sum without (instance) (http_requests{job="api-server"}) + {group="canary",job="api-server"} 700 + {group="production",job="api-server"} 300 + +# Empty by. +eval instant at 50m sum by () (http_requests{job="api-server"}) + {} 1000 + +# No by/without. +eval instant at 50m sum(http_requests{job="api-server"}) + {} 1000 + +# Empty without. 
+eval instant at 50m sum without () (http_requests{job="api-server",group="production"}) + {group="production",job="api-server",instance="0"} 100 + {group="production",job="api-server",instance="1"} 200 + +# Without with mismatched and missing labels. Do not do this. +eval instant at 50m sum without (instance) (http_requests{job="api-server"} or foo) + {group="canary",job="api-server"} 700 + {group="production",job="api-server"} 300 + {region="europe",job="api-server"} 900 + {job="api-server"} 1000 + +# Lower-cased aggregation operators should work too. +eval instant at 50m sum(http_requests) by (job) + min(http_requests) by (job) + max(http_requests) by (job) + avg(http_requests) by (job) + {job="app-server"} 4550 + {job="api-server"} 1750 + +# Test alternative "by"-clause order. +eval instant at 50m sum by (group) (http_requests{job="api-server"}) + {group="canary"} 700 + {group="production"} 300 + +# Test both alternative "by"-clause orders in one expression. +# Public health warning: stick to one form within an expression (or even +# in an organization), or risk serious user confusion. +eval instant at 50m sum(sum by (group) (http_requests{job="api-server"})) by (job) + {} 1000 + +eval instant at 50m SUM(http_requests) + {} 3600 + +eval instant at 50m SUM(http_requests{instance="0"}) BY(job) + {job="api-server"} 400 + {job="app-server"} 1200 + +eval instant at 50m SUM(http_requests) BY (job) + {job="api-server"} 1000 + {job="app-server"} 2600 + +# Non-existent labels mentioned in BY-clauses shouldn't propagate to output. +eval instant at 50m SUM(http_requests) BY (job, nonexistent) + {job="api-server"} 1000 + {job="app-server"} 2600 + +eval instant at 50m COUNT(http_requests) BY (job) + {job="api-server"} 4 + {job="app-server"} 4 + +eval instant at 50m SUM(http_requests) BY (job, group) + {group="canary", job="api-server"} 700 + {group="canary", job="app-server"} 1500 + {group="production", job="api-server"} 300 + {group="production", job="app-server"} 1100 + +eval instant at 50m AVG(http_requests) BY (job) + {job="api-server"} 250 + {job="app-server"} 650 + +eval instant at 50m MIN(http_requests) BY (job) + {job="api-server"} 100 + {job="app-server"} 500 + +eval instant at 50m MAX(http_requests) BY (job) + {job="api-server"} 400 + {job="app-server"} 800 + +eval instant at 50m abs(-1 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 100 + {group="production", instance="1", job="api-server"} 200 + +eval instant at 50m floor(0.004 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} 0 + +eval instant at 50m ceil(0.004 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 1 + {group="production", instance="1", job="api-server"} 1 + +eval instant at 50m round(0.004 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} 1 + +# Round should correctly handle negative numbers. +eval instant at 50m round(-1 * (0.004 * http_requests{group="production",job="api-server"})) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} -1 + +# Round should round half up. 
+eval instant at 50m round(0.005 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 1 + {group="production", instance="1", job="api-server"} 1 + +eval instant at 50m round(-1 * (0.005 * http_requests{group="production",job="api-server"})) + {group="production", instance="0", job="api-server"} 0 + {group="production", instance="1", job="api-server"} -1 + +eval instant at 50m round(1 + 0.005 * http_requests{group="production",job="api-server"}) + {group="production", instance="0", job="api-server"} 2 + {group="production", instance="1", job="api-server"} 2 + +eval instant at 50m round(-1 * (1 + 0.005 * http_requests{group="production",job="api-server"})) + {group="production", instance="0", job="api-server"} -1 + {group="production", instance="1", job="api-server"} -2 + +# Round should accept the number to round nearest to. +eval instant at 50m round(0.0005 * http_requests{group="production",job="api-server"}, 0.1) + {group="production", instance="0", job="api-server"} 0.1 + {group="production", instance="1", job="api-server"} 0.1 + +eval instant at 50m round(2.1 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1) + {group="production", instance="0", job="api-server"} 2.2 + {group="production", instance="1", job="api-server"} 2.2 + +eval instant at 50m round(5.2 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1) + {group="production", instance="0", job="api-server"} 5.3 + {group="production", instance="1", job="api-server"} 5.3 + +# Round should work correctly with negative numbers and multiple decimal places. +eval instant at 50m round(-1 * (5.2 + 0.0005 * http_requests{group="production",job="api-server"}), 0.1) + {group="production", instance="0", job="api-server"} -5.2 + {group="production", instance="1", job="api-server"} -5.3 + +# Round should work correctly with big toNearests. +eval instant at 50m round(0.025 * http_requests{group="production",job="api-server"}, 5) + {group="production", instance="0", job="api-server"} 5 + {group="production", instance="1", job="api-server"} 5 + +eval instant at 50m round(0.045 * http_requests{group="production",job="api-server"}, 5) + {group="production", instance="0", job="api-server"} 5 + {group="production", instance="1", job="api-server"} 10 + +# Standard deviation and variance. 
+eval instant at 50m stddev(http_requests) + {} 229.12878474779 + +eval instant at 50m stddev by (instance)(http_requests) + {instance="0"} 223.60679774998 + {instance="1"} 223.60679774998 + +eval instant at 50m stdvar(http_requests) + {} 52500 + +eval instant at 50m stdvar by (instance)(http_requests) + {instance="0"} 50000 + {instance="1"} 50000 diff --git a/pkg/receive/multitsdb.go b/pkg/receive/multitsdb.go index ca42bd24e4..ed297db7c8 100644 --- a/pkg/receive/multitsdb.go +++ b/pkg/receive/multitsdb.go @@ -295,7 +295,7 @@ func (t *MultiTSDB) startTSDB(logger log.Logger, tenantID string, tenant *tenant t.allowOutOfOrderUpload, ) } - tenant.set(store.NewTSDBStore(logger, reg, s, component.Receive, lset), s, ship) + tenant.set(store.NewTSDBStore(logger, s, component.Receive, lset), s, ship) level.Info(logger).Log("msg", "TSDB is now ready") return nil } diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 1f91fc16e5..9c31fdeaa7 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -867,7 +867,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie defer s.queryGate.Done() } - matchers, err := storepb.TranslateFromPromMatchers(req.Matchers...) + matchers, err := storepb.MatchersToPromMatchers(req.Matchers...) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } @@ -891,7 +891,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal series request hints").Error()) } - reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + reqBlockMatchers, err = storepb.MatchersToPromMatchers(reqHints.BlockMatchers...) if err != nil { return status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) } @@ -1076,7 +1076,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal label names request hints").Error()) } - reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + reqBlockMatchers, err = storepb.MatchersToPromMatchers(reqHints.BlockMatchers...) if err != nil { return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) } @@ -1160,7 +1160,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal label values request hints").Error()) } - reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + reqBlockMatchers, err = storepb.MatchersToPromMatchers(reqHints.BlockMatchers...) if err != nil { return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) } diff --git a/pkg/store/local.go b/pkg/store/local.go index 1eee0ea2ca..a7a7583bdf 100644 --- a/pkg/store/local.go +++ b/pkg/store/local.go @@ -151,22 +151,17 @@ func (s *LocalStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.I // Series returns all series for a requested time range and label matcher. The returned data may // exceed the requested time bounds. 
func (s *LocalStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - match, newMatchers, err := matchesExternalLabels(r.Matchers, s.extLabels) + match, matchers, err := matchesExternalLabels(r.Matchers, s.extLabels) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } if !match { return nil } - if len(newMatchers) == 0 { + if len(matchers) == 0 { return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) } - matchers, err := storepb.TranslateFromPromMatchers(newMatchers...) - if err != nil { - return status.Error(codes.InvalidArgument, err.Error()) - } - var chosen []int for si, series := range s.series { lbls := labelpb.ZLabelsToPromLabels(series.Labels) diff --git a/pkg/store/prometheus.go b/pkg/store/prometheus.go index 81458f318c..7239c4b3ad 100644 --- a/pkg/store/prometheus.go +++ b/pkg/store/prometheus.go @@ -135,16 +135,14 @@ func (p *PrometheusStore) putBuffer(b *[]byte) { func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_SeriesServer) error { extLset := p.externalLabelsFn() - match, newMatchers, err := matchesExternalLabels(r.Matchers, extLset) + match, matchers, err := matchesExternalLabels(r.Matchers, extLset) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } - if !match { return nil } - - if len(newMatchers) == 0 { + if len(matchers) == 0 { return status.Error(codes.InvalidArgument, "no matchers specified (excluding external labels)") } @@ -155,7 +153,7 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_Serie } if r.SkipChunks { - labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, newMatchers, r.MinTime, r.MaxTime) + labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime) if err != nil { return err } @@ -176,18 +174,17 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_Serie } q := &prompb.Query{StartTimestampMs: r.MinTime, EndTimestampMs: r.MaxTime} - - for _, m := range newMatchers { + for _, m := range matchers { pm := &prompb.LabelMatcher{Name: m.Name, Value: m.Value} switch m.Type { - case storepb.LabelMatcher_EQ: + case labels.MatchEqual: pm.Type = prompb.LabelMatcher_EQ - case storepb.LabelMatcher_NEQ: + case labels.MatchNotEqual: pm.Type = prompb.LabelMatcher_NEQ - case storepb.LabelMatcher_RE: + case labels.MatchRegexp: pm.Type = prompb.LabelMatcher_RE - case storepb.LabelMatcher_NRE: + case labels.MatchNotRegexp: pm.Type = prompb.LabelMatcher_NRE default: return errors.New("unrecognized matcher type") @@ -377,8 +374,8 @@ func (p *PrometheusStore) chunkSamples(series *prompb.TimeSeries, maxSamplesPerC } chks = append(chks, storepb.AggrChunk{ - MinTime: int64(samples[0].Timestamp), - MaxTime: int64(samples[chunkSize-1].Timestamp), + MinTime: samples[0].Timestamp, + MaxTime: samples[chunkSize-1].Timestamp, Raw: &storepb.Chunk{Type: enc, Data: cb}, }) @@ -434,25 +431,26 @@ func (p *PrometheusStore) startPromRemoteRead(ctx context.Context, q *prompb.Que return presp, nil } -// matchesExternalLabels filters out external labels matching from matcher if exists as the local storage does not have them. -// It also returns false if given matchers are not matching external labels. -func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labels) (bool, []storepb.LabelMatcher, error) { - if len(externalLabels) == 0 { - return true, ms, nil - } - - tms, err := storepb.TranslateFromPromMatchers(ms...) 
+// matchesExternalLabels returns false if the given matchers do not match the external labels. +// If it returns true, it also returns the Prometheus matchers with those matching external labels filtered out. +func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labels) (bool, []*labels.Matcher, error) { + tms, err := storepb.MatchersToPromMatchers(ms...) if err != nil { return false, nil, err } - var newMatcher []storepb.LabelMatcher + if len(externalLabels) == 0 { + return true, tms, nil + } + + var newMatchers []*labels.Matcher for i, tm := range tms { // Validate all matchers. extValue := externalLabels.Get(tm.Name) if extValue == "" { // Agnostic to external labels. - newMatcher = append(newMatcher, ms[i]) + tms = append(tms[:i], tms[i:]...) + newMatchers = append(newMatchers, tm) continue } @@ -462,8 +460,7 @@ func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labe return false, nil, nil } } - - return true, newMatcher, nil + return true, newMatchers, nil } // encodeChunk translates the sample pairs into a chunk. diff --git a/pkg/store/prometheus_test.go b/pkg/store/prometheus_test.go index 6766a1ea2b..4cda3ddafa 100644 --- a/pkg/store/prometheus_test.go +++ b/pkg/store/prometheus_test.go @@ -36,8 +36,6 @@ func TestPrometheusStore_Series_promOnPath_e2e(t *testing.T) { } func testPrometheusStoreSeriesE2e(t *testing.T, prefix string) { - t.Helper() - defer testutil.TolerantVerifyLeak(t) p, err := e2eutil.NewPrometheusOnPath(prefix) diff --git a/pkg/store/proxy.go b/pkg/store/proxy.go index 235adab589..4391a36619 100644 --- a/pkg/store/proxy.go +++ b/pkg/store/proxy.go @@ -188,24 +188,27 @@ func (s cancelableRespSender) send(r *storepb.SeriesResponse) { // Series returns all series for a requested time range and label matcher. Requested series are taken from other // stores and proxied to RPC client. NOTE: Resulting data is not trimmed exactly to the min and max time range. func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - match, newMatchers, err := matchesExternalLabels(r.Matchers, s.selectorLabels) + // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be + // triggered by tracing span to reduce cognitive load. + reqLogger := log.With(s.logger, "component", "proxy", "request", r.String()) + + match, matchers, err := matchesExternalLabels(r.Matchers, s.selectorLabels) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } if !match { return nil } - - if len(newMatchers) == 0 { - return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) + if len(matchers) == 0 { + return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding selector labels)").Error()) } + storeMatchers, _ := storepb.PromMatchersToMatchers(matchers...) // Error would be returned by matchesExternalLabels, so skip check. g, gctx := errgroup.WithContext(srv.Context()) // Allow buffering of max 10 series responses. // Each might be quite large (multi chunk long series given by sidecar). respSender, respCh := newCancelableRespChannel(gctx, 10) - g.Go(func() error { // This goroutine is responsible for calling store's Series concurrently. Merged results // are passed to respCh and sent concurrently to client (if buffer of 10 have room).
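The rewritten helper's contract: a matcher that names an external label must match that label's value and is then dropped, because local storage does not carry external labels; any other matcher is kept for local selection. A minimal, self-contained sketch of this behavior (filterExt is a hypothetical stand-in for illustration, not the actual implementation):

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/pkg/labels"
	)

	// filterExt mirrors the matchesExternalLabels semantics: it reports whether
	// the request can match this store at all, and returns the matchers that are
	// left for local storage to apply.
	func filterExt(ms []*labels.Matcher, ext labels.Labels) (bool, []*labels.Matcher) {
		var kept []*labels.Matcher
		for _, m := range ms {
			if v := ext.Get(m.Name); v != "" {
				if !m.Matches(v) {
					return false, nil // External label contradicts the request; no data here.
				}
				continue // Satisfied by the external label; drop it.
			}
			kept = append(kept, m)
		}
		return true, kept
	}

	func main() {
		ext := labels.FromStrings("region", "eu")
		ms := []*labels.Matcher{
			labels.MustNewMatcher(labels.MatchEqual, "region", "eu"), // Dropped: matched by the external label.
			labels.MustNewMatcher(labels.MatchEqual, "job", "api"),   // Kept for local storage.
		}
		ok, kept := filterExt(ms, ext)
		fmt.Println(ok, kept) // true [job="api"]
	}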
@@ -217,7 +220,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe r = &storepb.SeriesRequest{ MinTime: r.MinTime, MaxTime: r.MaxTime, - Matchers: newMatchers, + Matchers: storeMatchers, Aggregates: r.Aggregates, MaxResolutionWindow: r.MaxResolutionWindow, SkipChunks: r.SkipChunks, @@ -232,24 +235,12 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe }() for _, st := range s.stores() { - // We might be able to skip the store if its meta information indicates - // it cannot have series matching our query. - // NOTE: all matchers are validated in matchesExternalLabels method so we explicitly ignore error. - var ok bool - tracing.DoInSpan(gctx, "store_matches", func(ctx context.Context) { - var storeDebugMatcher [][]*labels.Matcher - if ctxVal := srv.Context().Value(StoreMatcherKey); ctxVal != nil { - if value, ok := ctxVal.([][]*labels.Matcher); ok { - storeDebugMatcher = value - } - } - // We can skip error, we already translated matchers once. - ok, _ = storeMatches(st, r.MinTime, r.MaxTime, storeDebugMatcher, r.Matchers...) - }) - if !ok { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s filtered out", st)) + // We might be able to skip the store if its meta information indicates it cannot have series matching our query. + if ok, reason := storeMatches(gctx, st, r.MinTime, r.MaxTime, matchers...); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s filtered out: %v", st, reason)) continue } + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) // This is used to cancel this stream when one operation takes too long. @@ -267,7 +258,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe } err = errors.Wrapf(err, "fetch series for %s %s", storeID, st) if r.PartialResponseDisabled { - level.Error(s.logger).Log("err", err, "msg", "partial response disabled; aborting request") + level.Error(reqLogger).Log("err", err, "msg", "partial response disabled; aborting request") return err } respSender.send(storepb.NewWarnSeriesResponse(err)) @@ -276,15 +267,16 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe // Schedule streamSeriesSet that translates gRPC streamed response // into seriesSet (if series) or respCh if warnings. - seriesSet = append(seriesSet, startStreamSeriesSet(seriesCtx, s.logger, closeSeries, + seriesSet = append(seriesSet, startStreamSeriesSet(seriesCtx, reqLogger, closeSeries, wg, sc, respSender, st.String(), !r.PartialResponseDisabled, s.responseTimeout, s.metrics.emptyStreamResponses)) } - level.Debug(s.logger).Log("msg", strings.Join(storeDebugMsgs, ";")) + level.Debug(reqLogger).Log("msg", "Series: started fanout streams", "status", strings.Join(storeDebugMsgs, ";")) + if len(seriesSet) == 0 { // This indicates that the configured StoreAPIs are not the ones the end user expects. err := errors.New("No StoreAPIs matched for this query") - level.Warn(s.logger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) + level.Warn(reqLogger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) respSender.send(storepb.NewWarnSeriesResponse(err)) return nil } @@ -312,7 +304,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe }) if err := g.Wait(); err != nil { // TODO(bwplotka): Replace with request logger.
- level.Error(s.logger).Log("err", err) + level.Error(reqLogger).Log("err", err) return err } return nil @@ -483,44 +475,58 @@ func (s *streamSeriesSet) Err() error { return errors.Wrap(s.err, s.name) } -// matchStore returns true if the given store may hold data for the given label matchers. -func storeMatches(s Client, mint, maxt int64, storeDebugMatchers [][]*labels.Matcher, matchers ...storepb.LabelMatcher) (bool, error) { +// storeMatches returns true if the given store may hold data for the given label matchers, time range and the debug store matchers gathered from the context. +// It also produces a tracing span. +func storeMatches(ctx context.Context, s Client, mint, maxt int64, matchers ...*labels.Matcher) (ok bool, reason string) { + span, ctx := tracing.StartSpan(ctx, "store_matches") + defer span.Finish() + + var storeDebugMatcher [][]*labels.Matcher + if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { + if value, ok := ctxVal.([][]*labels.Matcher); ok { + storeDebugMatcher = value + } + } + storeMinTime, storeMaxTime := s.TimeRange() - if mint > storeMaxTime || maxt <= storeMinTime { - return false, nil + if mint > storeMaxTime || maxt < storeMinTime { + return false, fmt.Sprintf("does not have data within this time period: [%v,%v]. Store time ranges: [%v,%v]", mint, maxt, storeMinTime, storeMaxTime) } - if !storeMatchDebugMetadata(s, storeDebugMatchers) { - return false, nil + if ok, reason := storeMatchDebugMetadata(s, storeDebugMatcher); !ok { + return false, reason } - promMatchers, err := storepb.TranslateFromPromMatchers(matchers...) - if err != nil { - return false, err + extLset := s.LabelSets() + if !labelSetsMatch(matchers, extLset...) { + return false, fmt.Sprintf("external labels %v does not match request label matchers: %v", extLset, matchers) } - return labelSetsMatch(promMatchers, s.LabelSets()...), nil + return true, "" } // storeMatchDebugMetadata returns true if the store's address matches the storeDebugMatchers. -func storeMatchDebugMetadata(s Client, storeDebugMatchers [][]*labels.Matcher) bool { +func storeMatchDebugMetadata(s Client, storeDebugMatchers [][]*labels.Matcher) (ok bool, reason string) { if len(storeDebugMatchers) == 0 { - return true + return true, "" } match := false for _, sm := range storeDebugMatchers { match = match || labelSetsMatch(sm, labels.FromStrings("__address__", s.Addr())) } - return match + if !match { + return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", s.Addr(), storeDebugMatchers) + } + return true, "" } // labelSetsMatch returns true if at least one of the given label sets matches the matchers (i.e. label sets are ORed). -func labelSetsMatch(matchers []*labels.Matcher, lss ...labels.Labels) bool { - if len(lss) == 0 { +func labelSetsMatch(matchers []*labels.Matcher, lset ...labels.Labels) bool { + if len(lset) == 0 { return true } - for _, ls := range lss { + for _, ls := range lset { notMatched := false for _, m := range matchers { if lv := ls.Get(m.Name); lv != "" && !m.Matches(lv) { @@ -549,19 +555,10 @@ func (s *ProxyStore) LabelNames(ctx context.Context, r *storepb.LabelNamesReques for _, st := range s.stores() { st := st - var ok bool - tracing.DoInSpan(gctx, "store_matches", func(ctx context.Context) { - var storeDebugMatcher [][]*labels.Matcher - if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { - if value, ok := ctxVal.([][]*labels.Matcher); ok { - storeDebugMatcher = value - } - } - // We can skip error, we already translated matchers once.
- ok, _ = storeMatches(st, r.Start, r.End, storeDebugMatcher) - }) - if !ok { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out", st)) + + // We might be able to skip the store if its meta information indicates it cannot have series matching our query. + if ok, reason := storeMatches(gctx, st, r.Start, r.End); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to %v", st, reason)) continue } storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) @@ -617,33 +614,24 @@ func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequ ) for _, st := range s.stores() { - store := st - var ok bool - tracing.DoInSpan(gctx, "store_matches", func(ctx context.Context) { - var storeDebugMatcher [][]*labels.Matcher - if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { - if value, ok := ctxVal.([][]*labels.Matcher); ok { - storeDebugMatcher = value - } - } - // We can skip error, we already translated matchers once. - ok, _ = storeMatches(st, r.Start, r.End, storeDebugMatcher) - }) - if !ok { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out", st)) + st := st + + // We might be able to skip the store if its meta information indicates it cannot have series matching our query. + if ok, reason := storeMatches(gctx, st, r.Start, r.End); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to %v", st, reason)) continue } storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) g.Go(func() error { - resp, err := store.LabelValues(gctx, &storepb.LabelValuesRequest{ + resp, err := st.LabelValues(gctx, &storepb.LabelValuesRequest{ Label: r.Label, PartialResponseDisabled: r.PartialResponseDisabled, Start: r.Start, End: r.End, }) if err != nil { - err = errors.Wrapf(err, "fetch label values from store %s", store) + err = errors.Wrapf(err, "fetch label values from store %s", st) if r.PartialResponseDisabled { return err } diff --git a/pkg/store/proxy_test.go b/pkg/store/proxy_test.go index 5331edf7f0..366d16ee77 100644 --- a/pkg/store/proxy_test.go +++ b/pkg/store/proxy_test.go @@ -1409,31 +1409,34 @@ func TestStoreMatches(t *testing.T) { for _, c := range []struct { s Client mint, maxt int64 + ms []*labels.Matcher - ms []storepb.LabelMatcher - expectedMatch bool + expectedMatch bool + expectedReason string }{ { s: &testClient{labelSets: []labels.Labels{labels.FromStrings("a", "b")}}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "1"}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "b", "1"), }, - - expectedMatch: false, + maxt: -1, + expectedMatch: false, + expectedReason: "does not have data within this time period: [0,-1]. Store time ranges: [0,0]", }, { s: &testClient{labelSets: []labels.Labels{labels.FromStrings("a", "b")}}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "b", Value: "1"}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "b", "1"), }, maxt: 1, expectedMatch: true, }, { - s: &testClient{minTime: 100, maxTime: 200}, - mint: 201, - maxt: 300, - expectedMatch: false, + s: &testClient{minTime: 100, maxTime: 200}, + mint: 201, + maxt: 300, + expectedMatch: false, + expectedReason: "does not have data within this time period: [201,300]. 
Store time ranges: [100,200]", }, { s: &testClient{minTime: 100, maxTime: 200}, @@ -1442,10 +1445,11 @@ func TestStoreMatches(t *testing.T) { expectedMatch: true, }, { - s: &testClient{minTime: 100, maxTime: 200}, - mint: 50, - maxt: 100, - expectedMatch: false, + s: &testClient{minTime: 100, maxTime: 200}, + mint: 50, + maxt: 99, + expectedMatch: false, + expectedReason: "does not have data within this time period: [50,99]. Store time ranges: [100,200]", }, { s: &testClient{minTime: 100, maxTime: 200}, @@ -1455,32 +1459,33 @@ func TestStoreMatches(t *testing.T) { }, { s: &testClient{labelSets: []labels.Labels{labels.FromStrings("a", "b")}}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "b"}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "a", "b"), }, maxt: 1, expectedMatch: true, }, { s: &testClient{labelSets: []labels.Labels{labels.FromStrings("a", "b")}}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "c"}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "a", "c"), }, - maxt: 1, - expectedMatch: false, + maxt: 1, + expectedMatch: false, + expectedReason: "external labels [{a=\"b\"}] does not match request label matchers: [a=\"c\"]", }, { s: &testClient{labelSets: []labels.Labels{labels.FromStrings("a", "b")}}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_RE, Name: "a", Value: "b|c"}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, "a", "b|c"), }, maxt: 1, expectedMatch: true, }, { s: &testClient{labelSets: []labels.Labels{labels.FromStrings("a", "b")}}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_NEQ, Name: "a", Value: ""}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchNotRegexp, "a", ""), }, maxt: 1, expectedMatch: true, @@ -1491,11 +1496,12 @@ func TestStoreMatches(t *testing.T) { labels.FromStrings("a", "c"), labels.FromStrings("a", "d"), }}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "e"}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "a", "e"), }, - maxt: 1, - expectedMatch: false, + maxt: 1, + expectedMatch: false, + expectedReason: "external labels [{a=\"b\"} {a=\"c\"} {a=\"d\"}] does not match request label matchers: [a=\"e\"]", }, { s: &testClient{labelSets: []labels.Labels{ @@ -1503,8 +1509,8 @@ func TestStoreMatches(t *testing.T) { labels.FromStrings("a", "c"), labels.FromStrings("a", "d"), }}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "c"}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "a", "c"), }, maxt: 1, expectedMatch: true, @@ -1515,17 +1521,18 @@ func TestStoreMatches(t *testing.T) { labels.FromStrings("a", "c"), labels.FromStrings("a", "d"), }}, - ms: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_NEQ, Name: "a", Value: ""}, + ms: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchNotRegexp, "a", ""), }, maxt: 1, expectedMatch: true, }, } { t.Run("", func(t *testing.T) { - ok, err := storeMatches(c.s, c.mint, c.maxt, nil, c.ms...) - testutil.Ok(t, err) + ok, reason := storeMatches(context.TODO(), c.s, c.mint, c.maxt, c.ms...) 
testutil.Equals(t, c.expectedMatch, ok) + testutil.Equals(t, c.expectedReason, reason) + }) } } @@ -1879,7 +1886,15 @@ func TestProxyStore_NotLeakingOnPrematureFinish(t *testing.T) { func TestProxyStore_storeMatchMetadata(t *testing.T) { c := testClient{} - testutil.Assert(t, storeMatchDebugMetadata(c, [][]*labels.Matcher{{}})) - testutil.Assert(t, !storeMatchDebugMetadata(c, [][]*labels.Matcher{{labels.MustNewMatcher(labels.MatchEqual, "__address__", "wrong")}})) - testutil.Assert(t, storeMatchDebugMetadata(c, [][]*labels.Matcher{{labels.MustNewMatcher(labels.MatchEqual, "__address__", "testaddr")}})) + ok, reason := storeMatchDebugMetadata(c, [][]*labels.Matcher{{}}) + testutil.Assert(t, ok) + testutil.Equals(t, "", reason) + + ok, reason = storeMatchDebugMetadata(c, [][]*labels.Matcher{{labels.MustNewMatcher(labels.MatchEqual, "__address__", "wrong")}}) + testutil.Assert(t, !ok) + testutil.Equals(t, "__address__ testaddr does not match debug store metadata matchers: [[__address__=\"wrong\"]]", reason) + + ok, reason = storeMatchDebugMetadata(c, [][]*labels.Matcher{{labels.MustNewMatcher(labels.MatchEqual, "__address__", "testaddr")}}) + testutil.Assert(t, ok) + testutil.Equals(t, "", reason) } diff --git a/pkg/store/storepb/custom.go b/pkg/store/storepb/custom.go index 41f9095272..63c3808135 100644 --- a/pkg/store/storepb/custom.go +++ b/pkg/store/storepb/custom.go @@ -337,9 +337,9 @@ func (x *PartialResponseStrategy) MarshalJSON() ([]byte, error) { return []byte(strconv.Quote(x.String())), nil } -// TranslatePromMatchers returns proto matchers from Prometheus matchers. +// PromMatchersToMatchers returns proto matchers from Prometheus matchers. // NOTE: It allocates memory. -func TranslatePromMatchers(ms ...*labels.Matcher) ([]LabelMatcher, error) { +func PromMatchersToMatchers(ms ...*labels.Matcher) ([]LabelMatcher, error) { res := make([]LabelMatcher, 0, len(ms)) for _, m := range ms { var t LabelMatcher_Type @@ -361,10 +361,9 @@ func TranslatePromMatchers(ms ...*labels.Matcher) ([]LabelMatcher, error) { return res, nil } -// TranslateFromPromMatchers returns Prometheus matchers from proto matchers. +// MatchersToPromMatchers returns Prometheus matchers from proto matchers. // NOTE: It allocates memory. -// TODO(bwplotka): Create yolo/no-alloc helper. -func TranslateFromPromMatchers(ms ...LabelMatcher) ([]*labels.Matcher, error) { +func MatchersToPromMatchers(ms ...LabelMatcher) ([]*labels.Matcher, error) { res := make([]*labels.Matcher, 0, len(ms)) for _, m := range ms { var t labels.MatchType diff --git a/pkg/store/storepb/custom_test.go b/pkg/store/storepb/custom_test.go index 0ca200d394..35901d52bc 100644 --- a/pkg/store/storepb/custom_test.go +++ b/pkg/store/storepb/custom_test.go @@ -506,12 +506,12 @@ func TestMatchersToString_Translate(t *testing.T) { t.Run(c.expected, func(t *testing.T) { testutil.Equals(t, c.expected, MatchersToString(c.ms...)) - promMs, err := TranslateFromPromMatchers(c.ms...) + promMs, err := MatchersToPromMatchers(c.ms...) testutil.Ok(t, err) testutil.Equals(t, c.expected, PromMatchersToString(promMs...)) - ms, err := TranslatePromMatchers(promMs...) + ms, err := PromMatchersToMatchers(promMs...) testutil.Ok(t, err) testutil.Equals(t, c.ms, ms) diff --git a/pkg/store/storepb/inprocess.go b/pkg/store/storepb/inprocess.go new file mode 100644 index 0000000000..aeb4a25aef --- /dev/null +++ b/pkg/store/storepb/inprocess.go @@ -0,0 +1,97 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
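+
+// A minimal usage sketch (hypothetical caller; srv is any StoreServer and the
+// receive buffer size of 10 is an arbitrary choice):
+//
+//	client := ServerAsClient(srv, 10)
+//	stream, err := client.Series(ctx, &SeriesRequest{MinTime: mint, MaxTime: maxt})
+//	// handle err ...
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break
+//		}
+//		// handle err, consume resp ...
+//	}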
+ +package storepb + +import ( + "context" + "io" + + "google.golang.org/grpc" +) + +// ServerAsClient returns a StoreClient that invokes the given StoreServer directly, in process and without gRPC. +func ServerAsClient(srv StoreServer, clientReceiveBufferSize int) StoreClient { + return &serverAsClient{srv: srv, clientReceiveBufferSize: clientReceiveBufferSize} +} + +// serverAsClient allows using a StoreServer as a StoreClient. +// NOTE: Passing CallOptions does not work - they would need to be implemented in grpc itself (before, after are private). +type serverAsClient struct { + clientReceiveBufferSize int + srv StoreServer +} + +func (s serverAsClient) Info(ctx context.Context, in *InfoRequest, _ ...grpc.CallOption) (*InfoResponse, error) { + return s.srv.Info(ctx, in) +} + +func (s serverAsClient) LabelNames(ctx context.Context, in *LabelNamesRequest, _ ...grpc.CallOption) (*LabelNamesResponse, error) { + return s.srv.LabelNames(ctx, in) +} + +func (s serverAsClient) LabelValues(ctx context.Context, in *LabelValuesRequest, _ ...grpc.CallOption) (*LabelValuesResponse, error) { + return s.srv.LabelValues(ctx, in) +} + +func (s serverAsClient) Series(ctx context.Context, in *SeriesRequest, _ ...grpc.CallOption) (Store_SeriesClient, error) { + inSrv := &inProcessStream{recv: make(chan *SeriesResponse, s.clientReceiveBufferSize), err: make(chan error)} + inSrv.ctx, inSrv.cancel = context.WithCancel(ctx) + go func() { + inSrv.err <- s.srv.Series(in, inSrv) + close(inSrv.err) + close(inSrv.recv) + }() + return &inProcessClientStream{srv: inSrv}, nil +} + +// TODO(bwplotka): Add streaming attributes, metadata etc. Currently those are disconnected. Follow up on https://github.com/grpc/grpc-go/issues/906. +// TODO(bwplotka): Use this in proxy.go and receiver multi tenant proxy. +type inProcessStream struct { + grpc.ServerStream + + ctx context.Context + cancel context.CancelFunc + recv chan *SeriesResponse + err chan error +} + +func (s *inProcessStream) Context() context.Context { return s.ctx } + +func (s *inProcessStream) Send(r *SeriesResponse) error { + select { + case <-s.ctx.Done(): + return s.ctx.Err() + case s.recv <- r: + return nil + } +} + +type inProcessClientStream struct { + grpc.ClientStream + + srv *inProcessStream +} + +func (s *inProcessClientStream) Context() context.Context { return s.srv.ctx } + +func (s *inProcessClientStream) CloseSend() error { + s.srv.cancel() + return nil +} + +func (s *inProcessClientStream) Recv() (*SeriesResponse, error) { + select { + case <-s.srv.ctx.Done(): + return nil, s.srv.ctx.Err() + case r, ok := <-s.srv.recv: + if !ok { + return nil, io.EOF + } + return r, nil + case err := <-s.srv.err: + if err == nil { + return nil, io.EOF + } + return nil, err + } +} diff --git a/pkg/store/storepb/inprocess_test.go b/pkg/store/storepb/inprocess_test.go new file mode 100644 index 0000000000..9f67098c1e --- /dev/null +++ b/pkg/store/storepb/inprocess_test.go @@ -0,0 +1,265 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0.
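+
+// NOTE: clientReceiveBufferSize bounds how far the wrapped server can run
+// ahead of the client: with a buffer of 0 every Send blocks until the client
+// calls Recv, while larger buffers let the server produce that many responses
+// in advance.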
+ +package storepb + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/testutil" +) + +type testStoreServer struct { + info *InfoResponse + infoLastReq *InfoRequest + + series []*SeriesResponse + seriesLastReq *SeriesRequest + + labelNames *LabelNamesResponse + labelNamesLastReq *LabelNamesRequest + + labelValues *LabelValuesResponse + labelValuesLastReq *LabelValuesRequest + + err error +} + +func (t *testStoreServer) Info(_ context.Context, r *InfoRequest) (*InfoResponse, error) { + t.infoLastReq = r + return t.info, t.err +} + +func (t *testStoreServer) Series(r *SeriesRequest, server Store_SeriesServer) error { + t.seriesLastReq = r + for i, s := range t.series { + if t.err != nil && i == len(t.series)/2 { + return t.err + } + if err := server.Send(s); err != nil { + return err + } + } + return nil +} + +func (t *testStoreServer) LabelNames(_ context.Context, r *LabelNamesRequest) (*LabelNamesResponse, error) { + t.labelNamesLastReq = r + return t.labelNames, t.err +} + +func (t *testStoreServer) LabelValues(_ context.Context, r *LabelValuesRequest) (*LabelValuesResponse, error) { + t.labelValuesLastReq = r + return t.labelValues, t.err +} + +func TestServerAsClient(t *testing.T) { + for _, bufferSize := range []int{0, 1, 20, 100} { + t.Run(fmt.Sprintf("buffer=%v", bufferSize), func(t *testing.T) { + t.Run("Info", func(t *testing.T) { + s := &testStoreServer{ + info: &InfoResponse{ + LabelSets: []labelpb.ZLabelSet{{Labels: []labelpb.ZLabel{{Name: "a", Value: "b"}}}}, + MinTime: -1, + MaxTime: 10, + StoreType: StoreType_DEBUG, + }} + t.Run("ok", func(t *testing.T) { + for i := 0; i < 20; i++ { + r := &InfoRequest{} + resp, err := ServerAsClient(s, 0).Info(context.TODO(), r) + testutil.Ok(t, err) + testutil.Equals(t, s.info, resp) + testutil.Equals(t, r, s.infoLastReq) + s.infoLastReq = nil + } + }) + t.Run("error", func(t *testing.T) { + s.err = errors.New("some error") + for i := 0; i < 20; i++ { + r := &InfoRequest{} + _, err := ServerAsClient(s, 0).Info(context.TODO(), r) + testutil.NotOk(t, err) + testutil.Equals(t, s.err, err) + } + }) + }) + t.Run("Series", func(t *testing.T) { + s := &testStoreServer{ + series: []*SeriesResponse{ + NewSeriesResponse(&Series{ + Labels: []labelpb.ZLabel{{Name: "a", Value: "b"}}, + Chunks: []AggrChunk{{MinTime: 123, MaxTime: 124}, {MinTime: 12455, MaxTime: 14124}}, + }), + NewSeriesResponse(&Series{ + Labels: []labelpb.ZLabel{{Name: "a", Value: "b1"}}, + Chunks: []AggrChunk{{MinTime: 1231, MaxTime: 124}, {MinTime: 12455, MaxTime: 14124}}, + }), + NewWarnSeriesResponse(errors.New("yolo")), + NewSeriesResponse(&Series{ + Labels: []labelpb.ZLabel{{Name: "a", Value: "b3"}}, + Chunks: []AggrChunk{{MinTime: 123, MaxTime: 124}, {MinTime: 124554, MaxTime: 14124}}, + }), + }} + t.Run("ok", func(t *testing.T) { + for i := 0; i < 20; i++ { + r := &SeriesRequest{ + MinTime: -214, + MaxTime: 213, + Matchers: []LabelMatcher{{Value: "wfsdfs", Name: "__name__", Type: LabelMatcher_EQ}}, + PartialResponseStrategy: PartialResponseStrategy_ABORT, + } + client, err := ServerAsClient(s, 0).Series(context.TODO(), r) + testutil.Ok(t, err) + var resps []*SeriesResponse + for { + resp, err := client.Recv() + if err == io.EOF { + break + } + testutil.Ok(t, err) + resps = append(resps, resp) + } + testutil.Equals(t, s.series, resps) + testutil.Equals(t, r, s.seriesLastReq) + s.seriesLastReq = nil + } + }) + t.Run("ok, close send", func(t *testing.T) { + s.err = 
errors.New("some error") + for i := 0; i < 20; i++ { + r := &SeriesRequest{ + MinTime: -214, + MaxTime: 213, + Matchers: []LabelMatcher{{Value: "wfsdfs", Name: "__name__", Type: LabelMatcher_EQ}}, + PartialResponseStrategy: PartialResponseStrategy_ABORT, + } + client, err := ServerAsClient(s, 0).Series(context.TODO(), r) + testutil.Ok(t, err) + var resps []*SeriesResponse + for { + if len(resps) == len(s.series)/2 { + testutil.Ok(t, client.CloseSend()) + break + } + resp, err := client.Recv() + if err == io.EOF { + break + } + testutil.Ok(t, err) + resps = append(resps, resp) + } + testutil.Equals(t, s.series[:len(s.series)/2], resps) + testutil.Equals(t, r, s.seriesLastReq) + s.seriesLastReq = nil + } + }) + t.Run("error", func(t *testing.T) { + for i := 0; i < 20; i++ { + r := &SeriesRequest{ + MinTime: -214, + MaxTime: 213, + Matchers: []LabelMatcher{{Value: "wfsdfs", Name: "__name__", Type: LabelMatcher_EQ}}, + PartialResponseStrategy: PartialResponseStrategy_ABORT, + } + client, err := ServerAsClient(s, 0).Series(context.TODO(), r) + testutil.Ok(t, err) + var resps []*SeriesResponse + for { + resp, err := client.Recv() + if err == io.EOF { + break + } + if err == s.err { + break + } + testutil.Ok(t, err) + resps = append(resps, resp) + } + testutil.Equals(t, s.series[:len(s.series)/2], resps) + testutil.Equals(t, r, s.seriesLastReq) + s.seriesLastReq = nil + } + }) + }) + t.Run("LabelNames", func(t *testing.T) { + s := &testStoreServer{ + info: &InfoResponse{ + LabelSets: []labelpb.ZLabelSet{{Labels: []labelpb.ZLabel{{Name: "a", Value: "b"}}}}, + MinTime: -1, + MaxTime: 10, + StoreType: StoreType_DEBUG, + }} + t.Run("ok", func(t *testing.T) { + for i := 0; i < 20; i++ { + r := &LabelNamesRequest{ + Start: -1, + End: 234, + PartialResponseStrategy: PartialResponseStrategy_ABORT, + } + resp, err := ServerAsClient(s, 0).LabelNames(context.TODO(), r) + testutil.Ok(t, err) + testutil.Equals(t, s.labelNames, resp) + testutil.Equals(t, r, s.labelNamesLastReq) + s.labelNamesLastReq = nil + } + }) + t.Run("error", func(t *testing.T) { + s.err = errors.New("some error") + for i := 0; i < 20; i++ { + r := &LabelNamesRequest{ + Start: -1, + End: 234, + PartialResponseStrategy: PartialResponseStrategy_ABORT, + } + _, err := ServerAsClient(s, 0).LabelNames(context.TODO(), r) + testutil.NotOk(t, err) + testutil.Equals(t, s.err, err) + } + }) + }) + t.Run("LabelValues", func(t *testing.T) { + s := &testStoreServer{ + labelValues: &LabelValuesResponse{ + Warnings: []string{"1", "a"}, + Values: []string{"abc1", "go_goroutines"}, + }, + } + t.Run("ok", func(t *testing.T) { + for i := 0; i < 20; i++ { + r := &LabelValuesRequest{ + Label: "__name__", + Start: -1, + End: 234, + PartialResponseStrategy: PartialResponseStrategy_ABORT, + } + resp, err := ServerAsClient(s, 0).LabelValues(context.TODO(), r) + testutil.Ok(t, err) + testutil.Equals(t, s.labelValues, resp) + testutil.Equals(t, r, s.labelValuesLastReq) + s.labelValuesLastReq = nil + } + }) + t.Run("error", func(t *testing.T) { + s.err = errors.New("some error") + for i := 0; i < 20; i++ { + r := &LabelValuesRequest{ + Label: "__name__", + Start: -1, + End: 234, + PartialResponseStrategy: PartialResponseStrategy_ABORT, + } + _, err := ServerAsClient(s, 0).LabelValues(context.TODO(), r) + testutil.NotOk(t, err) + testutil.Equals(t, s.err, err) + } + }) + }) + }) + } +} diff --git a/pkg/store/tsdb.go b/pkg/store/tsdb.go index 09e964c1ab..ca25072fb0 100644 --- a/pkg/store/tsdb.go +++ b/pkg/store/tsdb.go @@ -10,7 +10,6 @@ import ( 
"github.com/go-kit/kit/log" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/store/labelpb" @@ -55,7 +54,7 @@ type ReadWriteTSDBStore struct { // NewTSDBStore creates a new TSDBStore. // NOTE: Given lset has to be sorted. -func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db TSDBReader, component component.StoreAPI, extLset labels.Labels) *TSDBStore { +func NewTSDBStore(logger log.Logger, db TSDBReader, component component.StoreAPI, extLset labels.Labels) *TSDBStore { if logger == nil { logger = log.NewNopLogger() } @@ -102,7 +101,7 @@ type CloseDelegator interface { // Series returns all series for a requested time range and label matcher. The returned data may // exceed the requested time bounds. func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - match, newMatchers, err := matchesExternalLabels(r.Matchers, s.extLset) + match, matchers, err := matchesExternalLabels(r.Matchers, s.extLset) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } @@ -111,15 +110,10 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer return nil } - if len(newMatchers) == 0 { + if len(matchers) == 0 { return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) } - matchers, err := storepb.TranslateFromPromMatchers(newMatchers...) - if err != nil { - return status.Error(codes.InvalidArgument, err.Error()) - } - q, err := s.db.ChunkQuerier(context.Background(), r.MinTime, r.MaxTime) if err != nil { return status.Error(codes.Internal, err.Error()) @@ -136,16 +130,16 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer // Stream at most one series per frame; series may be split over multiple frames according to maxBytesInFrame. 
diff --git a/pkg/store/tsdb_test.go b/pkg/store/tsdb_test.go
index d00d37737f..8a4b70769a 100644
--- a/pkg/store/tsdb_test.go
+++ b/pkg/store/tsdb_test.go
@@ -37,7 +37,7 @@ func TestTSDBStore_Info(t *testing.T) {
 	defer func() { testutil.Ok(t, db.Close()) }()
 	testutil.Ok(t, err)
-	tsdbStore := NewTSDBStore(nil, nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
+	tsdbStore := NewTSDBStore(nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
 	resp, err := tsdbStore.Info(ctx, &storepb.InfoRequest{})
 	testutil.Ok(t, err)
@@ -71,7 +71,7 @@ func TestTSDBStore_Series(t *testing.T) {
 	defer func() { testutil.Ok(t, db.Close()) }()
 	testutil.Ok(t, err)
-	tsdbStore := NewTSDBStore(nil, nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
+	tsdbStore := NewTSDBStore(nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
 	appender := db.Appender(context.Background())
@@ -205,7 +205,7 @@ func TestTSDBStore_LabelNames(t *testing.T) {
 		}
 	}
-	tsdbStore := NewTSDBStore(nil, nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
+	tsdbStore := NewTSDBStore(nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
 	now := time.Now()
 	head := db.Head()
@@ -311,7 +311,7 @@ func TestTSDBStore_LabelValues(t *testing.T) {
 		}
 	}
-	tsdbStore := NewTSDBStore(nil, nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
+	tsdbStore := NewTSDBStore(nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
 	now := time.Now()
 	head := db.Head()
 	for _, tc := range []struct {
@@ -399,7 +399,7 @@ func TestTSDBStore_Series_SplitSamplesIntoChunksWithMaxSizeOf120(t *testing.T) {
 	testutil.Ok(t, err)
 	testSeries_SplitSamplesIntoChunksWithMaxSizeOf120(t, db.Appender(context.Background()), func() storepb.StoreServer {
-		return NewTSDBStore(nil, nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
+		return NewTSDBStore(nil, db, component.Rule, labels.FromStrings("region", "eu-west"))
 	})
 }
@@ -457,7 +457,7 @@ func TestTSDBStore_SeriesAccessWithDelegateClosing(t *testing.T) {
 	})
 	extLabels := labels.FromStrings("ext", "1")
-	store := NewTSDBStore(logger, nil, &mockedStartTimeDB{DBReadOnly: db, startTime: 0}, component.Receive, extLabels)
+	store := NewTSDBStore(logger, &mockedStartTimeDB{DBReadOnly: db, startTime: 0}, component.Receive, extLabels)
 	srv := storetestutil.NewSeriesServer(context.Background())
 	csrv := &delegatorServer{SeriesServer: srv}
@@ -618,7 +618,7 @@ func TestTSDBStore_SeriesAccessWithoutDelegateClosing(t *testing.T) {
 	})
 	extLabels := labels.FromStrings("ext", "1")
-	store := NewTSDBStore(logger, nil, &mockedStartTimeDB{DBReadOnly: db, startTime: 0}, component.Receive, extLabels)
+	store := NewTSDBStore(logger, &mockedStartTimeDB{DBReadOnly: db, startTime: 0}, component.Receive, extLabels)
 	srv := storetestutil.NewSeriesServer(context.Background())
 	t.Run("call series and access results", func(t *testing.T) {
@@ -768,7 +768,7 @@ func benchTSDBStoreSeries(t testutil.TB, totalSamples, totalSeries int) {
 	defer func() { testutil.Ok(t, db.Close()) }()
 	extLabels := labels.FromStrings("ext", "1")
-	store := NewTSDBStore(logger, nil, &mockedStartTimeDB{DBReadOnly: db, startTime: 0}, component.Receive, extLabels)
+	store := NewTSDBStore(logger, &mockedStartTimeDB{DBReadOnly: db, startTime: 0}, component.Receive, extLabels)
 	var expected []*storepb.Series
 	for _, resp := range resps {
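Reviewer note: the two e2e diffs below move the test helpers from hand-built storepb.LabelMatcher values to Prometheus-native *labels.Matcher, so matcher construction and validation live in one place. A short sketch of the new call shape; the regexp matcher is an added illustration, not taken from this patch:

package example

import "github.com/prometheus/prometheus/pkg/labels"

// upMatchers builds the slice the updated series() helper expects.
// MustNewMatcher panics on invalid input, which is acceptable in test code.
func upMatchers() []*labels.Matcher {
	return []*labels.Matcher{
		labels.MustNewMatcher(labels.MatchEqual, "__name__", "up"),
		labels.MustNewMatcher(labels.MatchRegexp, "job", "prometheus.*"),
	}
}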
diff --git a/test/e2e/query_frontend_test.go b/test/e2e/query_frontend_test.go
index 54840459a9..9dff09f998 100644
--- a/test/e2e/query_frontend_test.go
+++ b/test/e2e/query_frontend_test.go
@@ -17,7 +17,6 @@ import (
 	"github.com/thanos-io/thanos/pkg/cacheutil"
 	"github.com/thanos-io/thanos/pkg/promclient"
 	"github.com/thanos-io/thanos/pkg/queryfrontend"
-	"github.com/thanos-io/thanos/pkg/store/storepb"
 	"github.com/thanos-io/thanos/pkg/testutil"
 	"github.com/thanos-io/thanos/test/e2e/e2ethanos"
 )
@@ -306,7 +305,7 @@ func TestQueryFrontend(t *testing.T) {
 		t,
 		ctx,
 		queryFrontend.HTTPEndpoint(),
-		[]storepb.LabelMatcher{{Type: storepb.LabelMatcher_EQ, Name: "__name__", Value: "up"}},
+		[]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "__name__", "up")},
 		timestamp.FromTime(now.Add(-time.Hour)),
 		timestamp.FromTime(now.Add(time.Hour)),
 		func(res []map[string]string) bool {
@@ -342,7 +341,7 @@ func TestQueryFrontend(t *testing.T) {
 		t,
 		ctx,
 		queryFrontend.HTTPEndpoint(),
-		[]storepb.LabelMatcher{{Type: storepb.LabelMatcher_EQ, Name: "__name__", Value: "up"}},
+		[]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "__name__", "up")},
 		timestamp.FromTime(now.Add(-24*time.Hour)),
 		timestamp.FromTime(now.Add(time.Hour)),
 		func(res []map[string]string) bool {
diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go
index 103e008358..c027abda22 100644
--- a/test/e2e/query_test.go
+++ b/test/e2e/query_test.go
@@ -21,11 +21,12 @@ import (
 	"github.com/go-kit/kit/log"
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/thanos-io/thanos/pkg/store/storepb"
 	"github.com/thanos-io/thanos/pkg/promclient"
 	"github.com/thanos-io/thanos/pkg/runutil"
-	"github.com/thanos-io/thanos/pkg/store/storepb"
 	"github.com/thanos-io/thanos/pkg/testutil"
 	"github.com/thanos-io/thanos/test/e2e/e2ethanos"
 )
@@ -486,7 +487,7 @@ func labelValues(t *testing.T, ctx context.Context, addr, label string, matchers
 	}))
 }
-func series(t *testing.T, ctx context.Context, addr string, matchers []storepb.LabelMatcher, start int64, end int64, check func(res []map[string]string) bool) {
+func series(t *testing.T, ctx context.Context, addr string, matchers []*labels.Matcher, start int64, end int64, check func(res []map[string]string) bool) {
 	t.Helper()
 	logger := log.NewLogfmtLogger(os.Stdout)