From 9c9491843a1a335d18c4ed33fee5291ef5cfcc28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Wed, 15 May 2019 16:59:05 +0300 Subject: [PATCH 01/23] store/bucket_test: add interleaved resolutions test for getFor() --- pkg/store/bucket_test.go | 67 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 4e9e581014..bc50953770 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -16,6 +16,73 @@ import ( "github.com/prometheus/tsdb/labels" ) +func TestBucketBlockSet_Interleaved(t *testing.T) { + defer leaktest.CheckTimeout(t, 10*time.Second)() + + set := newBucketBlockSet(labels.Labels{}) + + type resBlock struct { + mint, maxt int64 + window int64 + } + input := []resBlock{ + {window: downsample.ResLevel2, mint: 0, maxt: 50}, + {window: downsample.ResLevel1, mint: 50, maxt: 100}, + {window: downsample.ResLevel2, mint: 100, maxt: 200}, + {window: downsample.ResLevel1, mint: 200, maxt: 300}, + {window: downsample.ResLevel1, mint: 300, maxt: 400}, + {window: downsample.ResLevel1, mint: 400, maxt: 500}, + {window: downsample.ResLevel0, mint: 500, maxt: 600}, + {window: downsample.ResLevel0, mint: 600, maxt: 700}, + } + + for _, in := range input { + var m metadata.Meta + m.Thanos.Downsample.Resolution = in.window + m.MinTime = in.mint + m.MaxTime = in.maxt + + testutil.Ok(t, set.add(&bucketBlock{meta: &m})) + } + + cases := []struct { + mint, maxt int64 + minResolution int64 + res []resBlock + }{ + { + mint: 0, + maxt: 700, + minResolution: downsample.ResLevel2, + res: []resBlock{ + {window: downsample.ResLevel2, mint: 0, maxt: 50}, + {window: downsample.ResLevel1, mint: 50, maxt: 100}, + {window: downsample.ResLevel2, mint: 100, maxt: 200}, + {window: downsample.ResLevel1, mint: 200, maxt: 300}, + {window: downsample.ResLevel1, mint: 300, maxt: 400}, + {window: downsample.ResLevel1, mint: 400, maxt: 500}, + {window: downsample.ResLevel0, mint: 500, maxt: 600}, + {window: downsample.ResLevel0, mint: 600, maxt: 700}, + }, + }, + } + + for i, c := range cases { + t.Logf("case %d", i) + + var exp []*bucketBlock + for _, b := range c.res { + var m metadata.Meta + m.Thanos.Downsample.Resolution = b.window + m.MinTime = b.mint + m.MaxTime = b.maxt + exp = append(exp, &bucketBlock{meta: &m}) + } + res := set.getFor(c.mint, c.maxt, c.minResolution) + testutil.Equals(t, exp, res) + } +} + func TestBucketBlockSet_addGet(t *testing.T) { defer leaktest.CheckTimeout(t, 10*time.Second)() From cd3363cfca26a9db80aceeecad57182e60b8557a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Wed, 15 May 2019 17:41:28 +0300 Subject: [PATCH 02/23] store/bucket: include blocks in the middle as well --- pkg/store/bucket.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 3823d84225..950925c053 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -1022,6 +1022,8 @@ func (s *bucketBlockSet) getFor(mint, maxt, minResolution int64) (bs []*bucketBl } // Our current resolution might not cover all data, recursively fill the gaps at the start // and end of [mint, maxt] with higher resolution blocks. + // + // Plus, fill the possible gaps between the current blocks with higher resolution blocks. i++ // No higher resolution left, we are done. 
if i >= len(s.resolutions) { @@ -1030,10 +1032,26 @@ func (s *bucketBlockSet) getFor(mint, maxt, minResolution int64) (bs []*bucketBl if len(bs) == 0 { return s.getFor(mint, maxt, s.resolutions[i]) } + middle := []*bucketBlock{} + for bsi := 0; bsi < len(bs)-1; bsi++ { + if bs[bsi+1].meta.MinTime-bs[bsi].meta.MaxTime > 0 { + middle = append(middle, s.getFor(bs[bsi].meta.MaxTime, bs[bsi+1].meta.MinTime, s.resolutions[i])...) + } + } + left := s.getFor(mint, bs[0].meta.MinTime, s.resolutions[i]) right := s.getFor(bs[len(bs)-1].meta.MaxTime, maxt, s.resolutions[i]) - return append(left, append(bs, right...)...) + result := append(middle, append(left, append(bs, right...)...)...) + + // Sort the result just one more time since it might be out of order. + sort.Slice(result, func(j, k int) bool { + if result[j].meta.MinTime < result[k].meta.MinTime { + return true + } + return false + }) + return result } // labelMatchers verifies whether the block set matches the given matchers and returns a new From 00b2c7c29e9420443dea52988596c302cf71b33e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Thu, 16 May 2019 09:53:54 +0300 Subject: [PATCH 03/23] store/bucket: add test cases with duplicated time ranges --- pkg/store/bucket.go | 2 +- pkg/store/bucket_test.go | 76 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 950925c053..af4d562625 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -1042,7 +1042,7 @@ func (s *bucketBlockSet) getFor(mint, maxt, minResolution int64) (bs []*bucketBl left := s.getFor(mint, bs[0].meta.MinTime, s.resolutions[i]) right := s.getFor(bs[len(bs)-1].meta.MaxTime, maxt, s.resolutions[i]) - result := append(middle, append(left, append(bs, right...)...)...) + result := append(left, append(middle, append(bs, right...)...)...) // Sort the result just one more time since it might be out of order. sort.Slice(result, func(j, k int) bool { diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index bc50953770..03d08952ba 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -16,6 +16,72 @@ import ( "github.com/prometheus/tsdb/labels" ) +// TestBucketBlockSet with blocks which have the same time range +// but different resolutions. 
+func TestBucketBlockSet_Duplicated(t *testing.T) { + defer leaktest.CheckTimeout(t, 10*time.Second)() + + set := newBucketBlockSet(labels.Labels{}) + + type resBlock struct { + mint, maxt int64 + window int64 + } + input := []resBlock{ + {window: downsample.ResLevel2, mint: 0, maxt: 100}, + {window: downsample.ResLevel0, mint: 0, maxt: 100}, + {window: downsample.ResLevel1, mint: 0, maxt: 100}, + {window: downsample.ResLevel0, mint: 100, maxt: 200}, + {window: downsample.ResLevel1, mint: 100, maxt: 200}, + {window: downsample.ResLevel2, mint: 100, maxt: 200}, + {window: downsample.ResLevel2, mint: 200, maxt: 300}, + {window: downsample.ResLevel1, mint: 200, maxt: 300}, + } + + for _, in := range input { + var m metadata.Meta + m.Thanos.Downsample.Resolution = in.window + m.MinTime = in.mint + m.MaxTime = in.maxt + + testutil.Ok(t, set.add(&bucketBlock{meta: &m})) + } + + cases := []struct { + mint, maxt int64 + minResolution int64 + res []resBlock + }{ + { + mint: 0, + maxt: 300, + minResolution: downsample.ResLevel2, + res: []resBlock{ + {window: downsample.ResLevel2, mint: 0, maxt: 100}, + {window: downsample.ResLevel2, mint: 100, maxt: 200}, + {window: downsample.ResLevel2, mint: 200, maxt: 300}, + }, + }, + } + + for i, c := range cases { + t.Logf("case %d", i) + + var exp []*bucketBlock + for _, b := range c.res { + var m metadata.Meta + m.Thanos.Downsample.Resolution = b.window + m.MinTime = b.mint + m.MaxTime = b.maxt + exp = append(exp, &bucketBlock{meta: &m}) + } + res := set.getFor(c.mint, c.maxt, c.minResolution) + testutil.Equals(t, exp, res) + } +} + +// TestBucketBlockSet with blocks with different resolutions +// that interleave between each other. func TestBucketBlockSet_Interleaved(t *testing.T) { defer leaktest.CheckTimeout(t, 10*time.Second)() @@ -65,6 +131,16 @@ func TestBucketBlockSet_Interleaved(t *testing.T) { {window: downsample.ResLevel0, mint: 600, maxt: 700}, }, }, + { + mint: 100, + maxt: 400, + minResolution: downsample.ResLevel2, + res: []resBlock{ + {window: downsample.ResLevel2, mint: 100, maxt: 200}, + {window: downsample.ResLevel1, mint: 200, maxt: 300}, + {window: downsample.ResLevel1, mint: 300, maxt: 400}, + }, + }, } for i, c := range cases { From 8f225045be693180fd3678514b46c50fb9248970 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Thu, 16 May 2019 11:05:24 +0300 Subject: [PATCH 04/23] query/querier: send proper maxSourceResolution Without this, we get max resolutions like 1, 2, 3 which do not mean anything to getFor(). With this, we get proper data from Thanos Store. --- pkg/query/querier.go | 2 +- pkg/store/bucket.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/query/querier.go b/pkg/query/querier.go index 07dc3ec71b..22b94cd8e5 100644 --- a/pkg/query/querier.go +++ b/pkg/query/querier.go @@ -55,7 +55,7 @@ type queryable struct { // Querier returns a new storage querier against the underlying proxy store API. 
func (q *queryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxSourceResolution/time.Millisecond), q.partialResponse, q.warningReporter), nil + return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxSourceResolution), q.partialResponse, q.warningReporter), nil } type querier struct { diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index af4d562625..47bfe6f40a 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -675,7 +675,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag // labels and resolution. This is important because we allow mixed resolution results, so it is quite crucial // to be aware what exactly resolution we see on query. // TODO(bplotka): Consider adding resolution label to all results to propagate that info to UI and Query API. -func debugFoundBlockSetOverview(logger log.Logger, mint, maxt int64, lset labels.Labels, bs []*bucketBlock) { +func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxresolution int64, lset labels.Labels, bs []*bucketBlock) { if len(bs) == 0 { level.Debug(logger).Log("msg", "No block found", "mint", mint, "maxt", maxt, "lset", lset.String()) return @@ -703,7 +703,7 @@ func debugFoundBlockSetOverview(logger log.Logger, mint, maxt int64, lset labels parts = append(parts, fmt.Sprintf("Range: %d-%d Resolution: %d", currMin, currMax, currRes)) - level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) + level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "maxresolution", maxresolution, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) } // Series implements the storepb.StoreServer interface. @@ -738,7 +738,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie blocks := bs.getFor(req.MinTime, req.MaxTime, req.MaxResolutionWindow) if s.debugLogging { - debugFoundBlockSetOverview(s.logger, req.MinTime, req.MaxTime, bs.labels, blocks) + debugFoundBlockSetOverview(s.logger, req.MinTime, req.MaxTime, req.MaxResolutionWindow, bs.labels, blocks) } for _, b := range blocks { From e2c46bfabf4f5bea130727dce68b49513fd079e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Thu, 16 May 2019 11:18:08 +0300 Subject: [PATCH 05/23] README: add entry --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 058003b6b0..9ad2dead2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ We use *breaking* word for marking changes that are not backward compatible (rel ### Fixed - [#1144](https://github.com/improbable-eng/thanos/pull/1144) Query/API: properly pass the downsampling parameter. Before this, wrong max resolution of the metrics data might have been selected. +- [#1146](https://github.com/improbable-eng/thanos/pull/1146) store/bucket: make getFor() work with interleaved resolutions, pass proper resolution from querier. 
## [v0.4.0](https://github.com/improbable-eng/thanos/releases/tag/v0.4.0) - 2019.05.3 From 342aede1ed7b746d51015f9cde042bf0ee31efd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Thu, 16 May 2019 11:55:51 +0300 Subject: [PATCH 06/23] query/querier_test: add queryableCreator test Makes a querier via queryableCreator and checks if the maxSourceResolution was passed properly. --- pkg/query/querier_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index 980d837213..bb61bc6483 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -19,6 +19,24 @@ import ( "github.com/prometheus/tsdb/chunkenc" ) +func TestQueryableCreator_MaxResolution(t *testing.T) { + defer leaktest.CheckTimeout(t, 10*time.Second)() + testProxy := &storeServer{resps: []*storepb.SeriesResponse{}} + queryableCreator := NewQueryableCreator(nil, testProxy, "test") + + queryable := queryableCreator(false, 1*time.Hour, false, func(err error) {}) + + q, err := queryable.Querier(context.Background(), 0, 42) + testutil.Ok(t, err) + defer func() { testutil.Ok(t, q.Close()) }() + + querierActual, ok := q.(*querier) + + testutil.Assert(t, ok == true, "expected it to be a querier") + testutil.Assert(t, querierActual.maxSourceResolution == int64(1*time.Hour), "expected max source resolution to be 1 hour") + +} + func TestQuerier_Series(t *testing.T) { defer leaktest.CheckTimeout(t, 10*time.Second)() From 4e73b2fa8ee7168d2dbff7268a406caf756ded9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Thu, 16 May 2019 15:57:50 +0300 Subject: [PATCH 07/23] store/bucket: do the iteration without sorting --- pkg/store/bucket.go | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 47bfe6f40a..196bc2e2cf 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -1032,26 +1032,23 @@ func (s *bucketBlockSet) getFor(mint, maxt, minResolution int64) (bs []*bucketBl if len(bs) == 0 { return s.getFor(mint, maxt, s.resolutions[i]) } - middle := []*bucketBlock{} - for bsi := 0; bsi < len(bs)-1; bsi++ { + + until := len(bs) - 1 + for bsi := 0; bsi < until; bsi++ { if bs[bsi+1].meta.MinTime-bs[bsi].meta.MaxTime > 0 { - middle = append(middle, s.getFor(bs[bsi].meta.MaxTime, bs[bsi+1].meta.MinTime, s.resolutions[i])...) + between := s.getFor(bs[bsi].meta.MaxTime, bs[bsi+1].meta.MinTime, s.resolutions[i]) + bs = append(bs[:bsi+1], append(between, bs[bsi+1:]...)...) + + // Push the iterators further. + bsi += len(between) + until += len(between) } } left := s.getFor(mint, bs[0].meta.MinTime, s.resolutions[i]) right := s.getFor(bs[len(bs)-1].meta.MaxTime, maxt, s.resolutions[i]) - result := append(left, append(middle, append(bs, right...)...)...) - - // Sort the result just one more time since it might be out of order. - sort.Slice(result, func(j, k int) bool { - if result[j].meta.MinTime < result[k].meta.MinTime { - return true - } - return false - }) - return result + return append(left, append(bs, right...)...) } // labelMatchers verifies whether the block set matches the given matchers and returns a new From fae1929018f3759041139313b2405ae3b0301bbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Thu, 16 May 2019 16:42:57 +0300 Subject: [PATCH 08/23] store/bucket: bsi->j in loop Makes it clearer that it's just a temporary variable for the loop. 
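For reference, a minimal standalone sketch of the gap-filling loop that PATCH 07/23 introduces and this rename touches. The interval type and the filler callback are simplifications of this sketch (the real code recurses into getFor() with the next finer resolution); the part worth noting is the index bookkeeping, where both j and the upper bound advance by the number of inserted blocks so the filler entries are not revisited.

// Standalone sketch (not part of the patch): in-place gap filling over a
// sorted slice while iterating it. Mirrors the append/j/until bookkeeping
// in getFor(); types and the fill callback are simplified assumptions.
package main

import "fmt"

type interval struct{ mint, maxt int64 }

func fillGaps(bs []interval, fill func(mint, maxt int64) []interval) []interval {
	until := len(bs) - 1
	for j := 0; j < until; j++ {
		if bs[j+1].mint-bs[j].maxt > 0 {
			between := fill(bs[j].maxt, bs[j+1].mint)
			// Splice the filler blocks in between the current pair.
			bs = append(bs[:j+1], append(between, bs[j+1:]...)...)
			// Skip past what was just inserted and widen the bound,
			// otherwise the loop would re-inspect the filler blocks.
			j += len(between)
			until += len(between)
		}
	}
	return bs
}

func main() {
	blocks := []interval{{mint: 0, maxt: 100}, {mint: 200, maxt: 300}}
	filled := fillGaps(blocks, func(mint, maxt int64) []interval {
		return []interval{{mint: mint, maxt: maxt}}
	})
	fmt.Println(filled) // [{0 100} {100 200} {200 300}]
}

Because the result is built in place and stays ordered, no final sort is needed, which is what lets the later patch drop the sort.Slice call.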
--- pkg/store/bucket.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 196bc2e2cf..1e9e75c3e1 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -1034,13 +1034,13 @@ func (s *bucketBlockSet) getFor(mint, maxt, minResolution int64) (bs []*bucketBl } until := len(bs) - 1 - for bsi := 0; bsi < until; bsi++ { - if bs[bsi+1].meta.MinTime-bs[bsi].meta.MaxTime > 0 { - between := s.getFor(bs[bsi].meta.MaxTime, bs[bsi+1].meta.MinTime, s.resolutions[i]) - bs = append(bs[:bsi+1], append(between, bs[bsi+1:]...)...) + for j := 0; j < until; j++ { + if bs[j+1].meta.MinTime-bs[j].meta.MaxTime > 0 { + between := s.getFor(bs[j].meta.MaxTime, bs[j+1].meta.MinTime, s.resolutions[i]) + bs = append(bs[:j+1], append(between, bs[j+1:]...)...) // Push the iterators further. - bsi += len(between) + j += len(between) until += len(between) } } From de4b79a2b5477261e9f21edcf501579888d6cee2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 11:56:25 +0300 Subject: [PATCH 09/23] store/bucket: fix according to review comments * Convert parseDownsamplingParam() into parseDownsamplingParamMillis() which properly returns int64 for use in the querier code * Add parseDownsamplingMillis() tests --- pkg/query/api/v1.go | 8 ++--- pkg/query/api/v1_test.go | 71 +++++++++++++++++++++++++++++++++++++++- pkg/query/querier.go | 8 ++--- pkg/store/bucket.go | 4 +-- 4 files changed, 79 insertions(+), 12 deletions(-) diff --git a/pkg/query/api/v1.go b/pkg/query/api/v1.go index 58f6b2df2e..972b88a7e9 100644 --- a/pkg/query/api/v1.go +++ b/pkg/query/api/v1.go @@ -203,9 +203,9 @@ func (api *API) parseEnableDedupParam(r *http.Request) (enableDeduplication bool return enableDeduplication, nil } -func (api *API) parseDownsamplingParam(r *http.Request, step time.Duration) (maxSourceResolution time.Duration, _ *ApiError) { +func (api *API) parseDownsamplingParamMillis(r *http.Request, step time.Duration) (maxSourceResolutionMillis int64, _ *ApiError) { const maxSourceResolutionParam = "max_source_resolution" - maxSourceResolution = 0 * time.Second + maxSourceResolution := 0 * time.Second if api.enableAutodownsampling { // If no max_source_resolution is specified fit at least 5 samples between steps. @@ -223,7 +223,7 @@ func (api *API) parseDownsamplingParam(r *http.Request, step time.Duration) (max return 0, &ApiError{errorBadData, errors.Errorf("negative '%s' is not accepted. 
Try a positive integer", maxSourceResolutionParam)} } - return maxSourceResolution, nil + return int64(maxSourceResolution / (1000 * 1000)), nil } func (api *API) parsePartialResponseParam(r *http.Request) (enablePartialResponse bool, _ *ApiError) { @@ -366,7 +366,7 @@ func (api *API) queryRange(r *http.Request) (interface{}, []error, *ApiError) { return nil, nil, apiErr } - maxSourceResolution, apiErr := api.parseDownsamplingParam(r, step) + maxSourceResolution, apiErr := api.parseDownsamplingParamMillis(r, step) if apiErr != nil { return nil, nil, apiErr } diff --git a/pkg/query/api/v1_test.go b/pkg/query/api/v1_test.go index 10b1792e2d..7b36a4f912 100644 --- a/pkg/query/api/v1_test.go +++ b/pkg/query/api/v1_test.go @@ -30,6 +30,7 @@ import ( "time" "github.com/go-kit/kit/log" + "github.com/improbable-eng/thanos/pkg/compact" "github.com/improbable-eng/thanos/pkg/query" "github.com/improbable-eng/thanos/pkg/testutil" opentracing "github.com/opentracing/opentracing-go" @@ -42,7 +43,7 @@ import ( ) func testQueryableCreator(queryable storage.Queryable) query.QueryableCreator { - return func(_ bool, _ time.Duration, _ bool, _ query.WarningReporter) storage.Queryable { + return func(_ bool, _ int64, _ bool, _ query.WarningReporter) storage.Queryable { return queryable } } @@ -833,3 +834,71 @@ func BenchmarkQueryResultEncoding(b *testing.B) { testutil.Ok(b, err) fmt.Println(len(c)) } + +func TestParseDownsamplingParamMillis(t *testing.T) { + var tests = []struct { + maxSourceResolution string + result int64 + step time.Duration + fail bool + enableAutodownsampling bool + }{ + { + maxSourceResolution: "0s", + enableAutodownsampling: false, + step: time.Hour, + result: int64(compact.ResolutionLevelRaw), + fail: false, + }, + { + maxSourceResolution: "5m", + step: time.Hour, + enableAutodownsampling: false, + result: int64(compact.ResolutionLevel5m), + fail: false, + }, + { + maxSourceResolution: "1h", + step: time.Hour, + enableAutodownsampling: false, + result: int64(compact.ResolutionLevel1h), + fail: false, + }, + { + maxSourceResolution: "", + enableAutodownsampling: true, + step: time.Hour, + result: int64(time.Hour / (5 * 1000 * 1000)), + fail: false, + }, + { + maxSourceResolution: "", + enableAutodownsampling: true, + step: time.Hour, + result: int64((1 * time.Hour) / 6), + fail: true, + }, + { + maxSourceResolution: "", + enableAutodownsampling: true, + step: time.Hour, + result: int64((1 * time.Hour) / 6), + fail: true, + }, + } + + for i, test := range tests { + api := API{enableAutodownsampling: test.enableAutodownsampling} + v := url.Values{} + v.Set("max_source_resolution", test.maxSourceResolution) + r := http.Request{PostForm: v} + + maxSourceRes, _ := api.parseDownsamplingParamMillis(&r, test.step) + if test.fail == false { + testutil.Assert(t, maxSourceRes == test.result, "case %v: expected %v to be equal to %v", i, maxSourceRes, test.result) + } else { + testutil.Assert(t, maxSourceRes != test.result, "case %v: expected %v not to be equal to %v", i, maxSourceRes, test.result) + } + + } +} diff --git a/pkg/query/querier.go b/pkg/query/querier.go index 22b94cd8e5..917e29780d 100644 --- a/pkg/query/querier.go +++ b/pkg/query/querier.go @@ -5,8 +5,6 @@ import ( "sort" "strings" - "time" - "github.com/go-kit/kit/log" "github.com/improbable-eng/thanos/pkg/store/storepb" "github.com/improbable-eng/thanos/pkg/tracing" @@ -26,11 +24,11 @@ type WarningReporter func(error) // If deduplication is enabled, all data retrieved from it will be deduplicated along the replicaLabel by default. 
// maxSourceResolution controls downsampling resolution that is allowed. // partialResponse controls `partialResponseDisabled` option of StoreAPI and partial response behaviour of proxy. -type QueryableCreator func(deduplicate bool, maxSourceResolution time.Duration, partialResponse bool, r WarningReporter) storage.Queryable +type QueryableCreator func(deduplicate bool, maxSourceResolution int64, partialResponse bool, r WarningReporter) storage.Queryable // NewQueryableCreator creates QueryableCreator. func NewQueryableCreator(logger log.Logger, proxy storepb.StoreServer, replicaLabel string) QueryableCreator { - return func(deduplicate bool, maxSourceResolution time.Duration, partialResponse bool, r WarningReporter) storage.Queryable { + return func(deduplicate bool, maxSourceResolution int64, partialResponse bool, r WarningReporter) storage.Queryable { return &queryable{ logger: logger, replicaLabel: replicaLabel, @@ -48,7 +46,7 @@ type queryable struct { replicaLabel string proxy storepb.StoreServer deduplicate bool - maxSourceResolution time.Duration + maxSourceResolution int64 partialResponse bool warningReporter WarningReporter } diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 1e9e75c3e1..b8f6ec785f 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -675,7 +675,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag // labels and resolution. This is important because we allow mixed resolution results, so it is quite crucial // to be aware what exactly resolution we see on query. // TODO(bplotka): Consider adding resolution label to all results to propagate that info to UI and Query API. -func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxresolution int64, lset labels.Labels, bs []*bucketBlock) { +func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxResolution int64, lset labels.Labels, bs []*bucketBlock) { if len(bs) == 0 { level.Debug(logger).Log("msg", "No block found", "mint", mint, "maxt", maxt, "lset", lset.String()) return @@ -703,7 +703,7 @@ func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxresolution int parts = append(parts, fmt.Sprintf("Range: %d-%d Resolution: %d", currMin, currMax, currRes)) - level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "maxresolution", maxresolution, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) + level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "Maximum Resolution", maxResolution, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) } // Series implements the storepb.StoreServer interface. 
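The conversion that the patch above introduces relies on time.Duration being an integer nanosecond count: dividing by time.Millisecond (equivalently 1000*1000 in the first version of the patch) yields the millisecond value that getFor() compares against block resolutions, whereas a plain int64 cast would send nanoseconds. A small standalone sketch of that arithmetic; it is not the Thanos parsing function itself, just the unit conversion it performs.

// Standalone sketch (not part of the patch): converting a parsed
// max_source_resolution duration into milliseconds for the store API.
package main

import (
	"fmt"
	"time"
)

func main() {
	for _, d := range []time.Duration{0, 5 * time.Minute, time.Hour} {
		// time.Duration counts nanoseconds, so int64(d) alone would yield
		// values like 300000000000 instead of the expected 300000.
		millis := int64(d / time.Millisecond)
		fmt.Printf("%v -> %d ms\n", d, millis)
	}
	// Output:
	// 0s -> 0 ms
	// 5m0s -> 300000 ms
	// 1h0m0s -> 3600000 ms
}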
From c3abb09a58ded2b27493c5329dd47a75b42bddf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 12:05:20 +0300 Subject: [PATCH 10/23] query/querier_test: fix test --- pkg/query/querier_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index bb61bc6483..29e9a4ca58 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -24,7 +24,8 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { testProxy := &storeServer{resps: []*storepb.SeriesResponse{}} queryableCreator := NewQueryableCreator(nil, testProxy, "test") - queryable := queryableCreator(false, 1*time.Hour, false, func(err error) {}) + oneHourMillis := int64(1*time.Hour) / (1000 * 1000) + queryable := queryableCreator(false, oneHourMillis, false, func(err error) {}) q, err := queryable.Querier(context.Background(), 0, 42) testutil.Ok(t, err) @@ -33,7 +34,7 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { querierActual, ok := q.(*querier) testutil.Assert(t, ok == true, "expected it to be a querier") - testutil.Assert(t, querierActual.maxSourceResolution == int64(1*time.Hour), "expected max source resolution to be 1 hour") + testutil.Assert(t, querierActual.maxSourceResolution == oneHourMillis, "expected max source resolution to be 1 hour in milliseconds") } From f22cffaa6d16b382df2d827fe52b23dc9c3f6625 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 13:38:49 +0300 Subject: [PATCH 11/23] *: clarify everywhere that max source resolution is in millis --- pkg/query/api/v1.go | 2 +- pkg/query/querier.go | 82 +++++++++++++++++++-------------------- pkg/query/querier_test.go | 2 +- pkg/store/bucket.go | 10 ++--- 4 files changed, 48 insertions(+), 48 deletions(-) diff --git a/pkg/query/api/v1.go b/pkg/query/api/v1.go index 972b88a7e9..d5095f97f0 100644 --- a/pkg/query/api/v1.go +++ b/pkg/query/api/v1.go @@ -223,7 +223,7 @@ func (api *API) parseDownsamplingParamMillis(r *http.Request, step time.Duration return 0, &ApiError{errorBadData, errors.Errorf("negative '%s' is not accepted. Try a positive integer", maxSourceResolutionParam)} } - return int64(maxSourceResolution / (1000 * 1000)), nil + return int64(maxSourceResolution / time.Millisecond), nil } func (api *API) parsePartialResponseParam(r *http.Request) (enablePartialResponse bool, _ *ApiError) { diff --git a/pkg/query/querier.go b/pkg/query/querier.go index 917e29780d..305eea0f10 100644 --- a/pkg/query/querier.go +++ b/pkg/query/querier.go @@ -22,51 +22,51 @@ type WarningReporter func(error) // QueryableCreator returns implementation of promql.Queryable that fetches data from the proxy store API endpoints. // If deduplication is enabled, all data retrieved from it will be deduplicated along the replicaLabel by default. -// maxSourceResolution controls downsampling resolution that is allowed. +// maxSourceResolutionMillis controls downsampling resolution that is allowed (specified in milliseconds). // partialResponse controls `partialResponseDisabled` option of StoreAPI and partial response behaviour of proxy. -type QueryableCreator func(deduplicate bool, maxSourceResolution int64, partialResponse bool, r WarningReporter) storage.Queryable +type QueryableCreator func(deduplicate bool, maxSourceResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable // NewQueryableCreator creates QueryableCreator. 
func NewQueryableCreator(logger log.Logger, proxy storepb.StoreServer, replicaLabel string) QueryableCreator { - return func(deduplicate bool, maxSourceResolution int64, partialResponse bool, r WarningReporter) storage.Queryable { + return func(deduplicate bool, maxSourceResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable { return &queryable{ - logger: logger, - replicaLabel: replicaLabel, - proxy: proxy, - deduplicate: deduplicate, - maxSourceResolution: maxSourceResolution, - partialResponse: partialResponse, - warningReporter: r, + logger: logger, + replicaLabel: replicaLabel, + proxy: proxy, + deduplicate: deduplicate, + maxSourceResolutionMillis: maxSourceResolutionMillis, + partialResponse: partialResponse, + warningReporter: r, } } } type queryable struct { - logger log.Logger - replicaLabel string - proxy storepb.StoreServer - deduplicate bool - maxSourceResolution int64 - partialResponse bool - warningReporter WarningReporter + logger log.Logger + replicaLabel string + proxy storepb.StoreServer + deduplicate bool + maxSourceResolutionMillis int64 + partialResponse bool + warningReporter WarningReporter } // Querier returns a new storage querier against the underlying proxy store API. func (q *queryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxSourceResolution), q.partialResponse, q.warningReporter), nil + return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxSourceResolutionMillis), q.partialResponse, q.warningReporter), nil } type querier struct { - ctx context.Context - logger log.Logger - cancel func() - mint, maxt int64 - replicaLabel string - proxy storepb.StoreServer - deduplicate bool - maxSourceResolution int64 - partialResponse bool - warningReporter WarningReporter + ctx context.Context + logger log.Logger + cancel func() + mint, maxt int64 + replicaLabel string + proxy storepb.StoreServer + deduplicate bool + maxSourceResolutionMillis int64 + partialResponse bool + warningReporter WarningReporter } // newQuerier creates implementation of storage.Querier that fetches data from the proxy @@ -78,7 +78,7 @@ func newQuerier( replicaLabel string, proxy storepb.StoreServer, deduplicate bool, - maxSourceResolution int64, + maxSourceResolutionMillis int64, partialResponse bool, warningReporter WarningReporter, ) *querier { @@ -90,17 +90,17 @@ func newQuerier( } ctx, cancel := context.WithCancel(ctx) return &querier{ - ctx: ctx, - logger: logger, - cancel: cancel, - mint: mint, - maxt: maxt, - replicaLabel: replicaLabel, - proxy: proxy, - deduplicate: deduplicate, - maxSourceResolution: maxSourceResolution, - partialResponse: partialResponse, - warningReporter: warningReporter, + ctx: ctx, + logger: logger, + cancel: cancel, + mint: mint, + maxt: maxt, + replicaLabel: replicaLabel, + proxy: proxy, + deduplicate: deduplicate, + maxSourceResolutionMillis: maxSourceResolutionMillis, + partialResponse: partialResponse, + warningReporter: warningReporter, } } @@ -183,7 +183,7 @@ func (q *querier) Select(params *storage.SelectParams, ms ...*labels.Matcher) (s MinTime: q.mint, MaxTime: q.maxt, Matchers: sms, - MaxResolutionWindow: q.maxSourceResolution, + MaxResolutionWindow: q.maxSourceResolutionMillis, Aggregates: queryAggrs, PartialResponseDisabled: !q.partialResponse, }, resp); err != nil { diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index 
29e9a4ca58..f17a4e7cdc 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -34,7 +34,7 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { querierActual, ok := q.(*querier) testutil.Assert(t, ok == true, "expected it to be a querier") - testutil.Assert(t, querierActual.maxSourceResolution == oneHourMillis, "expected max source resolution to be 1 hour in milliseconds") + testutil.Assert(t, querierActual.maxSourceResolutionMillis == oneHourMillis, "expected max source resolution to be 1 hour in milliseconds") } diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index b8f6ec785f..d4a199dedf 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -675,7 +675,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag // labels and resolution. This is important because we allow mixed resolution results, so it is quite crucial // to be aware what exactly resolution we see on query. // TODO(bplotka): Consider adding resolution label to all results to propagate that info to UI and Query API. -func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxResolution int64, lset labels.Labels, bs []*bucketBlock) { +func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxSourceResolutionMillis int64, lset labels.Labels, bs []*bucketBlock) { if len(bs) == 0 { level.Debug(logger).Log("msg", "No block found", "mint", mint, "maxt", maxt, "lset", lset.String()) return @@ -703,7 +703,7 @@ func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxResolution int parts = append(parts, fmt.Sprintf("Range: %d-%d Resolution: %d", currMin, currMax, currRes)) - level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "Maximum Resolution", maxResolution, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) + level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "Maximum Resolution", maxSourceResolutionMillis, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) } // Series implements the storepb.StoreServer interface. @@ -934,7 +934,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR type bucketBlockSet struct { labels labels.Labels mtx sync.RWMutex - resolutions []int64 // available resolution, high to low + resolutions []int64 // available resolution, high to low (in milliseconds) blocks [][]*bucketBlock // ordered buckets for the existing resolutions } @@ -997,7 +997,7 @@ func int64index(s []int64, x int64) int { // getFor returns a time-ordered list of blocks that cover date between mint and maxt. // Blocks with the lowest resolution possible but not lower than the given resolution are returned. -func (s *bucketBlockSet) getFor(mint, maxt, minResolution int64) (bs []*bucketBlock) { +func (s *bucketBlockSet) getFor(mint, maxt, maxSourceResolutionMillis int64) (bs []*bucketBlock) { if mint == maxt { return nil } @@ -1007,7 +1007,7 @@ func (s *bucketBlockSet) getFor(mint, maxt, minResolution int64) (bs []*bucketBl // Find first matching resolution. i := 0 - for ; i < len(s.resolutions) && s.resolutions[i] > minResolution; i++ { + for ; i < len(s.resolutions) && s.resolutions[i] > maxSourceResolutionMillis; i++ { } // Base case, we fill the given interval with the closest resolution. 
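For reference, a minimal standalone sketch of the resolution-selection step that the renamed maxSourceResolutionMillis parameter feeds into: the set keeps resolutions ordered from coarsest to finest, and the loop skips every level coarser than the requested maximum. The millisecond constants below are assumptions standing in for the 1h/5m/raw downsampling levels; the loop shape mirrors the one at the top of getFor().

// Standalone sketch (not part of the patch): picking the first resolution
// level that does not exceed the requested maximum, in milliseconds.
package main

import "fmt"

func main() {
	resolutions := []int64{3600000, 300000, 0} // coarsest to finest, in milliseconds

	pick := func(maxResolutionMillis int64) int64 {
		i := 0
		for ; i < len(resolutions) && resolutions[i] > maxResolutionMillis; i++ {
		}
		return resolutions[i]
	}

	fmt.Println(pick(0))       // 0: only raw blocks qualify
	fmt.Println(pick(300000))  // 300000: 5m downsampling allowed
	fmt.Println(pick(7200000)) // 3600000: 1h downsampling allowed
}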
From 5e9d0e9bf02ca5ab711b9be13c05b9d1ce490c64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 13:46:28 +0300 Subject: [PATCH 12/23] *: maxSourceResolutionMillis -> maxResolutionMillis --- pkg/query/api/v1.go | 2 +- pkg/query/api/v1_test.go | 78 +++++++++++++++++++------------------- pkg/query/querier.go | 82 ++++++++++++++++++++-------------------- pkg/store/bucket.go | 8 ++-- 4 files changed, 85 insertions(+), 85 deletions(-) diff --git a/pkg/query/api/v1.go b/pkg/query/api/v1.go index d5095f97f0..8c85bef1b0 100644 --- a/pkg/query/api/v1.go +++ b/pkg/query/api/v1.go @@ -203,7 +203,7 @@ func (api *API) parseEnableDedupParam(r *http.Request) (enableDeduplication bool return enableDeduplication, nil } -func (api *API) parseDownsamplingParamMillis(r *http.Request, step time.Duration) (maxSourceResolutionMillis int64, _ *ApiError) { +func (api *API) parseDownsamplingParamMillis(r *http.Request, step time.Duration) (maxResolutionMillis int64, _ *ApiError) { const maxSourceResolutionParam = "max_source_resolution" maxSourceResolution := 0 * time.Second diff --git a/pkg/query/api/v1_test.go b/pkg/query/api/v1_test.go index 7b36a4f912..696778ce85 100644 --- a/pkg/query/api/v1_test.go +++ b/pkg/query/api/v1_test.go @@ -837,67 +837,67 @@ func BenchmarkQueryResultEncoding(b *testing.B) { func TestParseDownsamplingParamMillis(t *testing.T) { var tests = []struct { - maxSourceResolution string - result int64 - step time.Duration - fail bool - enableAutodownsampling bool + maxSourceResolutionParam string + result int64 + step time.Duration + fail bool + enableAutodownsampling bool }{ { - maxSourceResolution: "0s", - enableAutodownsampling: false, - step: time.Hour, - result: int64(compact.ResolutionLevelRaw), - fail: false, + maxSourceResolutionParam: "0s", + enableAutodownsampling: false, + step: time.Hour, + result: int64(compact.ResolutionLevelRaw), + fail: false, }, { - maxSourceResolution: "5m", - step: time.Hour, - enableAutodownsampling: false, - result: int64(compact.ResolutionLevel5m), - fail: false, + maxSourceResolutionParam: "5m", + step: time.Hour, + enableAutodownsampling: false, + result: int64(compact.ResolutionLevel5m), + fail: false, }, { - maxSourceResolution: "1h", - step: time.Hour, - enableAutodownsampling: false, - result: int64(compact.ResolutionLevel1h), - fail: false, + maxSourceResolutionParam: "1h", + step: time.Hour, + enableAutodownsampling: false, + result: int64(compact.ResolutionLevel1h), + fail: false, }, { - maxSourceResolution: "", - enableAutodownsampling: true, - step: time.Hour, - result: int64(time.Hour / (5 * 1000 * 1000)), - fail: false, + maxSourceResolutionParam: "", + enableAutodownsampling: true, + step: time.Hour, + result: int64(time.Hour / (5 * 1000 * 1000)), + fail: false, }, { - maxSourceResolution: "", - enableAutodownsampling: true, - step: time.Hour, - result: int64((1 * time.Hour) / 6), - fail: true, + maxSourceResolutionParam: "", + enableAutodownsampling: true, + step: time.Hour, + result: int64((1 * time.Hour) / 6), + fail: true, }, { - maxSourceResolution: "", - enableAutodownsampling: true, - step: time.Hour, - result: int64((1 * time.Hour) / 6), - fail: true, + maxSourceResolutionParam: "", + enableAutodownsampling: true, + step: time.Hour, + result: int64((1 * time.Hour) / 6), + fail: true, }, } for i, test := range tests { api := API{enableAutodownsampling: test.enableAutodownsampling} v := url.Values{} - v.Set("max_source_resolution", test.maxSourceResolution) + v.Set("max_source_resolution", 
test.maxSourceResolutionParam) r := http.Request{PostForm: v} - maxSourceRes, _ := api.parseDownsamplingParamMillis(&r, test.step) + maxResMillis, _ := api.parseDownsamplingParamMillis(&r, test.step) if test.fail == false { - testutil.Assert(t, maxSourceRes == test.result, "case %v: expected %v to be equal to %v", i, maxSourceRes, test.result) + testutil.Assert(t, maxResMillis == test.result, "case %v: expected %v to be equal to %v", i, maxResMillis, test.result) } else { - testutil.Assert(t, maxSourceRes != test.result, "case %v: expected %v not to be equal to %v", i, maxSourceRes, test.result) + testutil.Assert(t, maxResMillis != test.result, "case %v: expected %v not to be equal to %v", i, maxResMillis, test.result) } } diff --git a/pkg/query/querier.go b/pkg/query/querier.go index 305eea0f10..5fcd26ee97 100644 --- a/pkg/query/querier.go +++ b/pkg/query/querier.go @@ -22,51 +22,51 @@ type WarningReporter func(error) // QueryableCreator returns implementation of promql.Queryable that fetches data from the proxy store API endpoints. // If deduplication is enabled, all data retrieved from it will be deduplicated along the replicaLabel by default. -// maxSourceResolutionMillis controls downsampling resolution that is allowed (specified in milliseconds). +// maxResolutionMillis controls downsampling resolution that is allowed (specified in milliseconds). // partialResponse controls `partialResponseDisabled` option of StoreAPI and partial response behaviour of proxy. -type QueryableCreator func(deduplicate bool, maxSourceResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable +type QueryableCreator func(deduplicate bool, maxResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable // NewQueryableCreator creates QueryableCreator. func NewQueryableCreator(logger log.Logger, proxy storepb.StoreServer, replicaLabel string) QueryableCreator { - return func(deduplicate bool, maxSourceResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable { + return func(deduplicate bool, maxResolutionMillis int64, partialResponse bool, r WarningReporter) storage.Queryable { return &queryable{ - logger: logger, - replicaLabel: replicaLabel, - proxy: proxy, - deduplicate: deduplicate, - maxSourceResolutionMillis: maxSourceResolutionMillis, - partialResponse: partialResponse, - warningReporter: r, + logger: logger, + replicaLabel: replicaLabel, + proxy: proxy, + deduplicate: deduplicate, + maxResolutionMillis: maxResolutionMillis, + partialResponse: partialResponse, + warningReporter: r, } } } type queryable struct { - logger log.Logger - replicaLabel string - proxy storepb.StoreServer - deduplicate bool - maxSourceResolutionMillis int64 - partialResponse bool - warningReporter WarningReporter + logger log.Logger + replicaLabel string + proxy storepb.StoreServer + deduplicate bool + maxResolutionMillis int64 + partialResponse bool + warningReporter WarningReporter } // Querier returns a new storage querier against the underlying proxy store API. 
func (q *queryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxSourceResolutionMillis), q.partialResponse, q.warningReporter), nil + return newQuerier(ctx, q.logger, mint, maxt, q.replicaLabel, q.proxy, q.deduplicate, int64(q.maxResolutionMillis), q.partialResponse, q.warningReporter), nil } type querier struct { - ctx context.Context - logger log.Logger - cancel func() - mint, maxt int64 - replicaLabel string - proxy storepb.StoreServer - deduplicate bool - maxSourceResolutionMillis int64 - partialResponse bool - warningReporter WarningReporter + ctx context.Context + logger log.Logger + cancel func() + mint, maxt int64 + replicaLabel string + proxy storepb.StoreServer + deduplicate bool + maxResolutionMillis int64 + partialResponse bool + warningReporter WarningReporter } // newQuerier creates implementation of storage.Querier that fetches data from the proxy @@ -78,7 +78,7 @@ func newQuerier( replicaLabel string, proxy storepb.StoreServer, deduplicate bool, - maxSourceResolutionMillis int64, + maxResolutionMillis int64, partialResponse bool, warningReporter WarningReporter, ) *querier { @@ -90,17 +90,17 @@ func newQuerier( } ctx, cancel := context.WithCancel(ctx) return &querier{ - ctx: ctx, - logger: logger, - cancel: cancel, - mint: mint, - maxt: maxt, - replicaLabel: replicaLabel, - proxy: proxy, - deduplicate: deduplicate, - maxSourceResolutionMillis: maxSourceResolutionMillis, - partialResponse: partialResponse, - warningReporter: warningReporter, + ctx: ctx, + logger: logger, + cancel: cancel, + mint: mint, + maxt: maxt, + replicaLabel: replicaLabel, + proxy: proxy, + deduplicate: deduplicate, + maxResolutionMillis: maxResolutionMillis, + partialResponse: partialResponse, + warningReporter: warningReporter, } } @@ -183,7 +183,7 @@ func (q *querier) Select(params *storage.SelectParams, ms ...*labels.Matcher) (s MinTime: q.mint, MaxTime: q.maxt, Matchers: sms, - MaxResolutionWindow: q.maxSourceResolutionMillis, + MaxResolutionWindow: q.maxResolutionMillis, Aggregates: queryAggrs, PartialResponseDisabled: !q.partialResponse, }, resp); err != nil { diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index d4a199dedf..f1bc4aea89 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -675,7 +675,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag // labels and resolution. This is important because we allow mixed resolution results, so it is quite crucial // to be aware what exactly resolution we see on query. // TODO(bplotka): Consider adding resolution label to all results to propagate that info to UI and Query API. 
-func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxSourceResolutionMillis int64, lset labels.Labels, bs []*bucketBlock) { +func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxResolutionMillis int64, lset labels.Labels, bs []*bucketBlock) { if len(bs) == 0 { level.Debug(logger).Log("msg", "No block found", "mint", mint, "maxt", maxt, "lset", lset.String()) return @@ -703,7 +703,7 @@ func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxSourceResoluti parts = append(parts, fmt.Sprintf("Range: %d-%d Resolution: %d", currMin, currMax, currRes)) - level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "Maximum Resolution", maxSourceResolutionMillis, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) + level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "Maximum Resolution", maxResolutionMillis, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) } // Series implements the storepb.StoreServer interface. @@ -997,7 +997,7 @@ func int64index(s []int64, x int64) int { // getFor returns a time-ordered list of blocks that cover date between mint and maxt. // Blocks with the lowest resolution possible but not lower than the given resolution are returned. -func (s *bucketBlockSet) getFor(mint, maxt, maxSourceResolutionMillis int64) (bs []*bucketBlock) { +func (s *bucketBlockSet) getFor(mint, maxt, maxResolutionMillis int64) (bs []*bucketBlock) { if mint == maxt { return nil } @@ -1007,7 +1007,7 @@ func (s *bucketBlockSet) getFor(mint, maxt, maxSourceResolutionMillis int64) (bs // Find first matching resolution. i := 0 - for ; i < len(s.resolutions) && s.resolutions[i] > maxSourceResolutionMillis; i++ { + for ; i < len(s.resolutions) && s.resolutions[i] > maxResolutionMillis; i++ { } // Base case, we fill the given interval with the closest resolution. From 420ebe04327071314543be08c2a2a3deff3ca4c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 13:49:04 +0300 Subject: [PATCH 13/23] CHANGELOG: update --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec859a4a8c..c46de23483 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ We use *breaking* word for marking changes that are not backward compatible (rel ### Fixed -- [#1146](https://github.com/improbable-eng/thanos/pull/1146) store/bucket: make getFor() work with interleaved resolutions, pass proper resolution from querier. 
+- [#1146](https://github.com/improbable-eng/thanos/pull/1146) store/bucket: make getFor() work with interleaved resolutions ### Added From e8b318958ea9c2c3a2a057d446e53ffa491b4042 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 13:49:52 +0300 Subject: [PATCH 14/23] query/querier_test: fix --- pkg/query/querier_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index f17a4e7cdc..79aa8247b9 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -34,7 +34,7 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { querierActual, ok := q.(*querier) testutil.Assert(t, ok == true, "expected it to be a querier") - testutil.Assert(t, querierActual.maxSourceResolutionMillis == oneHourMillis, "expected max source resolution to be 1 hour in milliseconds") + testutil.Assert(t, querierActual.maxResolutionMillis == oneHourMillis, "expected max source resolution to be 1 hour in milliseconds") } From bacfd0649ce111d70bd4524f3008a69d7042fcee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 17:36:40 +0300 Subject: [PATCH 15/23] store/bucket: add gets all data in range property test --- go.mod | 1 + go.sum | 2 ++ pkg/store/bucket.go | 2 +- pkg/store/bucket_test.go | 63 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 67 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 5ed4dedc09..46231cd576 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/memberlist v0.1.3 github.com/julienschmidt/httprouter v1.1.0 // indirect + github.com/leanovate/gopter v0.2.4 github.com/lovoo/gcloud-opentracing v0.3.0 github.com/miekg/dns v1.1.8 github.com/minio/minio-go v0.0.0-20200511070425-f33eae714a28 diff --git a/go.sum b/go.sum index 4b637c57d4..7fc481ed46 100644 --- a/go.sum +++ b/go.sum @@ -155,6 +155,8 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= +github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-go v0.15.6/go.mod h1:6AMpwZpsyCFwSovxzM78e+AsYxE8sGwiM6C3TytaWeI= github.com/lovoo/gcloud-opentracing v0.3.0 h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDLnHp0urPLU= diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index f1bc4aea89..e6cb680147 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -1012,7 +1012,7 @@ func (s *bucketBlockSet) getFor(mint, maxt, maxResolutionMillis int64) (bs []*bu // Base case, we fill the given interval with the closest resolution. 
for _, b := range s.blocks[i] { - if b.meta.MaxTime <= mint { + if b.meta.MinTime < mint && b.meta.MaxTime <= mint { continue } if b.meta.MinTime >= maxt { diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 03d08952ba..6d8d597dcc 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -12,10 +12,73 @@ import ( "github.com/improbable-eng/thanos/pkg/compact/downsample" "github.com/improbable-eng/thanos/pkg/store/storepb" "github.com/improbable-eng/thanos/pkg/testutil" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" "github.com/oklog/ulid" "github.com/prometheus/tsdb/labels" ) +func TestBucketBlock_Property(t *testing.T) { + parameters := gopter.DefaultTestParameters() + parameters.Rng.Seed(42) + parameters.MinSuccessfulTests = 5000 + properties := gopter.NewProperties(parameters) + + set := newBucketBlockSet(labels.Labels{}) + + type resBlock struct { + mint, maxt int64 + window int64 + } + input := []resBlock{ + {window: downsample.ResLevel2, mint: 0, maxt: 100}, + {window: downsample.ResLevel0, mint: 0, maxt: 100}, + {window: downsample.ResLevel1, mint: 0, maxt: 100}, + + {window: downsample.ResLevel0, mint: 100, maxt: 200}, + {window: downsample.ResLevel1, mint: 100, maxt: 200}, + {window: downsample.ResLevel2, mint: 100, maxt: 200}, + + {window: downsample.ResLevel2, mint: 200, maxt: 300}, + {window: downsample.ResLevel1, mint: 200, maxt: 300}, + {window: downsample.ResLevel0, mint: 200, maxt: 300}, + } + + for _, in := range input { + var m metadata.Meta + m.Thanos.Downsample.Resolution = in.window + m.MinTime = in.mint + m.MaxTime = in.maxt + + testutil.Ok(t, set.add(&bucketBlock{meta: &m})) + } + + properties.Property("getFor always gets all data in range", prop.ForAll( + func(low, high, maxResolution int64) bool { + defer leaktest.CheckTimeout(t, 10*time.Second)() + + res := set.getFor(low, high, maxResolution) + mint := int64(301) + maxt := int64(0) + for _, b := range res { + if b.meta.MinTime < mint { + mint = b.meta.MinTime + } + if b.meta.MaxTime > maxt { + maxt = b.meta.MaxTime + } + } + if low >= high { + return true + } + return len(res) >= 0 && (mint <= low || mint == 0) && (high <= maxt || math.Abs(float64(high-maxt)) <= 100) + }, gen.Int64Range(0, 300), gen.Int64Range(0, 300), gen.Int64Range(0, 60*60*1000)), + ) + + properties.TestingRun(t) +} + // TestBucketBlockSet with blocks which have the same time range // but different resolutions. func TestBucketBlockSet_Duplicated(t *testing.T) { From 93a764c38ca9ae929467cc5c5e49ae72244abb11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 19:15:41 +0300 Subject: [PATCH 16/23] store/bucket_test: add production property test --- pkg/store/bucket_test.go | 84 +++++++++++++++++++++++++++++----------- 1 file changed, 61 insertions(+), 23 deletions(-) diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 6d8d597dcc..f1af5f7e8b 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -31,18 +31,26 @@ func TestBucketBlock_Property(t *testing.T) { mint, maxt int64 window int64 } + // This input resembles a typical production-level block layout + // in remote object storage. 
input := []resBlock{ - {window: downsample.ResLevel2, mint: 0, maxt: 100}, {window: downsample.ResLevel0, mint: 0, maxt: 100}, - {window: downsample.ResLevel1, mint: 0, maxt: 100}, - {window: downsample.ResLevel0, mint: 100, maxt: 200}, - {window: downsample.ResLevel1, mint: 100, maxt: 200}, - {window: downsample.ResLevel2, mint: 100, maxt: 200}, - - {window: downsample.ResLevel2, mint: 200, maxt: 300}, - {window: downsample.ResLevel1, mint: 200, maxt: 300}, - {window: downsample.ResLevel0, mint: 200, maxt: 300}, + // Compaction level 2 begins but not downsampling (8 hour block length) + {window: downsample.ResLevel0, mint: 200, maxt: 600}, + {window: downsample.ResLevel0, mint: 600, maxt: 1000}, + // Compaction level 3 begins, Some of it is downsampled but still retained (48 hour block length) + {window: downsample.ResLevel0, mint: 1000, maxt: 1750}, + {window: downsample.ResLevel1, mint: 1000, maxt: 1750}, + // Compaction level 4 begins, different downsampling levels cover the same (336 hour block length) + {window: downsample.ResLevel0, mint: 1750, maxt: 7000}, + {window: downsample.ResLevel1, mint: 1750, maxt: 7000}, + {window: downsample.ResLevel2, mint: 1750, maxt: 7000}, + // Compaction level 4 already happened, raw samples have been deleted + {window: downsample.ResLevel0, mint: 7000, maxt: 14000}, + {window: downsample.ResLevel1, mint: 7000, maxt: 14000}, + // Compaction level 4 already happened, raw and downsample res level 1 samples have been deleted + {window: downsample.ResLevel1, mint: 14000, maxt: 21000}, } for _, in := range input { @@ -54,26 +62,56 @@ func TestBucketBlock_Property(t *testing.T) { testutil.Ok(t, set.add(&bucketBlock{meta: &m})) } - properties.Property("getFor always gets all data in range", prop.ForAll( + properties.Property("getFor always gets some data in range", prop.ForAll( func(low, high, maxResolution int64) bool { - defer leaktest.CheckTimeout(t, 10*time.Second)() + // Bogus case. + if low >= high { + return true + } res := set.getFor(low, high, maxResolution) - mint := int64(301) - maxt := int64(0) - for _, b := range res { - if b.meta.MinTime < mint { - mint = b.meta.MinTime + // We must always get some data. 
+ if len(res) == 0 { + return false + } + + // The data that we get must all encompass our requested range + if len(res) == 1 && (res[0].meta.Thanos.Downsample.Resolution > maxResolution || + res[0].meta.MinTime > low || + res[0].meta.MaxTime < high) { + return false + } else if len(res) > 1 { + mint := int64(21001) + maxt := int64(0) + for i := 0; i < len(res)-1; i++ { + if res[i].meta.Thanos.Downsample.Resolution > maxResolution { + return false + } + if res[i+1].meta.MinTime != res[i].meta.MaxTime { + return false + } + if res[i].meta.MinTime < mint { + mint = res[i].meta.MinTime + } + if res[i].meta.MaxTime > maxt { + maxt = res[i].meta.MaxTime + } } - if b.meta.MaxTime > maxt { - maxt = b.meta.MaxTime + if res[len(res)-1].meta.MinTime < mint { + mint = res[len(res)-1].meta.MinTime + } + if res[len(res)-1].meta.MaxTime > maxt { + maxt = res[len(res)-1].meta.MaxTime + } + if low < mint { + return false + } + if maxt < high { + return false } } - if low >= high { - return true - } - return len(res) >= 0 && (mint <= low || mint == 0) && (high <= maxt || math.Abs(float64(high-maxt)) <= 100) - }, gen.Int64Range(0, 300), gen.Int64Range(0, 300), gen.Int64Range(0, 60*60*1000)), + return true + }, gen.Int64Range(0, 21000), gen.Int64Range(0, 21000), gen.Int64Range(0, 60*60*1000)), ) properties.TestingRun(t) From f4b0a66ea049c1446d5edd4bf2776361aa99c0b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 19:28:29 +0300 Subject: [PATCH 17/23] store/bucket_test: fix --- pkg/store/bucket_test.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index f1af5f7e8b..9a2997b8dc 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -70,15 +70,10 @@ func TestBucketBlock_Property(t *testing.T) { } res := set.getFor(low, high, maxResolution) - // We must always get some data. 
- if len(res) == 0 { - return false - } // The data that we get must all encompass our requested range if len(res) == 1 && (res[0].meta.Thanos.Downsample.Resolution > maxResolution || - res[0].meta.MinTime > low || - res[0].meta.MaxTime < high) { + res[0].meta.MinTime > low) { return false } else if len(res) > 1 { mint := int64(21001) @@ -106,9 +101,7 @@ func TestBucketBlock_Property(t *testing.T) { if low < mint { return false } - if maxt < high { - return false - } + } return true }, gen.Int64Range(0, 21000), gen.Int64Range(0, 21000), gen.Int64Range(0, 60*60*1000)), From dac133e44d3dc56d521277b222c81e8d766428c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 19:36:25 +0300 Subject: [PATCH 18/23] store/bucket_test: add always gets property --- pkg/store/bucket_test.go | 53 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 9a2997b8dc..93a0cd47b2 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -22,7 +22,7 @@ import ( func TestBucketBlock_Property(t *testing.T) { parameters := gopter.DefaultTestParameters() parameters.Rng.Seed(42) - parameters.MinSuccessfulTests = 5000 + parameters.MinSuccessfulTests = 10000 properties := gopter.NewProperties(parameters) set := newBucketBlockSet(labels.Labels{}) @@ -62,7 +62,7 @@ func TestBucketBlock_Property(t *testing.T) { testutil.Ok(t, set.add(&bucketBlock{meta: &m})) } - properties.Property("getFor always gets some data in range", prop.ForAll( + properties.Property("getFor always gets at least some data in range", prop.ForAll( func(low, high, maxResolution int64) bool { // Bogus case. if low >= high { @@ -107,6 +107,55 @@ func TestBucketBlock_Property(t *testing.T) { }, gen.Int64Range(0, 21000), gen.Int64Range(0, 21000), gen.Int64Range(0, 60*60*1000)), ) + properties.Property("getFor always gets all data in range", prop.ForAll( + func(low, high int64) bool { + // Bogus case. 
+ if low >= high { + return true + } + + maxResolution := downsample.ResLevel2 + res := set.getFor(low, high, maxResolution) + + // The data that we get must all encompass our requested range + if len(res) == 1 && (res[0].meta.Thanos.Downsample.Resolution > maxResolution || + res[0].meta.MinTime > low || res[0].meta.MaxTime < high) { + return false + } else if len(res) > 1 { + mint := int64(21001) + maxt := int64(0) + for i := 0; i < len(res)-1; i++ { + if res[i].meta.Thanos.Downsample.Resolution > maxResolution { + return false + } + if res[i+1].meta.MinTime != res[i].meta.MaxTime { + return false + } + if res[i].meta.MinTime < mint { + mint = res[i].meta.MinTime + } + if res[i].meta.MaxTime > maxt { + maxt = res[i].meta.MaxTime + } + } + if res[len(res)-1].meta.MinTime < mint { + mint = res[len(res)-1].meta.MinTime + } + if res[len(res)-1].meta.MaxTime > maxt { + maxt = res[len(res)-1].meta.MaxTime + } + if low < mint { + return false + } + if high > maxt { + return false + } + + } + return true + }, gen.Int64Range(0, 21000), gen.Int64Range(0, 21000)), + ) + properties.TestingRun(t) } From dbe37278d6a62f42f27e36feda8c18556bcf22c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 19:43:48 +0300 Subject: [PATCH 19/23] query/querier_test: do not shrink --- pkg/store/bucket_test.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 93a0cd47b2..0b68d5bb57 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -21,8 +21,8 @@ import ( func TestBucketBlock_Property(t *testing.T) { parameters := gopter.DefaultTestParameters() - parameters.Rng.Seed(42) - parameters.MinSuccessfulTests = 10000 + parameters.Rng.Seed(2000) + parameters.MinSuccessfulTests = 20000 properties := gopter.NewProperties(parameters) set := newBucketBlockSet(labels.Labels{}) @@ -50,7 +50,7 @@ func TestBucketBlock_Property(t *testing.T) { {window: downsample.ResLevel0, mint: 7000, maxt: 14000}, {window: downsample.ResLevel1, mint: 7000, maxt: 14000}, // Compaction level 4 already happened, raw and downsample res level 1 samples have been deleted - {window: downsample.ResLevel1, mint: 14000, maxt: 21000}, + {window: downsample.ResLevel2, mint: 14000, maxt: 21000}, } for _, in := range input { @@ -62,7 +62,7 @@ func TestBucketBlock_Property(t *testing.T) { testutil.Ok(t, set.add(&bucketBlock{meta: &m})) } - properties.Property("getFor always gets at least some data in range", prop.ForAll( + properties.Property("getFor always gets at least some data in range", prop.ForAllNoShrink( func(low, high, maxResolution int64) bool { // Bogus case. if low >= high { @@ -107,7 +107,7 @@ func TestBucketBlock_Property(t *testing.T) { }, gen.Int64Range(0, 21000), gen.Int64Range(0, 21000), gen.Int64Range(0, 60*60*1000)), ) - properties.Property("getFor always gets all data in range", prop.ForAll( + properties.Property("getFor always gets all data in range", prop.ForAllNoShrink( func(low, high int64) bool { // Bogus case. 
if low >= high { @@ -125,9 +125,6 @@ func TestBucketBlock_Property(t *testing.T) { mint := int64(21001) maxt := int64(0) for i := 0; i < len(res)-1; i++ { - if res[i].meta.Thanos.Downsample.Resolution > maxResolution { - return false - } if res[i+1].meta.MinTime != res[i].meta.MaxTime { return false } From 6dac95435f35e83fee9036b5b185fcbff9f230d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 17 May 2019 19:47:09 +0300 Subject: [PATCH 20/23] store/bucket: revert change This doesn't really matter as the tests show. --- pkg/store/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index e6cb680147..f1bc4aea89 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -1012,7 +1012,7 @@ func (s *bucketBlockSet) getFor(mint, maxt, maxResolutionMillis int64) (bs []*bu // Base case, we fill the given interval with the closest resolution. for _, b := range s.blocks[i] { - if b.meta.MinTime < mint && b.meta.MaxTime <= mint { + if b.meta.MaxTime <= mint { continue } if b.meta.MinTime >= maxt { From 55aa32dad02d4f66c991d5bd4096e00ca7f392b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Sat, 18 May 2019 12:40:55 +0300 Subject: [PATCH 21/23] store/bucket_test: remove more confusion --- pkg/store/bucket_test.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 0b68d5bb57..f89d7c1752 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -189,13 +189,13 @@ func TestBucketBlockSet_Duplicated(t *testing.T) { cases := []struct { mint, maxt int64 - minResolution int64 + maxResolution int64 res []resBlock }{ { mint: 0, maxt: 300, - minResolution: downsample.ResLevel2, + maxResolution: downsample.ResLevel2, res: []resBlock{ {window: downsample.ResLevel2, mint: 0, maxt: 100}, {window: downsample.ResLevel2, mint: 100, maxt: 200}, @@ -215,7 +215,7 @@ func TestBucketBlockSet_Duplicated(t *testing.T) { m.MaxTime = b.maxt exp = append(exp, &bucketBlock{meta: &m}) } - res := set.getFor(c.mint, c.maxt, c.minResolution) + res := set.getFor(c.mint, c.maxt, c.maxResolution) testutil.Equals(t, exp, res) } } @@ -253,13 +253,13 @@ func TestBucketBlockSet_Interleaved(t *testing.T) { cases := []struct { mint, maxt int64 - minResolution int64 + maxResolution int64 res []resBlock }{ { mint: 0, maxt: 700, - minResolution: downsample.ResLevel2, + maxResolution: downsample.ResLevel2, res: []resBlock{ {window: downsample.ResLevel2, mint: 0, maxt: 50}, {window: downsample.ResLevel1, mint: 50, maxt: 100}, @@ -274,7 +274,7 @@ func TestBucketBlockSet_Interleaved(t *testing.T) { { mint: 100, maxt: 400, - minResolution: downsample.ResLevel2, + maxResolution: downsample.ResLevel2, res: []resBlock{ {window: downsample.ResLevel2, mint: 100, maxt: 200}, {window: downsample.ResLevel1, mint: 200, maxt: 300}, @@ -294,7 +294,7 @@ func TestBucketBlockSet_Interleaved(t *testing.T) { m.MaxTime = b.maxt exp = append(exp, &bucketBlock{meta: &m}) } - res := set.getFor(c.mint, c.maxt, c.minResolution) + res := set.getFor(c.mint, c.maxt, c.maxResolution) testutil.Equals(t, exp, res) } } @@ -336,13 +336,13 @@ func TestBucketBlockSet_addGet(t *testing.T) { cases := []struct { mint, maxt int64 - minResolution int64 + maxResolution int64 res []resBlock }{ { mint: -100, maxt: 1000, - minResolution: 0, + maxResolution: 0, res: []resBlock{ {window: downsample.ResLevel0, mint: 0, maxt: 100}, {window: 
downsample.ResLevel0, mint: 100, maxt: 200}, @@ -353,7 +353,7 @@ func TestBucketBlockSet_addGet(t *testing.T) { }, { mint: 100, maxt: 400, - minResolution: downsample.ResLevel1 - 1, + maxResolution: downsample.ResLevel1 - 1, res: []resBlock{ {window: downsample.ResLevel0, mint: 100, maxt: 200}, {window: downsample.ResLevel0, mint: 200, maxt: 300}, @@ -362,7 +362,7 @@ func TestBucketBlockSet_addGet(t *testing.T) { }, { mint: 100, maxt: 500, - minResolution: downsample.ResLevel1, + maxResolution: downsample.ResLevel1, res: []resBlock{ {window: downsample.ResLevel1, mint: 100, maxt: 200}, {window: downsample.ResLevel1, mint: 200, maxt: 300}, @@ -372,7 +372,7 @@ func TestBucketBlockSet_addGet(t *testing.T) { }, { mint: 0, maxt: 500, - minResolution: downsample.ResLevel2, + maxResolution: downsample.ResLevel2, res: []resBlock{ {window: downsample.ResLevel1, mint: 0, maxt: 100}, {window: downsample.ResLevel2, mint: 100, maxt: 200}, @@ -393,7 +393,7 @@ func TestBucketBlockSet_addGet(t *testing.T) { m.MaxTime = b.maxt exp = append(exp, &bucketBlock{meta: &m}) } - res := set.getFor(c.mint, c.maxt, c.minResolution) + res := set.getFor(c.mint, c.maxt, c.maxResolution) testutil.Equals(t, exp, res) } } From 3fe4217110737edb408d7e4881f7613daee184ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Wed, 22 May 2019 16:07:19 +0300 Subject: [PATCH 22/23] store/bucket: clean up tests Only leave the property tests in place since they catch all of the errors. --- pkg/query/querier_test.go | 2 +- pkg/store/bucket.go | 2 +- pkg/store/bucket_test.go | 143 -------------------------------------- 3 files changed, 2 insertions(+), 145 deletions(-) diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index 79aa8247b9..9a7999e384 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -24,7 +24,7 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { testProxy := &storeServer{resps: []*storepb.SeriesResponse{}} queryableCreator := NewQueryableCreator(nil, testProxy, "test") - oneHourMillis := int64(1*time.Hour) / (1000 * 1000) + oneHourMillis := int64(1*time.Hour) / int64(time.Millisecond) queryable := queryableCreator(false, oneHourMillis, false, func(err error) {}) q, err := queryable.Querier(context.Background(), 0, 42) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index f1bc4aea89..1cb765a67d 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -996,7 +996,7 @@ func int64index(s []int64, x int64) int { } // getFor returns a time-ordered list of blocks that cover date between mint and maxt. -// Blocks with the lowest resolution possible but not lower than the given resolution are returned. +// Blocks with the biggest resolution possible but not bigger than the given max resolution are returned. func (s *bucketBlockSet) getFor(mint, maxt, maxResolutionMillis int64) (bs []*bucketBlock) { if mint == maxt { return nil diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index f89d7c1752..d5f436a65b 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -156,149 +156,6 @@ func TestBucketBlock_Property(t *testing.T) { properties.TestingRun(t) } -// TestBucketBlockSet with blocks which have the same time range -// but different resolutions. 
-func TestBucketBlockSet_Duplicated(t *testing.T) { - defer leaktest.CheckTimeout(t, 10*time.Second)() - - set := newBucketBlockSet(labels.Labels{}) - - type resBlock struct { - mint, maxt int64 - window int64 - } - input := []resBlock{ - {window: downsample.ResLevel2, mint: 0, maxt: 100}, - {window: downsample.ResLevel0, mint: 0, maxt: 100}, - {window: downsample.ResLevel1, mint: 0, maxt: 100}, - {window: downsample.ResLevel0, mint: 100, maxt: 200}, - {window: downsample.ResLevel1, mint: 100, maxt: 200}, - {window: downsample.ResLevel2, mint: 100, maxt: 200}, - {window: downsample.ResLevel2, mint: 200, maxt: 300}, - {window: downsample.ResLevel1, mint: 200, maxt: 300}, - } - - for _, in := range input { - var m metadata.Meta - m.Thanos.Downsample.Resolution = in.window - m.MinTime = in.mint - m.MaxTime = in.maxt - - testutil.Ok(t, set.add(&bucketBlock{meta: &m})) - } - - cases := []struct { - mint, maxt int64 - maxResolution int64 - res []resBlock - }{ - { - mint: 0, - maxt: 300, - maxResolution: downsample.ResLevel2, - res: []resBlock{ - {window: downsample.ResLevel2, mint: 0, maxt: 100}, - {window: downsample.ResLevel2, mint: 100, maxt: 200}, - {window: downsample.ResLevel2, mint: 200, maxt: 300}, - }, - }, - } - - for i, c := range cases { - t.Logf("case %d", i) - - var exp []*bucketBlock - for _, b := range c.res { - var m metadata.Meta - m.Thanos.Downsample.Resolution = b.window - m.MinTime = b.mint - m.MaxTime = b.maxt - exp = append(exp, &bucketBlock{meta: &m}) - } - res := set.getFor(c.mint, c.maxt, c.maxResolution) - testutil.Equals(t, exp, res) - } -} - -// TestBucketBlockSet with blocks with different resolutions -// that interleave between each other. -func TestBucketBlockSet_Interleaved(t *testing.T) { - defer leaktest.CheckTimeout(t, 10*time.Second)() - - set := newBucketBlockSet(labels.Labels{}) - - type resBlock struct { - mint, maxt int64 - window int64 - } - input := []resBlock{ - {window: downsample.ResLevel2, mint: 0, maxt: 50}, - {window: downsample.ResLevel1, mint: 50, maxt: 100}, - {window: downsample.ResLevel2, mint: 100, maxt: 200}, - {window: downsample.ResLevel1, mint: 200, maxt: 300}, - {window: downsample.ResLevel1, mint: 300, maxt: 400}, - {window: downsample.ResLevel1, mint: 400, maxt: 500}, - {window: downsample.ResLevel0, mint: 500, maxt: 600}, - {window: downsample.ResLevel0, mint: 600, maxt: 700}, - } - - for _, in := range input { - var m metadata.Meta - m.Thanos.Downsample.Resolution = in.window - m.MinTime = in.mint - m.MaxTime = in.maxt - - testutil.Ok(t, set.add(&bucketBlock{meta: &m})) - } - - cases := []struct { - mint, maxt int64 - maxResolution int64 - res []resBlock - }{ - { - mint: 0, - maxt: 700, - maxResolution: downsample.ResLevel2, - res: []resBlock{ - {window: downsample.ResLevel2, mint: 0, maxt: 50}, - {window: downsample.ResLevel1, mint: 50, maxt: 100}, - {window: downsample.ResLevel2, mint: 100, maxt: 200}, - {window: downsample.ResLevel1, mint: 200, maxt: 300}, - {window: downsample.ResLevel1, mint: 300, maxt: 400}, - {window: downsample.ResLevel1, mint: 400, maxt: 500}, - {window: downsample.ResLevel0, mint: 500, maxt: 600}, - {window: downsample.ResLevel0, mint: 600, maxt: 700}, - }, - }, - { - mint: 100, - maxt: 400, - maxResolution: downsample.ResLevel2, - res: []resBlock{ - {window: downsample.ResLevel2, mint: 100, maxt: 200}, - {window: downsample.ResLevel1, mint: 200, maxt: 300}, - {window: downsample.ResLevel1, mint: 300, maxt: 400}, - }, - }, - } - - for i, c := range cases { - t.Logf("case %d", i) - - var exp []*bucketBlock 
- for _, b := range c.res { - var m metadata.Meta - m.Thanos.Downsample.Resolution = b.window - m.MinTime = b.mint - m.MaxTime = b.maxt - exp = append(exp, &bucketBlock{meta: &m}) - } - res := set.getFor(c.mint, c.maxt, c.maxResolution) - testutil.Equals(t, exp, res) - } -} - func TestBucketBlockSet_addGet(t *testing.T) { defer leaktest.CheckTimeout(t, 10*time.Second)() From 3bbc6cb5bb2295752f3931a733108e4eb846245b Mon Sep 17 00:00:00 2001 From: Bartek Plotka Date: Mon, 27 May 2019 15:32:42 +0100 Subject: [PATCH 23/23] Simplified goFor implementation. Signed-off-by: Bartek Plotka --- pkg/store/bucket.go | 39 +++++++++++---------------------------- 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 1cb765a67d..a4e1ef734e 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -1010,7 +1010,9 @@ func (s *bucketBlockSet) getFor(mint, maxt, maxResolutionMillis int64) (bs []*bu for ; i < len(s.resolutions) && s.resolutions[i] > maxResolutionMillis; i++ { } - // Base case, we fill the given interval with the closest resolution. + // Fill the given interval with the blocks for the current resolution. + // Our current resolution might not cover all data, so recursively fill the gaps with higher resolution blocks if there is any. + start := mint for _, b := range s.blocks[i] { if b.meta.MaxTime <= mint { continue @@ -1018,37 +1020,18 @@ func (s *bucketBlockSet) getFor(mint, maxt, maxResolutionMillis int64) (bs []*bu if b.meta.MinTime >= maxt { break } - bs = append(bs, b) - } - // Our current resolution might not cover all data, recursively fill the gaps at the start - // and end of [mint, maxt] with higher resolution blocks. - // - // Plus, fill the possible gaps between the current blocks with higher resolution blocks. - i++ - // No higher resolution left, we are done. - if i >= len(s.resolutions) { - return bs - } - if len(bs) == 0 { - return s.getFor(mint, maxt, s.resolutions[i]) - } - until := len(bs) - 1 - for j := 0; j < until; j++ { - if bs[j+1].meta.MinTime-bs[j].meta.MaxTime > 0 { - between := s.getFor(bs[j].meta.MaxTime, bs[j+1].meta.MinTime, s.resolutions[i]) - bs = append(bs[:j+1], append(between, bs[j+1:]...)...) - - // Push the iterators further. - j += len(between) - until += len(between) + if i+1 < len(s.resolutions) { + bs = append(bs, s.getFor(start, b.meta.MinTime, s.resolutions[i+1])...) } + bs = append(bs, b) + start = b.meta.MaxTime } - left := s.getFor(mint, bs[0].meta.MinTime, s.resolutions[i]) - right := s.getFor(bs[len(bs)-1].meta.MaxTime, maxt, s.resolutions[i]) - - return append(left, append(bs, right...)...) + if i+1 < len(s.resolutions) { + bs = append(bs, s.getFor(start, maxt, s.resolutions[i+1])...) + } + return bs } // labelMatchers verifies whether the block set matches the given matchers and returns a new
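
For readers who want to see the simplified recursion from the final patch in isolation, here is a minimal, self-contained Go sketch of the gap-filling idea. It is not the Thanos code itself: the `block` and `blockSet` types, the millisecond constants standing in for downsample.ResLevel0/1/2, and the `start < b.mint` guards are toy stand-ins introduced here for illustration. It replays the interleaved-resolution layout from the table-driven test that the test-cleanup patch removed.

package main

import "fmt"

// Toy stand-ins for the Thanos types. block carries only what getFor looks at.
type block struct {
	mint, maxt, res int64
}

type blockSet struct {
	resolutions []int64           // sorted from coarsest (largest value) to raw (0)
	blocks      map[int64][]block // per resolution, sorted by mint, non-overlapping
}

// getFor sketches the recursion from the final patch: pick the coarsest
// resolution that does not exceed maxRes, walk its blocks across [mint, maxt),
// and fill every gap it leaves by recursing into the next finer resolution.
func (s *blockSet) getFor(mint, maxt, maxRes int64) (bs []block) {
	if mint >= maxt {
		return nil
	}
	i := 0
	for ; i < len(s.resolutions) && s.resolutions[i] > maxRes; i++ {
	}
	if i == len(s.resolutions) {
		return nil
	}

	start := mint
	for _, b := range s.blocks[s.resolutions[i]] {
		if b.maxt <= mint {
			continue
		}
		if b.mint >= maxt {
			break
		}
		// Gap before this block: cover it with finer-resolution blocks, if any exist.
		if i+1 < len(s.resolutions) && start < b.mint {
			bs = append(bs, s.getFor(start, b.mint, s.resolutions[i+1])...)
		}
		bs = append(bs, b)
		start = b.maxt
	}
	// Trailing gap up to maxt.
	if i+1 < len(s.resolutions) && start < maxt {
		bs = append(bs, s.getFor(start, maxt, s.resolutions[i+1])...)
	}
	return bs
}

func main() {
	const res0, res1, res2 = int64(0), int64(300000), int64(3600000)

	// Interleaved layout similar to the removed TestBucketBlockSet_Interleaved case.
	s := &blockSet{
		resolutions: []int64{res2, res1, res0},
		blocks: map[int64][]block{
			res2: {{0, 50, res2}, {100, 200, res2}},
			res1: {{50, 100, res1}, {200, 500, res1}},
			res0: {{500, 700, res0}},
		},
	}

	got := s.getFor(0, 700, res2)
	for i, b := range got {
		// The recursion emits blocks in time order, so adjacent blocks must touch
		// exactly; this is the same contiguity invariant the property tests assert.
		if i > 0 && b.mint != got[i-1].maxt {
			fmt.Println("gap or overlap at", b.mint)
		}
		fmt.Printf("res=%-7d [%3d, %3d)\n", b.res, b.mint, b.maxt)
	}
}

Because the walk hands every gap to the next finer resolution as it goes, the result comes back time-ordered and contiguous by construction, which appears to be why the simplified version no longer needs the explicit re-sort that an earlier patch in this series introduced; the retained property tests assert the same contiguity and coverage invariants against randomly drawn ranges.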