Rename DefaultBytesLimiterFactory back to NewBytesLimiterFactory
Signed-off-by: Justin Jung <[email protected]>
justinjung04 committed Jun 6, 2024
1 parent 25d8cd3 commit c798438
Showing 5 changed files with 32 additions and 32 deletions.
cmd/thanos/store.go (1 addition & 1 deletion)
@@ -439,7 +439,7 @@ func runStore(
 		dataDir,
 		store.NewChunksLimiterFactory(conf.storeRateLimits.SamplesPerRequest/store.MaxSamplesPerChunk), // The samples limit is an approximation based on the max number of samples per chunk.
 		store.NewSeriesLimiterFactory(conf.storeRateLimits.SeriesPerRequest),
-		store.DefaultBytesLimiterFactory(conf.maxDownloadedBytes),
+		store.NewBytesLimiterFactory(conf.maxDownloadedBytes),
 		store.NewGapBasedPartitioner(store.PartitionerMaxGapSize),
 		conf.blockSyncConcurrency,
 		conf.advertiseCompatibilityLabel,
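The store.go hunk above also documents how the chunks limit is derived: the per-request sample budget is divided by the most samples a chunk can hold. A minimal sketch of that arithmetic; the value 120 stands in for store.MaxSamplesPerChunk, whose actual value is not shown in this diff, and the sample budget is illustrative:

package main

import "fmt"

func main() {
	// Assumed stand-in for store.MaxSamplesPerChunk; its value is not
	// visible in this diff, so 120 is illustrative only.
	const maxSamplesPerChunk = 120

	// A per-request budget of 6,000,000 samples caps the request at
	// 6,000,000 / 120 = 50,000 chunks, which is the approximation the
	// comment in store.go describes.
	samplesPerRequest := uint64(6_000_000)
	chunksLimit := samplesPerRequest / maxSamplesPerChunk
	fmt.Println(chunksLimit) // 50000
}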
pkg/store/acceptance_test.go (1 addition & 1 deletion)
@@ -921,7 +921,7 @@ func TestBucketStore_Acceptance(t *testing.T) {
 			"",
 			NewChunksLimiterFactory(10e6),
 			NewSeriesLimiterFactory(10e6),
-			DefaultBytesLimiterFactory(10e6),
+			NewBytesLimiterFactory(10e6),
 			NewGapBasedPartitioner(PartitionerMaxGapSize),
 			20,
 			true,
pkg/store/bucket_e2e_test.go (8 additions & 8 deletions)
@@ -488,7 +488,7 @@ func TestBucketStore_e2e(t *testing.T) {
 
 	dir := t.TempDir()
 
-	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), DefaultBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
+	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), NewBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
 
 	if ok := t.Run("no index cache", func(t *testing.T) {
 		s.cache.SwapWith(noopCache{})
@@ -541,7 +541,7 @@ func TestBucketStore_ManyParts_e2e(t *testing.T) {
 
 	dir := t.TempDir()
 
-	s := prepareStoreWithTestBlocks(t, dir, bkt, true, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), DefaultBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
+	s := prepareStoreWithTestBlocks(t, dir, bkt, true, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), NewBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
 
 	indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(s.logger, nil, nil, storecache.InMemoryIndexCacheConfig{
 		MaxItemSize: 1e5,
@@ -567,7 +567,7 @@ func TestBucketStore_TimePartitioning_e2e(t *testing.T) {
 	// The query will fetch 2 series from 2 blocks, so we do expect to hit a total of 4 chunks.
 	expectedChunks := uint64(2 * 2)
 
-	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(expectedChunks), NewSeriesLimiterFactory(0), DefaultBytesLimiterFactory(0), emptyRelabelConfig, &FilterConfig{
+	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(expectedChunks), NewSeriesLimiterFactory(0), NewBytesLimiterFactory(0), emptyRelabelConfig, &FilterConfig{
 		MinTime: minTimeDuration,
 		MaxTime: filterMaxTime,
 	})
@@ -649,7 +649,7 @@ func TestBucketStore_Series_ChunksLimiter_e2e(t *testing.T) {
 
 			dir := t.TempDir()
 
-			s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(testData.maxChunksLimit), NewSeriesLimiterFactory(testData.maxSeriesLimit), DefaultBytesLimiterFactory(units.Base2Bytes(testData.maxBytesLimit)), emptyRelabelConfig, allowAllFilterConf)
+			s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(testData.maxChunksLimit), NewSeriesLimiterFactory(testData.maxSeriesLimit), NewBytesLimiterFactory(units.Base2Bytes(testData.maxBytesLimit)), emptyRelabelConfig, allowAllFilterConf)
 			testutil.Ok(t, s.store.SyncBlocks(ctx))
 
 			req := &storepb.SeriesRequest{
@@ -724,7 +724,7 @@ func TestBucketStore_LabelNames_e2e(t *testing.T) {
 
 	dir := t.TempDir()
 
-	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), DefaultBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
+	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), NewBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
 	s.cache.SwapWith(noopCache{})
 
 	mint, maxt := s.store.TimeRange()
@@ -844,7 +844,7 @@ func TestBucketStore_LabelNames_SeriesLimiter_e2e(t *testing.T) {
 
 			bkt := objstore.NewInMemBucket()
 			dir := t.TempDir()
-			s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(testData.maxSeriesLimit), DefaultBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
+			s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(testData.maxSeriesLimit), NewBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
 			testutil.Ok(t, s.store.SyncBlocks(ctx))
 			req := &storepb.LabelNamesRequest{
 				Matchers: []storepb.LabelMatcher{
@@ -879,7 +879,7 @@ func TestBucketStore_LabelValues_e2e(t *testing.T) {
 
 	dir := t.TempDir()
 
-	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), DefaultBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
+	s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), NewBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
 	s.cache.SwapWith(noopCache{})
 
 	mint, maxt := s.store.TimeRange()
@@ -1003,7 +1003,7 @@ func TestBucketStore_LabelValues_SeriesLimiter_e2e(t *testing.T) {
 
 			dir := t.TempDir()
 
-			s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(testData.maxSeriesLimit), DefaultBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
+			s := prepareStoreWithTestBlocks(t, dir, bkt, false, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(testData.maxSeriesLimit), NewBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf)
 			testutil.Ok(t, s.store.SyncBlocks(ctx))
 
 			req := &storepb.LabelValuesRequest{
pkg/store/bucket_test.go (20 additions & 20 deletions)
@@ -679,7 +679,7 @@ func TestBucketStore_TSDBInfo(t *testing.T) {
 		dir,
 		NewChunksLimiterFactory(0),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		20,
 		true,
@@ -734,7 +734,7 @@ func TestBucketStore_Info(t *testing.T) {
 		dir,
 		NewChunksLimiterFactory(0),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		20,
 		true,
@@ -978,7 +978,7 @@ func testSharding(t *testing.T, reuseDisk string, bkt objstore.Bucket, all ...ul
 		dir,
 		NewChunksLimiterFactory(0),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		20,
 		true,
@@ -1104,7 +1104,7 @@ func TestReadIndexCache_LoadSeries(t *testing.T) {
 	}
 
 	// Success with no refetches.
-	testutil.Ok(t, r.loadSeries(ctx, []storage.SeriesRef{2, 13, 24}, false, 2, 100, DefaultBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
+	testutil.Ok(t, r.loadSeries(ctx, []storage.SeriesRef{2, 13, 24}, false, 2, 100, NewBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
 	testutil.Equals(t, map[storage.SeriesRef][]byte{
 		2:  []byte("aaaaaaaaaa"),
 		13: []byte("bbbbbbbbbb"),
@@ -1114,7 +1114,7 @@ func TestReadIndexCache_LoadSeries(t *testing.T) {
 
 	// Success with 2 refetches.
 	r.loadedSeries = map[storage.SeriesRef][]byte{}
-	testutil.Ok(t, r.loadSeries(ctx, []storage.SeriesRef{2, 13, 24}, false, 2, 15, DefaultBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
+	testutil.Ok(t, r.loadSeries(ctx, []storage.SeriesRef{2, 13, 24}, false, 2, 15, NewBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
 	testutil.Equals(t, map[storage.SeriesRef][]byte{
 		2:  []byte("aaaaaaaaaa"),
 		13: []byte("bbbbbbbbbb"),
@@ -1124,7 +1124,7 @@ func TestReadIndexCache_LoadSeries(t *testing.T) {
 
 	// Success with refetch on first element.
 	r.loadedSeries = map[storage.SeriesRef][]byte{}
-	testutil.Ok(t, r.loadSeries(ctx, []storage.SeriesRef{2}, false, 2, 5, DefaultBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
+	testutil.Ok(t, r.loadSeries(ctx, []storage.SeriesRef{2}, false, 2, 5, NewBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
 	testutil.Equals(t, map[storage.SeriesRef][]byte{
 		2: []byte("aaaaaaaaaa"),
 	}, r.loadedSeries)
@@ -1138,7 +1138,7 @@ func TestReadIndexCache_LoadSeries(t *testing.T) {
 	testutil.Ok(t, bkt.Upload(ctx, filepath.Join(b.meta.ULID.String(), block.IndexFilename), bytes.NewReader(buf.Get())))
 
 	// Fail, but no recursion at least.
-	testutil.NotOk(t, r.loadSeries(ctx, []storage.SeriesRef{2, 13, 24}, false, 1, 15, DefaultBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
+	testutil.NotOk(t, r.loadSeries(ctx, []storage.SeriesRef{2, 13, 24}, false, 1, 15, NewBytesLimiterFactory(0)(nil), tenancy.DefaultTenant))
 }
 
 func TestBucketIndexReader_ExpandedPostings(t *testing.T) {
@@ -1323,7 +1323,7 @@ func benchmarkExpandedPostings(
 
 	t.ResetTimer()
 	for i := 0; i < t.N(); i++ {
-		p, err := indexr.ExpandedPostings(context.Background(), newSortedMatchers(c.matchers), DefaultBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant)
+		p, err := indexr.ExpandedPostings(context.Background(), newSortedMatchers(c.matchers), NewBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant)
 		testutil.Ok(t, err)
 		testutil.Equals(t, c.expectedLen, len(p.postings))
 	}
@@ -1358,7 +1358,7 @@ func TestExpandedPostingsEmptyPostings(t *testing.T) {
 	matcher2 := labels.MustNewMatcher(labels.MatchRegexp, "i", "500.*")
 	ctx := context.Background()
 	dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"})
-	ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2}), DefaultBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant)
+	ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2}), NewBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant)
 	testutil.Ok(t, err)
 	testutil.Equals(t, ps, (*lazyExpandedPostings)(nil))
 	// Make sure even if a matcher doesn't match any postings, we still cache empty expanded postings.
@@ -1394,7 +1394,7 @@ func TestLazyExpandedPostingsEmptyPostings(t *testing.T) {
 	matcher3 := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+")
 	ctx := context.Background()
 	dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"})
-	ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2, matcher3}), DefaultBytesLimiterFactory(0)(nil), true, dummyCounter, tenancy.DefaultTenant)
+	ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2, matcher3}), NewBytesLimiterFactory(0)(nil), true, dummyCounter, tenancy.DefaultTenant)
 	testutil.Ok(t, err)
 	// We expect emptyLazyPostings rather than lazy postings with 0 length but with matchers.
 	testutil.Equals(t, ps, emptyLazyPostings)
@@ -1537,7 +1537,7 @@ func benchBucketSeries(t testutil.TB, sampleType chunkenc.ValueType, skipChunk,
 		tmpDir,
 		NewChunksLimiterFactory(0),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		1,
 		false,
@@ -1786,7 +1786,7 @@ func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) {
 		queryGate:            gate.NewNoop(),
 		chunksLimiterFactory: NewChunksLimiterFactory(0),
 		seriesLimiterFactory: NewSeriesLimiterFactory(0),
-		bytesLimiterFactory:  DefaultBytesLimiterFactory(0),
+		bytesLimiterFactory:  NewBytesLimiterFactory(0),
 		seriesBatchSize:      SeriesBatchSize,
 		requestLoggerFunc:    NoopRequestLoggerFunc,
 	}
@@ -1986,7 +1986,7 @@ func TestSeries_ErrorUnmarshallingRequestHints(t *testing.T) {
 		tmpDir,
 		NewChunksLimiterFactory(10000/MaxSamplesPerChunk),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		10,
 		false,
@@ -2078,7 +2078,7 @@ func TestSeries_BlockWithMultipleChunks(t *testing.T) {
 		tmpDir,
 		NewChunksLimiterFactory(100000/MaxSamplesPerChunk),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		10,
 		false,
@@ -2237,7 +2237,7 @@ func TestSeries_SeriesSortedWithoutReplicaLabels(t *testing.T) {
 		tmpDir,
 		NewChunksLimiterFactory(100000/MaxSamplesPerChunk),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		10,
 		false,
@@ -2424,7 +2424,7 @@ func setupStoreForHintsTest(t *testing.T) (testutil.TB, *BucketStore, []*storepb
 		tmpDir,
 		NewChunksLimiterFactory(10000/MaxSamplesPerChunk),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		10,
 		false,
@@ -2641,7 +2641,7 @@ func TestSeries_ChunksHaveHashRepresentation(t *testing.T) {
 		tmpDir,
 		NewChunksLimiterFactory(100000/MaxSamplesPerChunk),
 		NewSeriesLimiterFactory(0),
-		DefaultBytesLimiterFactory(0),
+		NewBytesLimiterFactory(0),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		10,
 		false,
@@ -2896,7 +2896,7 @@ func benchmarkBlockSeriesWithConcurrency(b *testing.B, concurrency int, blockMet
 				req,
 				seriesLimiter,
 				chunksLimiter,
-				DefaultBytesLimiterFactory(0)(nil),
+				NewBytesLimiterFactory(0)(nil),
 				matchers,
 				nil,
 				false,
@@ -3543,7 +3543,7 @@ func TestExpandedPostingsRace(t *testing.T) {
 		i := i
 		bb := bb
 		go func(i int, bb *bucketBlock) {
-			refs, err := bb.indexReader(logger).ExpandedPostings(context.Background(), m, DefaultBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant)
+			refs, err := bb.indexReader(logger).ExpandedPostings(context.Background(), m, NewBytesLimiterFactory(0)(nil), false, dummyCounter, tenancy.DefaultTenant)
 			testutil.Ok(t, err)
 			defer wg.Done()

@@ -3636,7 +3636,7 @@ func TestBucketStoreDedupOnBlockSeriesSet(t *testing.T) {
 		"",
 		NewChunksLimiterFactory(10e6),
 		NewSeriesLimiterFactory(10e6),
-		DefaultBytesLimiterFactory(10e6),
+		NewBytesLimiterFactory(10e6),
 		NewGapBasedPartitioner(PartitionerMaxGapSize),
 		20,
 		true,
pkg/store/limiter.go (2 additions & 2 deletions)
@@ -97,8 +97,8 @@ func NewSeriesLimiterFactory(limit uint64) SeriesLimiterFactory {
 	}
 }
 
-// DefaultBytesLimiterFactory makes a new BytesLimiterFactory with a static limit.
-func DefaultBytesLimiterFactory(limit units.Base2Bytes) BytesLimiterFactory {
+// NewBytesLimiterFactory makes a new BytesLimiterFactory with a static limit.
+func NewBytesLimiterFactory(limit units.Base2Bytes) BytesLimiterFactory {
 	return func(failedCounter prometheus.Counter) BytesLimiter {
 		return NewLimiter(uint64(limit), failedCounter)
 	}
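Across all five files the change is purely a rename; the limiter.go hunk above shows the factory's behavior is untouched. It captures a static byte limit once, and each invocation with a Prometheus counter (the tests above simply pass nil) mints a fresh BytesLimiter via NewLimiter. A minimal usage sketch under that reading; the 512 MiB limit and the metric name are illustrative, not taken from this commit:

package main

import (
	"fmt"

	"github.com/alecthomas/units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"github.com/thanos-io/thanos/pkg/store"
)

func main() {
	// Counter incremented when the limit is exceeded; the tests in this
	// diff pass nil here instead. The metric name is illustrative.
	failed := promauto.With(prometheus.NewRegistry()).NewCounter(
		prometheus.CounterOpts{Name: "bytes_limit_failures_total"},
	)

	// The factory captures a static limit once...
	factory := store.NewBytesLimiterFactory(512 * units.MiB)

	// ...and returns a fresh BytesLimiter each time it is called,
	// typically once per request.
	limiter := factory(failed)
	fmt.Printf("%T\n", limiter)
}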
