[dbnode] Fix AggregateQuery limits #3112

Merged Jan 23, 2021 (22 commits).
Changes shown below are from 2 of the 22 commits.

Commits (22)
476dbd4  [dbnode] AggregateQuery limit fix (arnikola, Jan 22, 2021)
5a99682  Cleanup (arnikola, Jan 22, 2021)
a0ca9db  [integration test] Add label query limits integration test (wesleyk, Jan 22, 2021)
5d2173d  Add paren (wesleyk, Jan 22, 2021)
c4ee6c6  Fixed exhaustive case, remove dead code (arnikola, Jan 22, 2021)
522543c  Merge branch 'arnikola/fix-limits' of github.com:m3db/m3 into arnikol… (arnikola, Jan 22, 2021)
ebfaa35  Aggregate results changes (arnikola, Jan 22, 2021)
849c92b  Add proper require exhaustive conversion + integration test for agg q… (wesleyk, Jan 22, 2021)
bd35ca9  Merge branch 'arnikola/fix-limits' of github.com:m3db/m3 into arnikol… (wesleyk, Jan 22, 2021)
cad0e06  Merge branch 'master' into arnikola/fix-limits (wesleyk, Jan 22, 2021)
829e6b3  Avoid flakiness with high limits (wesleyk, Jan 22, 2021)
31f001b  Limit on docs or inserts (arnikola, Jan 22, 2021)
172100f  Fixup integration test (wesleyk, Jan 22, 2021)
2695b90  Merge branch 'arnikola/fix-limits' of github.com:m3db/m3 into arnikol… (wesleyk, Jan 22, 2021)
84d5a86  Add more precise assertions to label query limits integration test (wesleyk, Jan 22, 2021)
dd5ae2e  Finish test fixes and refactor (arnikola, Jan 22, 2021)
b18484e  Response + lint (arnikola, Jan 22, 2021)
ab538cd  Improve IT comments (wesleyk, Jan 22, 2021)
505319c  Merge branch 'master' into arnikola/fix-limits (wesleyk, Jan 22, 2021)
b3fdbba  Response + lint (arnikola, Jan 22, 2021)
4221a99  Fix integrations (arnikola, Jan 22, 2021)
704b172  Merge branch 'arnikola/fix-limits' of github.com:m3db/m3 into arnikol… (arnikola, Jan 22, 2021)
17 changes: 12 additions & 5 deletions src/dbnode/storage/index/aggregate_results.go
@@ -123,6 +123,7 @@ func (r *aggregatedResults) AggregateResultsOptions() AggregateResultsOptions {

func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) (int, int) {
r.Lock()
maxInsertions := r.aggregateOpts.SizeLimit - r.totalDocsCount
Review comment (Collaborator): should we be subtracting totalDocsCount or r.resultsMap.size()?

Reply (Collaborator, author): Yeah, going to do some refactoring/renaming around this to make it clearer what each limit is; totalDocsCount is not quite correctly calculated at the moment, so it will need a few touch-ups.

valueInsertions := 0
for _, entry := range batch {
f := entry.Field
@@ -145,11 +146,17 @@ func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) (int, int)
if !valuesMap.Contains(t) {
// we can avoid the copy because we assume ownership of the passed ident.ID,
// but still need to finalize it.
valuesMap.SetUnsafe(t, struct{}{}, AggregateValuesMapSetUnsafeOptions{
NoCopyKey: true,
NoFinalizeKey: false,
})
valueInsertions++
if maxInsertions > valueInsertions {
valuesMap.SetUnsafe(t, struct{}{}, AggregateValuesMapSetUnsafeOptions{
NoCopyKey: true,
NoFinalizeKey: false,
})
valueInsertions++
} else {
// this value exceeds the limit, so should be released to the underling
Review comment (Collaborator): underlying

// pool without adding to the map.
t.Finalize()
}
} else {
// because we already have a entry for this term, we release the ident back to
// the underlying pool.
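The core of the aggregate_results.go change above: AddFields now computes how many new values it may still insert (the configured size limit minus what the results already hold) and releases any IDs past that cap back to their pool instead of adding them to the map. Below is a minimal, self-contained Go sketch of that pattern; the releasableID type, the plain map, and the addValues helper are illustrative stand-ins, not m3's pooled ident.ID or AggregateValuesMap.

```go
package main

import "fmt"

// releasableID stands in for a pooled ident.ID: a value that must be
// returned to its pool (Finalize) whenever it is not retained.
type releasableID struct{ val string }

func (r *releasableID) Finalize() { /* return to pool in the real code */ }

// addValues inserts at most (sizeLimit - alreadyStored) new values into dest.
// Values over the cap, and duplicate values, are finalized rather than
// inserted, mirroring the capped-insertion pattern in AddFields above.
func addValues(dest map[string]struct{}, batch []*releasableID, sizeLimit, alreadyStored int) int {
	maxInsertions := sizeLimit - alreadyStored
	inserted := 0
	for _, id := range batch {
		if _, ok := dest[id.val]; ok {
			// Duplicate term: release the ID back to the pool.
			id.Finalize()
			continue
		}
		if inserted < maxInsertions {
			dest[id.val] = struct{}{}
			inserted++
		} else {
			// Over the limit: release to the pool without adding to the map.
			id.Finalize()
		}
	}
	return inserted
}

func main() {
	dest := map[string]struct{}{"existing": {}}
	batch := []*releasableID{{"a"}, {"b"}, {"c"}}
	n := addValues(dest, batch, 3, len(dest))
	fmt.Println("inserted:", n, "total:", len(dest)) // inserted: 2 total: 3
}
```

The duplicate-term branch follows the same release path as the over-limit branch, which matches the existing behavior in the hunk above where an ident for an already-present term is handed back to the underlying pool.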
14 changes: 14 additions & 0 deletions src/dbnode/storage/index/aggregate_results_entry_arraypool_gen.go

Some generated files are not rendered by default.

55 changes: 33 additions & 22 deletions src/dbnode/storage/index/block.go
@@ -641,15 +641,16 @@ func (b *block) aggregateWithSpan(
}

var (
source = opts.Source
size = results.Size()
docsCount = results.TotalDocsCount()
batch = b.opts.AggregateResultsEntryArrayPool().Get()
batchSize = cap(batch)
iterClosed = false // tracking whether we need to free the iterator at the end.
source = opts.Source
size = results.Size()
resultCount = results.TotalDocsCount()
batch = AggregateResultsEntries(b.opts.AggregateResultsEntryArrayPool().Get())
maxBatch = cap(batch)
iterClosed = false // tracking whether we need to free the iterator at the end.
exhaustive = false
Review comment (Collaborator, author): may not need this

Reply (Collaborator): yeah, seems like opts.exhaustive(size, resultCount) accomplishes the same goal

)
if batchSize == 0 {
batchSize = defaultAggregateResultsEntryBatchSize
if maxBatch == 0 {
maxBatch = defaultAggregateResultsEntryBatchSize
}

// cleanup at the end
@@ -675,8 +676,17 @@
}))
}

if opts.SeriesLimit < maxBatch {
maxBatch = opts.SeriesLimit
}

if opts.DocsLimit < maxBatch {
maxBatch = opts.DocsLimit
}

for _, reader := range readers {
if opts.LimitsExceeded(size, docsCount) {
if opts.LimitsExceeded(size, resultCount) {
exhaustive = true
break
}

@@ -685,19 +695,19 @@
return false, err
}
iterClosed = false // only once the iterator has been successfully Reset().

for iter.Next() {
if opts.LimitsExceeded(size, docsCount) {
if opts.LimitsExceeded(size, resultCount) {
exhaustive = true
break
}

field, term := iter.Current()
batch = b.appendFieldAndTermToBatch(batch, field, term, iterateTerms)
if len(batch) < batchSize {
if batch.Size() < maxBatch {
Review comment (Collaborator): can we add a comment on this check?

continue
}

batch, size, docsCount, err = b.addAggregateResults(cancellable, results, batch, source)
batch, size, resultCount, err = b.addAggregateResults(cancellable, results, batch, source)
if err != nil {
return false, err
}
@@ -714,14 +724,14 @@
}

// Add last batch to results if remaining.
Review comment (Collaborator): comment could be updated

if len(batch) > 0 {
batch, size, docsCount, err = b.addAggregateResults(cancellable, results, batch, source)
for len(batch) > 0 {
batch, size, resultCount, err = b.addAggregateResults(cancellable, results, batch, source)
if err != nil {
return false, err
}
}

return opts.exhaustive(size, docsCount), nil
return exhaustive || opts.exhaustive(size, resultCount), nil
}

func (b *block) appendFieldAndTermToBatch(
@@ -783,6 +793,7 @@ func (b *block) appendFieldAndTermToBatch(
} else {
batch = append(batch, entry)
}

return batch
}

@@ -797,12 +808,12 @@ func (b *block) pooledID(id []byte) ident.ID {
func (b *block) addAggregateResults(
cancellable *xresource.CancellableLifetime,
results AggregateResults,
batch []AggregateResultsEntry,
batch AggregateResultsEntries,
source []byte,
) ([]AggregateResultsEntry, int, int, error) {
) (AggregateResultsEntries, int, int, error) {
// update recently queried docs to monitor memory.
if results.EnforceLimits() {
if err := b.docsLimit.Inc(len(batch), source); err != nil {
if err := b.docsLimit.Inc(batch.Size(), source); err != nil {
Review comment (Collaborator, author): Will have to make a decision on how the docs limit is affected in aggregate results; this is not correct.

return batch, 0, 0, err
}
}
@@ -814,8 +825,8 @@
return batch, 0, 0, errCancelledQuery
}

// try to add the docs to the xresource.
size, docsCount := results.AddFields(batch)
// try to add the docs to the resource.
size, resultCount := results.AddFields(batch)

// immediately release the checkout on the lifetime of query.
cancellable.ReleaseCheckout()
@@ -828,7 +839,7 @@
batch = batch[:0]

// return results.
return batch, size, docsCount, nil
return batch, size, resultCount, nil
}

func (b *block) AddResults(
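Taken together, the block.go hunks above change aggregateWithSpan's control flow: the working batch is capped at the smaller of the series and docs limits, a full batch is flushed into the results, the readers/terms loops exit early once a limit is exceeded, and any trailing batch is drained in a loop rather than flushed once. The sketch below illustrates that flow under simplifying assumptions (plain integer limits that are both positive, a single flat list of terms, and a flush helper standing in for addAggregateResults); it is not m3's actual API.

```go
package main

import "fmt"

// queryLimits is a simplified stand-in for the aggregate query options
// referenced above (SeriesLimit, DocsLimit, LimitsExceeded).
type queryLimits struct {
	seriesLimit int
	docsLimit   int
}

func (l queryLimits) exceeded(size, docs int) bool {
	return size >= l.seriesLimit || docs >= l.docsLimit
}

// aggregateTerms walks terms in batches no larger than the smallest of the
// configured limits, flushes each full batch into results, stops early once
// a limit is hit, and drains any trailing batch in a loop at the end.
func aggregateTerms(terms []string, opts queryLimits, defaultBatch int) (results []string, limitsHit bool) {
	maxBatch := defaultBatch
	if opts.seriesLimit < maxBatch {
		maxBatch = opts.seriesLimit
	}
	if opts.docsLimit < maxBatch {
		maxBatch = opts.docsLimit
	}

	var batch []string
	size, docs := 0, 0

	flush := func() {
		// Stand-in for addAggregateResults: move the batch into results and
		// refresh the running size/docs counts (kept equal here for brevity).
		results = append(results, batch...)
		size, docs = len(results), len(results)
		batch = batch[:0]
	}

	for _, t := range terms {
		if opts.exceeded(size, docs) {
			return results, true
		}
		batch = append(batch, t)
		if len(batch) < maxBatch {
			continue // keep filling the batch until it reaches maxBatch
		}
		flush()
	}

	// Add the last batch to results if anything remains.
	for len(batch) > 0 {
		flush()
	}
	return results, opts.exceeded(size, docs)
}

func main() {
	res, hit := aggregateTerms([]string{"a", "b", "c", "d", "e"}, queryLimits{seriesLimit: 3, docsLimit: 10}, 2)
	fmt.Println(res, hit) // [a b c d] true
}
```

The returned boolean here only reports whether a limit was hit; how that maps onto the exhaustive flag returned by the real function is exactly what the review comments above are still discussing.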