colmem: improve memory accounting when memory limit is exceeded #86357

Merged · 2 commits · Aug 23, 2022
Changes from all commits
2 changes: 1 addition & 1 deletion pkg/col/coldata/batch.go
@@ -63,7 +63,7 @@ type Batch interface {
//
// NOTE: Reset can allocate a new Batch, so when calling from the vectorized
// engine consider either allocating a new Batch explicitly via
-// colexec.Allocator or calling ResetInternalBatch.
+// colmem.Allocator or calling ResetInternalBatch.
Reset(typs []*types.T, length int, factory ColumnFactory)
// ResetInternalBatch resets a batch and its underlying Vecs for reuse. It's
// important for callers to call ResetInternalBatch if they own internal
2 changes: 1 addition & 1 deletion pkg/col/colserde/main_test.go
@@ -25,7 +25,7 @@ import (
)

var (
-// testAllocator is a colexec.Allocator with an unlimited budget for use in
+// testAllocator is a colmem.Allocator with an unlimited budget for use in
// tests.
testAllocator *colmem.Allocator
testColumnFactory coldata.ColumnFactory
2 changes: 1 addition & 1 deletion pkg/sql/colcontainer/main_test.go
@@ -25,7 +25,7 @@ import (
)

var (
-// testAllocator is a colexec.Allocator with an unlimited budget for use in
+// testAllocator is a colmem.Allocator with an unlimited budget for use in
// tests.
testAllocator *colmem.Allocator
testColumnFactory coldata.ColumnFactory
44 changes: 34 additions & 10 deletions pkg/sql/colexec/colbuilder/execplan.go
@@ -362,12 +362,16 @@ func (r opResult) createDiskBackedSort(
// There is a limit specified, so we know exactly how many rows the
// sorter should output. Use a top K sorter, which uses a heap to avoid
// storing more rows than necessary.
+opName := opNamePrefix + "topk-sort"
var topKSorterMemAccount *mon.BoundAccount
topKSorterMemAccount, sorterMemMonitorName = args.MonitorRegistry.CreateMemAccountForSpillStrategyWithLimit(
-ctx, flowCtx, spoolMemLimit, opNamePrefix+"topk-sort", processorID,
+ctx, flowCtx, spoolMemLimit, opName, processorID,
)
+unlimitedMemAcc := args.MonitorRegistry.CreateUnlimitedMemAccount(
+ctx, flowCtx, opName, processorID,
+)
inMemorySorter = colexec.NewTopKSorter(
-colmem.NewAllocator(ctx, topKSorterMemAccount, factory), input,
+colmem.NewLimitedAllocator(ctx, topKSorterMemAccount, unlimitedMemAcc, factory), input,
inputTypes, ordering.Columns, int(matchLen), uint64(limit), maxOutputBatchMemSize,
)
} else if matchLen > 0 {
@@ -383,19 +387,27 @@
sortChunksMemAccount, sorterMemMonitorName = args.MonitorRegistry.CreateMemAccountForSpillStrategyWithLimit(
ctx, flowCtx, spoolMemLimit, opName, processorID,
)
+unlimitedMemAcc := args.MonitorRegistry.CreateUnlimitedMemAccount(
+ctx, flowCtx, opName, processorID,
+)
inMemorySorter = colexec.NewSortChunks(
-deselectorUnlimitedAllocator, colmem.NewAllocator(ctx, sortChunksMemAccount, factory),
+deselectorUnlimitedAllocator,
+colmem.NewLimitedAllocator(ctx, sortChunksMemAccount, unlimitedMemAcc, factory),
input, inputTypes, ordering.Columns, int(matchLen), maxOutputBatchMemSize,
)
} else {
// No optimizations possible. Default to the standard sort operator.
var sorterMemAccount *mon.BoundAccount
+opName := opNamePrefix + "sort-all"
sorterMemAccount, sorterMemMonitorName = args.MonitorRegistry.CreateMemAccountForSpillStrategyWithLimit(
-ctx, flowCtx, spoolMemLimit, opNamePrefix+"sort-all", processorID,
+ctx, flowCtx, spoolMemLimit, opName, processorID,
)
+unlimitedMemAcc := args.MonitorRegistry.CreateUnlimitedMemAccount(
+ctx, flowCtx, opName, processorID,
+)
inMemorySorter = colexec.NewSorter(
-colmem.NewAllocator(ctx, sorterMemAccount, factory), input,
-inputTypes, ordering.Columns, maxOutputBatchMemSize,
+colmem.NewLimitedAllocator(ctx, sorterMemAccount, unlimitedMemAcc, factory),
+input, inputTypes, ordering.Columns, maxOutputBatchMemSize,
)
}
if args.TestingKnobs.DiskSpillingDisabled {
@@ -930,9 +942,15 @@ func NewColOperator(
spillingQueueMemAccount := args.MonitorRegistry.CreateUnlimitedMemAccount(
ctx, flowCtx, spillingQueueMemMonitorName, spec.ProcessorID,
)
-newAggArgs.Allocator = colmem.NewAllocator(ctx, hashAggregatorMemAccount, factory)
+hashAggUnlimitedAcc := args.MonitorRegistry.CreateUnlimitedMemAccount(
+ctx, flowCtx, opName, spec.ProcessorID,
+)
+newAggArgs.Allocator = colmem.NewLimitedAllocator(ctx, hashAggregatorMemAccount, hashAggUnlimitedAcc, factory)
newAggArgs.MemAccount = hashAggregatorMemAccount
-hashTableAllocator := colmem.NewAllocator(ctx, hashTableMemAccount, factory)
+hashTableUnlimitedAcc := args.MonitorRegistry.CreateUnlimitedMemAccount(
+ctx, flowCtx, opName, spec.ProcessorID,
+)
+hashTableAllocator := colmem.NewLimitedAllocator(ctx, hashTableMemAccount, hashTableUnlimitedAcc, factory)
inMemoryHashAggregator := colexec.NewHashAggregator(
newAggArgs,
&colexecutils.NewSpillingQueueArgs{
@@ -1024,7 +1042,10 @@ func NewColOperator(
// ordered distinct, and we should plan it when we have
// non-empty ordered columns and we think that the probability
// of distinct tuples in the input is about 0.01 or less.
-allocator := colmem.NewAllocator(ctx, distinctMemAccount, factory)
+unlimitedAcc := args.MonitorRegistry.CreateUnlimitedMemAccount(
+ctx, flowCtx, "distinct" /* opName */, spec.ProcessorID,
+)
+allocator := colmem.NewLimitedAllocator(ctx, distinctMemAccount, unlimitedAcc, factory)
inMemoryUnorderedDistinct := colexec.NewUnorderedDistinct(
allocator, inputs[0].Root, core.Distinct.DistinctColumns, result.ColumnTypes,
core.Distinct.NullsAreDistinct, core.Distinct.ErrorOnDup,
@@ -1111,8 +1132,11 @@ func NewColOperator(
core.HashJoiner.RightEqColumnsAreKey,
)

+hashJoinerUnlimitedAcc := args.MonitorRegistry.CreateUnlimitedMemAccount(
+ctx, flowCtx, opName, spec.ProcessorID,
+)
inMemoryHashJoiner := colexecjoin.NewHashJoiner(
-colmem.NewAllocator(ctx, hashJoinerMemAccount, factory),
+colmem.NewLimitedAllocator(ctx, hashJoinerMemAccount, hashJoinerUnlimitedAcc, factory),
hashJoinerUnlimitedAllocator, hjSpec, inputs[0].Root, inputs[1].Root,
colexecjoin.HashJoinerInitialNumBuckets,
)
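Every call site changed above follows the same wiring: the spill-strategy account keeps its limit, a second unlimited account is registered under the same opName, and both are handed to colmem.NewLimitedAllocator where colmem.NewAllocator previously took only the limited one. The standalone sketch below models one plausible reading of why the extra account exists, in line with the PR title: memory that has already been allocated stays accounted for even once the limit is hit. All types here (boundAccount, limitedAllocator) are hypothetical stand-ins, not the mon or colmem APIs.

```go
package main

import (
	"errors"
	"fmt"
)

// errBudgetExceeded mimics the error returned when a limited memory account
// cannot grow any further.
var errBudgetExceeded = errors.New("memory budget exceeded")

// boundAccount is a toy stand-in for mon.BoundAccount: a running total with an
// optional limit (0 means unlimited).
type boundAccount struct {
	name  string
	used  int64
	limit int64
}

// grow reserves n bytes, failing if the account has a limit and would exceed it.
func (a *boundAccount) grow(n int64) error {
	if a.limit > 0 && a.used+n > a.limit {
		return errBudgetExceeded
	}
	a.used += n
	return nil
}

// limitedAllocator pairs the limited spill-strategy account with the unlimited
// account registered under the same opName, mirroring the two accounts each
// call site above now creates. (Hypothetical stand-in for what
// colmem.NewLimitedAllocator is given; not the real colmem implementation.)
type limitedAllocator struct {
	limited   *boundAccount
	unlimited *boundAccount
}

func newLimitedAllocator(limited, unlimited *boundAccount) *limitedAllocator {
	return &limitedAllocator{limited: limited, unlimited: unlimited}
}

// adjustMemoryUsageAfterAllocation accounts for bytes that were already
// allocated: when the limited account rejects the growth, the usage is moved
// to the unlimited account so it stays tracked, and the budget error still
// surfaces so the disk-backed operator can spill. This is an inferred reading
// of the change, not the verbatim colmem behavior.
func (a *limitedAllocator) adjustMemoryUsageAfterAllocation(delta int64) error {
	if err := a.limited.grow(delta); err != nil {
		_ = a.unlimited.grow(delta) // never fails: this account has no limit
		return err
	}
	return nil
}

func main() {
	// Analogous to the "topk-sort" case: a limited spill account plus an
	// unlimited account owned by the same operator.
	spillAcc := &boundAccount{name: "topk-sort", limit: 1 << 10}
	unlimitedAcc := &boundAccount{name: "topk-sort-unlimited"}
	alloc := newLimitedAllocator(spillAcc, unlimitedAcc)

	if err := alloc.adjustMemoryUsageAfterAllocation(4 << 10); err != nil {
		fmt.Printf("%v; %d bytes remain accounted against %s\n",
			err, unlimitedAcc.used, unlimitedAcc.name)
	}
}
```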
2 changes: 1 addition & 1 deletion pkg/sql/colexec/colexecagg/any_not_null_agg_tmpl.go
@@ -162,7 +162,7 @@ func (a *anyNotNull_TYPE_AGGKINDAgg) Compute(
)
execgen.SETVARIABLESIZE(newCurAggSize, a.curAgg)
if newCurAggSize != oldCurAggSize {
-a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize))
+a.allocator.AdjustMemoryUsageAfterAllocation(int64(newCurAggSize - oldCurAggSize))
}
}

2 changes: 1 addition & 1 deletion pkg/sql/colexec/colexecagg/avg_agg_tmpl.go
@@ -181,7 +181,7 @@ func (a *avg_TYPE_AGGKINDAgg) Compute(
// {{end}}
execgen.SETVARIABLESIZE(newCurSumSize, a.curSum)
if newCurSumSize != oldCurSumSize {
-a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize))
+a.allocator.AdjustMemoryUsageAfterAllocation(int64(newCurSumSize - oldCurSumSize))
}
}

2 changes: 1 addition & 1 deletion pkg/sql/colexec/colexecagg/bool_and_or_agg_tmpl.go
@@ -142,7 +142,7 @@ func (a *bool_OP_TYPE_AGGKINDAgg) Compute(
// {{end}}
execgen.SETVARIABLESIZE(newCurAggSize, a.curAgg)
if newCurAggSize != oldCurAggSize {
-a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize))
+a.allocator.AdjustMemoryUsageAfterAllocation(int64(newCurAggSize - oldCurAggSize))
}
}

2 changes: 1 addition & 1 deletion pkg/sql/colexec/colexecagg/concat_agg_tmpl.go
@@ -119,7 +119,7 @@ func (a *concat_AGGKINDAgg) Compute(
// {{end}}
execgen.SETVARIABLESIZE(newCurAggSize, a.curAgg)
if newCurAggSize != oldCurAggSize {
-a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize))
+a.allocator.AdjustMemoryUsageAfterAllocation(int64(newCurAggSize - oldCurAggSize))
}
}

2 changes: 1 addition & 1 deletion pkg/sql/colexec/colexecagg/default_agg_tmpl.go
@@ -196,7 +196,7 @@ func (a *default_AGGKINDAggAlloc) newAggFunc() AggregateFunc {
}
f.allocator = a.allocator
f.scratch.otherArgs = a.otherArgsScratch
-a.allocator.AdjustMemoryUsage(f.fn.Size())
+a.allocator.AdjustMemoryUsageAfterAllocation(f.fn.Size())
a.aggFuncs = a.aggFuncs[1:]
a.returnedFns = append(a.returnedFns, f)
return f
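The aggregate templates above share one caller-side pattern: measure the variable-size aggregate state before and after mutating it, then register the difference. The rename from AdjustMemoryUsage to AdjustMemoryUsageAfterAllocation makes explicit that the bytes already exist by the time they are accounted for. A self-contained sketch of that pattern follows; memoryAccountant, countingAccountant, and concatState are hypothetical stand-ins, not the colmem or colexecagg types.

```go
package main

import "fmt"

// memoryAccountant captures the single method the templates rely on; in
// CockroachDB this is a method on *colmem.Allocator (hypothetical interface).
type memoryAccountant interface {
	AdjustMemoryUsageAfterAllocation(delta int64)
}

// countingAccountant just sums deltas so the example runs on its own.
type countingAccountant struct{ used int64 }

func (c *countingAccountant) AdjustMemoryUsageAfterAllocation(delta int64) { c.used += delta }

// concatState mimics the variable-size state (curAgg) of a concat-style aggregate.
type concatState struct {
	allocator memoryAccountant
	curAgg    []byte
}

// compute mirrors the templates' Compute pattern: size the state before and
// after mutating it, then account for the delta of an allocation that has
// already happened (hence "AfterAllocation").
func (s *concatState) compute(value []byte) {
	oldCurAggSize := int64(len(s.curAgg)) // stand-in for execgen.SETVARIABLESIZE
	s.curAgg = append(s.curAgg, value...)
	newCurAggSize := int64(len(s.curAgg))
	if newCurAggSize != oldCurAggSize {
		s.allocator.AdjustMemoryUsageAfterAllocation(newCurAggSize - oldCurAggSize)
	}
}

func main() {
	acc := &countingAccountant{}
	agg := &concatState{allocator: acc}
	agg.compute([]byte("hello, "))
	agg.compute([]byte("world"))
	fmt.Println("accounted bytes:", acc.used) // 12
}
```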
22 changes: 11 additions & 11 deletions pkg/sql/colexec/colexecagg/hash_any_not_null_agg.eg.go


12 changes: 6 additions & 6 deletions pkg/sql/colexec/colexecagg/hash_avg_agg.eg.go


4 changes: 2 additions & 2 deletions pkg/sql/colexec/colexecagg/hash_bool_and_or_agg.eg.go


2 changes: 1 addition & 1 deletion pkg/sql/colexec/colexecagg/hash_concat_agg.eg.go


2 changes: 1 addition & 1 deletion pkg/sql/colexec/colexecagg/hash_default_agg.eg.go

(Diffs for these generated files are not rendered by default.)