From cf682580c9c5db49cce6ef7f21ec5f1d6b54f90b Mon Sep 17 00:00:00 2001 From: Yahor Yuzefovich Date: Fri, 29 Apr 2022 12:57:56 -0700 Subject: [PATCH 1/6] colmem: improve the behavior of ResetMaybeReallocate Previously, `Allocator.ResetMaybeReallocate` would always grow the capacity of the batch until `coldata.BatchSize()` (unless the memory limit has been exceeded). This behavior allows us to have batches with dynamic size when we don't know how many rows we need to process (for example, in the `cFetcher` we start out with 1 row, then grow it exponentially - 2, 4, 8, etc). However, in some cases we know exactly how many rows we want to include in the batch, so that behavior can result in us re-allocating a batch needlessly when the old batch already has enough capacity. This commit improves the situation by adding a knob to indicate that if the desired capacity is satisfied by the old batch, then it should not be re-allocated. All callers have been audited accordingly. Release note: None --- pkg/sql/colexec/colexecjoin/crossjoiner.go | 1 + pkg/sql/colexec/colexecjoin/hashjoiner.go | 1 + .../colexecjoin/mergejoiner_exceptall.eg.go | 1 + .../colexecjoin/mergejoiner_fullouter.eg.go | 1 + .../colexecjoin/mergejoiner_inner.eg.go | 1 + .../mergejoiner_intersectall.eg.go | 1 + .../colexecjoin/mergejoiner_leftanti.eg.go | 1 + .../colexecjoin/mergejoiner_leftouter.eg.go | 1 + .../colexecjoin/mergejoiner_leftsemi.eg.go | 1 + .../colexecjoin/mergejoiner_rightanti.eg.go | 1 + .../colexecjoin/mergejoiner_rightouter.eg.go | 1 + .../colexecjoin/mergejoiner_rightsemi.eg.go | 1 + .../colexec/colexecjoin/mergejoiner_tmpl.go | 1 + pkg/sql/colexec/colexecutils/deselector.go | 1 + .../colexec/colexecutils/spilling_queue.go | 2 ++ .../colexec/colexecwindow/buffered_window.go | 1 + pkg/sql/colexec/columnarizer.go | 3 ++- pkg/sql/colexec/hash_aggregator.eg.go | 2 ++ pkg/sql/colexec/hash_aggregator_tmpl.go | 1 + pkg/sql/colexec/ordered_aggregator.go | 6 +++-- pkg/sql/colexec/ordered_synchronizer.eg.go | 1 + pkg/sql/colexec/ordered_synchronizer_tmpl.go | 1 + pkg/sql/colexec/sort.go | 5 +++- pkg/sql/colexec/sorttopk.go | 5 +++- pkg/sql/colfetcher/cfetcher.go | 1 + pkg/sql/colflow/colrpc/inbox.go | 5 +++- pkg/sql/colmem/allocator.go | 24 +++++++++++++++---- pkg/sql/colmem/allocator_test.go | 18 +++++++++----- 28 files changed, 73 insertions(+), 16 deletions(-) diff --git a/pkg/sql/colexec/colexecjoin/crossjoiner.go b/pkg/sql/colexec/colexecjoin/crossjoiner.go index c44084571cff..bb2c877e91b2 100644 --- a/pkg/sql/colexec/colexecjoin/crossjoiner.go +++ b/pkg/sql/colexec/colexecjoin/crossjoiner.go @@ -107,6 +107,7 @@ func (c *crossJoiner) Next() coldata.Batch { } c.output, _ = c.unlimitedAllocator.ResetMaybeReallocate( c.outputTypes, c.output, willEmit, c.maxOutputBatchMemSize, + true, /* desiredCapacitySufficient */ ) if willEmit > c.output.Capacity() { willEmit = c.output.Capacity() diff --git a/pkg/sql/colexec/colexecjoin/hashjoiner.go b/pkg/sql/colexec/colexecjoin/hashjoiner.go index eed7a551ff7b..4c76e13a0639 100644 --- a/pkg/sql/colexec/colexecjoin/hashjoiner.go +++ b/pkg/sql/colexec/colexecjoin/hashjoiner.go @@ -725,6 +725,7 @@ func (hj *hashJoiner) resetOutput(nResults int) { const maxOutputBatchMemSize = math.MaxInt64 hj.output, _ = hj.outputUnlimitedAllocator.ResetMaybeReallocate( hj.outputTypes, hj.output, nResults, maxOutputBatchMemSize, + true, /* desiredCapacitySufficient */ ) } diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go
index b3eeb7007a04..785f2eb886db 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go @@ -14219,6 +14219,7 @@ func (o *mergeJoinExceptAllOp) buildFromBufferedGroup() (bufferedGroupComplete b func (o *mergeJoinExceptAllOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go index a4b80dc6dc37..e36363718a5c 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go @@ -15366,6 +15366,7 @@ func (o *mergeJoinFullOuterOp) buildFromBufferedGroup() (bufferedGroupComplete b func (o *mergeJoinFullOuterOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go index e6800eb3cead..67b4da51c57b 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go @@ -10900,6 +10900,7 @@ func (o *mergeJoinInnerOp) buildFromBufferedGroup() (bufferedGroupComplete bool) func (o *mergeJoinInnerOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go index 759f3c619d42..081238597e5c 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go @@ -11610,6 +11610,7 @@ func (o *mergeJoinIntersectAllOp) buildFromBufferedGroup() (bufferedGroupComplet func (o *mergeJoinIntersectAllOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go index 09cb19892d15..98699d35cd34 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go @@ -13129,6 +13129,7 @@ func (o *mergeJoinLeftAntiOp) buildFromBufferedGroup() (bufferedGroupComplete bo func (o *mergeJoinLeftAntiOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go index ba18ed6e7bc1..71f708345c82 100644 --- 
a/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go @@ -13156,6 +13156,7 @@ func (o *mergeJoinLeftOuterOp) buildFromBufferedGroup() (bufferedGroupComplete b func (o *mergeJoinLeftOuterOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go index 7b0f7d529e0b..c770e70e7b9b 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go @@ -10853,6 +10853,7 @@ func (o *mergeJoinLeftSemiOp) buildFromBufferedGroup() (bufferedGroupComplete bo func (o *mergeJoinLeftSemiOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go index ba4c19f044f1..eec58431c897 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go @@ -13060,6 +13060,7 @@ func (o *mergeJoinRightAntiOp) buildFromBufferedGroup() (bufferedGroupComplete b func (o *mergeJoinRightAntiOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go index f628e0495d35..54722dc88882 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go @@ -13110,6 +13110,7 @@ func (o *mergeJoinRightOuterOp) buildFromBufferedGroup() (bufferedGroupComplete func (o *mergeJoinRightOuterOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go index bccc21907e56..046f95311dee 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go @@ -10813,6 +10813,7 @@ func (o *mergeJoinRightSemiOp) buildFromBufferedGroup() (bufferedGroupComplete b func (o *mergeJoinRightSemiOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go b/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go index 97beb827907a..af606bb76ed2 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go +++ 
b/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go @@ -1355,6 +1355,7 @@ func _SOURCE_FINISHED_SWITCH(_JOIN_TYPE joinTypeInfo) { // */}} func (o *mergeJoin_JOIN_TYPE_STRINGOp) Next() coldata.Batch { o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate( o.outputTypes, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) o.outputCapacity = o.output.Capacity() o.bufferedGroup.helper.output = o.output diff --git a/pkg/sql/colexec/colexecutils/deselector.go b/pkg/sql/colexec/colexecutils/deselector.go index 9d3e87e3ef41..1c8fdbd8a379 100644 --- a/pkg/sql/colexec/colexecutils/deselector.go +++ b/pkg/sql/colexec/colexecutils/deselector.go @@ -62,6 +62,7 @@ func (p *deselectorOp) Next() coldata.Batch { const maxBatchMemSize = math.MaxInt64 p.output, _ = p.unlimitedAllocator.ResetMaybeReallocate( p.inputTypes, p.output, batch.Length(), maxBatchMemSize, + true, /* desiredCapacitySufficient */ ) sel := batch.Selection() p.unlimitedAllocator.PerformOperation(p.output.ColVecs(), func() { diff --git a/pkg/sql/colexec/colexecutils/spilling_queue.go b/pkg/sql/colexec/colexecutils/spilling_queue.go index 193a5fb2d783..6dae386d0d05 100644 --- a/pkg/sql/colexec/colexecutils/spilling_queue.go +++ b/pkg/sql/colexec/colexecutils/spilling_queue.go @@ -193,6 +193,7 @@ func (q *SpillingQueue) Enqueue(ctx context.Context, batch coldata.Batch) { const maxBatchMemSize = math.MaxInt64 q.diskQueueDeselectionScratch, _ = q.unlimitedAllocator.ResetMaybeReallocate( q.typs, q.diskQueueDeselectionScratch, n, maxBatchMemSize, + true, /* desiredCapacitySufficient */ ) q.unlimitedAllocator.PerformOperation(q.diskQueueDeselectionScratch.ColVecs(), func() { for i := range q.typs { @@ -295,6 +296,7 @@ func (q *SpillingQueue) Enqueue(ctx context.Context, batch coldata.Batch) { // attention to the memory registered with the unlimited allocator, and // we will stop adding tuples into this batch and spill when needed. 
math.MaxInt64, /* maxBatchMemSize */ + true, /* desiredCapacitySufficient */ ) q.unlimitedAllocator.PerformOperation(newBatch.ColVecs(), func() { for i := range q.typs { diff --git a/pkg/sql/colexec/colexecwindow/buffered_window.go b/pkg/sql/colexec/colexecwindow/buffered_window.go index a5f1cf83c27d..d5425789eca7 100644 --- a/pkg/sql/colexec/colexecwindow/buffered_window.go +++ b/pkg/sql/colexec/colexecwindow/buffered_window.go @@ -251,6 +251,7 @@ func (b *bufferedWindowOp) Next() coldata.Batch { const maxBatchMemSize = math.MaxInt64 b.currentBatch, _ = b.allocator.ResetMaybeReallocate( b.outputTypes, b.currentBatch, batch.Length(), maxBatchMemSize, + true, /* desiredCapacitySufficient */ ) b.allocator.PerformOperation(b.currentBatch.ColVecs(), func() { for colIdx, vec := range batch.ColVecs() { diff --git a/pkg/sql/colexec/columnarizer.go b/pkg/sql/colexec/columnarizer.go index 9029e1dd61fa..ef9bf388ab2e 100644 --- a/pkg/sql/colexec/columnarizer.go +++ b/pkg/sql/colexec/columnarizer.go @@ -198,7 +198,8 @@ func (c *Columnarizer) Next() coldata.Batch { switch c.mode { case columnarizerBufferingMode: c.batch, reallocated = c.allocator.ResetMaybeReallocate( - c.typs, c.batch, 1 /* minDesiredCapacity */, c.maxBatchMemSize, + c.typs, c.batch, 1, /* minDesiredCapacity */ + c.maxBatchMemSize, false, /* desiredCapacitySufficient */ ) case columnarizerStreamingMode: // Note that we're not using ResetMaybeReallocate because we will diff --git a/pkg/sql/colexec/hash_aggregator.eg.go b/pkg/sql/colexec/hash_aggregator.eg.go index 83530f9519fe..40dc0d6fca90 100644 --- a/pkg/sql/colexec/hash_aggregator.eg.go +++ b/pkg/sql/colexec/hash_aggregator.eg.go @@ -459,6 +459,7 @@ func getNext_true(op *hashAggregator) coldata.Batch { // len(op.buckets) capacity. op.output, _ = op.accountingHelper.ResetMaybeReallocate( op.outputTypes, op.output, len(op.buckets), op.maxOutputBatchMemSize, + true, /* desiredCapacitySufficient */ ) curOutputIdx := 0 for curOutputIdx < op.output.Capacity() && @@ -606,6 +607,7 @@ func getNext_false(op *hashAggregator) coldata.Batch { // len(op.buckets) capacity. op.output, _ = op.accountingHelper.ResetMaybeReallocate( op.outputTypes, op.output, len(op.buckets), op.maxOutputBatchMemSize, + true, /* desiredCapacitySufficient */ ) curOutputIdx := 0 for curOutputIdx < op.output.Capacity() && diff --git a/pkg/sql/colexec/hash_aggregator_tmpl.go b/pkg/sql/colexec/hash_aggregator_tmpl.go index f9c86e41e246..02b44dab0976 100644 --- a/pkg/sql/colexec/hash_aggregator_tmpl.go +++ b/pkg/sql/colexec/hash_aggregator_tmpl.go @@ -345,6 +345,7 @@ func getNext(op *hashAggregator, partialOrder bool) coldata.Batch { // len(op.buckets) capacity. 
op.output, _ = op.accountingHelper.ResetMaybeReallocate( op.outputTypes, op.output, len(op.buckets), op.maxOutputBatchMemSize, + true, /* desiredCapacitySufficient */ ) curOutputIdx := 0 for curOutputIdx < op.output.Capacity() && diff --git a/pkg/sql/colexec/ordered_aggregator.go b/pkg/sql/colexec/ordered_aggregator.go index c99d9a986060..56cc0024288e 100644 --- a/pkg/sql/colexec/ordered_aggregator.go +++ b/pkg/sql/colexec/ordered_aggregator.go @@ -285,14 +285,16 @@ func (a *orderedAggregator) Next() coldata.Batch { a.scratch.Batch = a.allocator.NewMemBatchWithFixedCapacity(a.outputTypes, newMinCapacity) } else { a.scratch.Batch, _ = a.allocator.ResetMaybeReallocate( - a.outputTypes, a.scratch.Batch, newMinCapacity, maxBatchMemSize, + a.outputTypes, a.scratch.Batch, newMinCapacity, + maxBatchMemSize, true, /* desiredCapacitySufficient */ ) } // We will never copy more than coldata.BatchSize() into the // temporary buffer, so a half of the scratch's capacity will always // be sufficient. a.scratch.tempBuffer, _ = a.allocator.ResetMaybeReallocate( - a.outputTypes, a.scratch.tempBuffer, newMinCapacity/2, maxBatchMemSize, + a.outputTypes, a.scratch.tempBuffer, newMinCapacity/2, + maxBatchMemSize, true, /* desiredCapacitySufficient */ ) for fnIdx, fn := range a.bucket.fns { fn.SetOutput(a.scratch.ColVec(fnIdx)) diff --git a/pkg/sql/colexec/ordered_synchronizer.eg.go b/pkg/sql/colexec/ordered_synchronizer.eg.go index 0aa24eb8ccaa..041c58dd1c12 100644 --- a/pkg/sql/colexec/ordered_synchronizer.eg.go +++ b/pkg/sql/colexec/ordered_synchronizer.eg.go @@ -270,6 +270,7 @@ func (o *OrderedSynchronizer) resetOutput() { var reallocated bool o.output, reallocated = o.accountingHelper.ResetMaybeReallocate( o.typs, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) if reallocated { o.outVecs.SetBatch(o.output) diff --git a/pkg/sql/colexec/ordered_synchronizer_tmpl.go b/pkg/sql/colexec/ordered_synchronizer_tmpl.go index f68da4c57c1b..c11e831eef24 100644 --- a/pkg/sql/colexec/ordered_synchronizer_tmpl.go +++ b/pkg/sql/colexec/ordered_synchronizer_tmpl.go @@ -220,6 +220,7 @@ func (o *OrderedSynchronizer) resetOutput() { var reallocated bool o.output, reallocated = o.accountingHelper.ResetMaybeReallocate( o.typs, o.output, 1 /* minDesiredCapacity */, o.memoryLimit, + false, /* desiredCapacitySufficient */ ) if reallocated { o.outVecs.SetBatch(o.output) diff --git a/pkg/sql/colexec/sort.go b/pkg/sql/colexec/sort.go index 37ff8570c685..b2c10115fdda 100644 --- a/pkg/sql/colexec/sort.go +++ b/pkg/sql/colexec/sort.go @@ -287,7 +287,10 @@ func (p *sortOp) Next() coldata.Batch { p.state = sortDone continue } - p.output, _ = p.allocator.ResetMaybeReallocate(p.inputTypes, p.output, toEmit, p.maxOutputBatchMemSize) + p.output, _ = p.allocator.ResetMaybeReallocate( + p.inputTypes, p.output, toEmit, p.maxOutputBatchMemSize, + true, /* desiredCapacitySufficient */ + ) if toEmit > p.output.Capacity() { toEmit = p.output.Capacity() } diff --git a/pkg/sql/colexec/sorttopk.go b/pkg/sql/colexec/sorttopk.go index 96b69f1af7eb..7bc269da030f 100644 --- a/pkg/sql/colexec/sorttopk.go +++ b/pkg/sql/colexec/sorttopk.go @@ -200,7 +200,10 @@ func (t *topKSorter) emit() coldata.Batch { // We're done. 
return coldata.ZeroBatch } - t.output, _ = t.allocator.ResetMaybeReallocate(t.inputTypes, t.output, toEmit, t.maxOutputBatchMemSize) + t.output, _ = t.allocator.ResetMaybeReallocate( + t.inputTypes, t.output, toEmit, t.maxOutputBatchMemSize, + true, /* desiredCapacitySufficient */ + ) if toEmit > t.output.Capacity() { toEmit = t.output.Capacity() } diff --git a/pkg/sql/colfetcher/cfetcher.go b/pkg/sql/colfetcher/cfetcher.go index e50731f36424..0ffa7619ae8b 100644 --- a/pkg/sql/colfetcher/cfetcher.go +++ b/pkg/sql/colfetcher/cfetcher.go @@ -325,6 +325,7 @@ func (cf *cFetcher) resetBatch() { } cf.machine.batch, reallocated = cf.accountingHelper.ResetMaybeReallocate( cf.table.typs, cf.machine.batch, minDesiredCapacity, cf.memoryLimit, + false, /* desiredCapacitySufficient */ ) if reallocated { cf.machine.colvecs.SetBatch(cf.machine.batch) diff --git a/pkg/sql/colflow/colrpc/inbox.go b/pkg/sql/colflow/colrpc/inbox.go index db642a1e19cc..67d7ea657a52 100644 --- a/pkg/sql/colflow/colrpc/inbox.go +++ b/pkg/sql/colflow/colrpc/inbox.go @@ -429,7 +429,10 @@ func (i *Inbox) Next() coldata.Batch { } // We rely on the outboxes to produce reasonably sized batches. const maxBatchMemSize = math.MaxInt64 - i.scratch.b, _ = i.allocator.ResetMaybeReallocate(i.typs, i.scratch.b, batchLength, maxBatchMemSize) + i.scratch.b, _ = i.allocator.ResetMaybeReallocate( + i.typs, i.scratch.b, batchLength, maxBatchMemSize, + true, /* desiredCapacitySufficient */ + ) i.allocator.PerformOperation(i.scratch.b.ColVecs(), func() { if err := i.converter.ArrowToBatch(i.scratch.data, batchLength, i.scratch.b); err != nil { colexecerror.InternalError(err) diff --git a/pkg/sql/colmem/allocator.go b/pkg/sql/colmem/allocator.go index e00867a0ed2e..3f4316a56248 100644 --- a/pkg/sql/colmem/allocator.go +++ b/pkg/sql/colmem/allocator.go @@ -156,7 +156,10 @@ func (a *Allocator) NewMemBatchNoCols(typs []*types.T, capacity int) coldata.Bat // // The method will grow the allocated capacity of the batch exponentially // (possibly incurring a reallocation), until the batch reaches -// coldata.BatchSize() in capacity or maxBatchMemSize in the memory footprint. +// coldata.BatchSize() in capacity or maxBatchMemSize in the memory footprint if +// desiredCapacitySufficient is false. When that parameter is true and the +// capacity of old batch is at least minDesiredCapacity, then the old batch is +// reused. // // NOTE: if the reallocation occurs, then the memory under the old batch is // released, so it is expected that the caller will lose the references to the @@ -164,7 +167,11 @@ func (a *Allocator) NewMemBatchNoCols(typs []*types.T, capacity int) coldata.Bat // Note: the method assumes that minDesiredCapacity is at least 0 and will clamp // minDesiredCapacity to be between 1 and coldata.BatchSize() inclusive. func (a *Allocator) ResetMaybeReallocate( - typs []*types.T, oldBatch coldata.Batch, minDesiredCapacity int, maxBatchMemSize int64, + typs []*types.T, + oldBatch coldata.Batch, + minDesiredCapacity int, + maxBatchMemSize int64, + desiredCapacitySufficient bool, ) (newBatch coldata.Batch, reallocated bool) { if minDesiredCapacity < 0 { colexecerror.InternalError(errors.AssertionFailedf("invalid minDesiredCapacity %d", minDesiredCapacity)) @@ -179,6 +186,11 @@ func (a *Allocator) ResetMaybeReallocate( } else { // If old batch is already of the largest capacity, we will reuse it. 
useOldBatch := oldBatch.Capacity() == coldata.BatchSize() + // If the old batch already satisfies the desired capacity which is + // sufficient, we will reuse it too. + if desiredCapacitySufficient && oldBatch.Capacity() >= minDesiredCapacity { + useOldBatch = true + } // Avoid calculating the memory footprint if possible. var oldBatchMemSize int64 if !useOldBatch { @@ -568,10 +580,14 @@ func (h *SetAccountingHelper) getBytesLikeTotalSize() int64 { // Allocator.ResetMaybeReallocate (and thus has the same contract) with an // additional logic for memory tracking purposes. func (h *SetAccountingHelper) ResetMaybeReallocate( - typs []*types.T, oldBatch coldata.Batch, minCapacity int, maxBatchMemSize int64, + typs []*types.T, + oldBatch coldata.Batch, + minCapacity int, + maxBatchMemSize int64, + desiredCapacitySufficient bool, ) (newBatch coldata.Batch, reallocated bool) { newBatch, reallocated = h.Allocator.ResetMaybeReallocate( - typs, oldBatch, minCapacity, maxBatchMemSize, + typs, oldBatch, minCapacity, maxBatchMemSize, desiredCapacitySufficient, ) if reallocated && !h.allFixedLength { // Allocator.ResetMaybeReallocate has released the precise memory diff --git a/pkg/sql/colmem/allocator_test.go b/pkg/sql/colmem/allocator_test.go index ed9cb524ac9f..ed819efd2c6c 100644 --- a/pkg/sql/colmem/allocator_test.go +++ b/pkg/sql/colmem/allocator_test.go @@ -119,13 +119,13 @@ func TestResetMaybeReallocate(t *testing.T) { typs := []*types.T{types.Bytes} // Allocate a new batch and modify it. - b, _ = testAllocator.ResetMaybeReallocate(typs, b, coldata.BatchSize(), math.MaxInt64) + b, _ = testAllocator.ResetMaybeReallocate(typs, b, coldata.BatchSize(), math.MaxInt64, false /* desiredCapacitySufficient */) b.SetSelection(true) b.Selection()[0] = 1 b.ColVec(0).Bytes().Set(1, []byte("foo")) oldBatch := b - b, _ = testAllocator.ResetMaybeReallocate(typs, b, coldata.BatchSize(), math.MaxInt64) + b, _ = testAllocator.ResetMaybeReallocate(typs, b, coldata.BatchSize(), math.MaxInt64, false /* desiredCapacitySufficient */) // We should have used the same batch, and now it should be in a "reset" // state. require.Equal(t, oldBatch, b) @@ -152,15 +152,21 @@ func TestResetMaybeReallocate(t *testing.T) { // Allocate a new batch attempting to use the batch with too small of a // capacity - new batch should **not** be allocated because the memory // limit is already exceeded. - b, _ = testAllocator.ResetMaybeReallocate(typs, smallBatch, minDesiredCapacity, smallMemSize) + b, _ = testAllocator.ResetMaybeReallocate(typs, smallBatch, minDesiredCapacity, smallMemSize, false /* desiredCapacitySufficient */) require.Equal(t, smallBatch, b) require.Equal(t, minDesiredCapacity/2, b.Capacity()) oldBatch := b + // Reset the batch asking for the same small desired capacity when it is + // sufficient - the same batch should be returned. + b, _ = testAllocator.ResetMaybeReallocate(typs, b, minDesiredCapacity/2, smallMemSize, true /* desiredCapacitySufficient */) + require.Equal(t, smallBatch, b) + require.Equal(t, minDesiredCapacity/2, b.Capacity()) + // Reset the batch and confirm that a new batch is allocated because we // have given larger memory limit. 
- b, _ = testAllocator.ResetMaybeReallocate(typs, b, minDesiredCapacity, largeMemSize) + b, _ = testAllocator.ResetMaybeReallocate(typs, b, minDesiredCapacity, largeMemSize, false /* desiredCapacitySufficient */) require.NotEqual(t, oldBatch, b) require.Equal(t, minDesiredCapacity, b.Capacity()) @@ -171,7 +177,7 @@ func TestResetMaybeReallocate(t *testing.T) { // ResetMaybeReallocate truncates the capacity at // coldata.BatchSize(), so we run this part of the test only when // doubled capacity will not be truncated. - b, _ = testAllocator.ResetMaybeReallocate(typs, b, minDesiredCapacity, largeMemSize) + b, _ = testAllocator.ResetMaybeReallocate(typs, b, minDesiredCapacity, largeMemSize, false /* desiredCapacitySufficient */) require.NotEqual(t, oldBatch, b) require.Equal(t, 2*minDesiredCapacity, b.Capacity()) } @@ -300,7 +306,7 @@ func TestSetAccountingHelper(t *testing.T) { // new batch with larger capacity might be allocated. maxBatchMemSize = largeMemSize } - batch, _ = helper.ResetMaybeReallocate(typs, batch, numRows, maxBatchMemSize) + batch, _ = helper.ResetMaybeReallocate(typs, batch, numRows, maxBatchMemSize, false /* desiredCapacitySufficient */) for rowIdx := 0; rowIdx < batch.Capacity(); rowIdx++ { for vecIdx, typ := range typs { From 5d09b054b0c76ba72232c4acd7014474ac15367c Mon Sep 17 00:00:00 2001 From: Rafi Shamim Date: Thu, 19 May 2022 15:09:11 -0400 Subject: [PATCH 2/6] roachtest: fix pgjdbc test More tests pass now that we support ALTER SEQUENCE ... RESTART Release note: None --- pkg/cmd/roachtest/tests/pgjdbc_blocklist.go | 27 --------------------- 1 file changed, 27 deletions(-) diff --git a/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go b/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go index 831bab33587d..1bf87f7b58d9 100644 --- a/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go +++ b/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go @@ -542,35 +542,8 @@ var pgjdbcBlockList22_1 = blocklist{ "org.postgresql.test.jdbc2.TimezoneTest.testSetDate": "41776", "org.postgresql.test.jdbc2.TimezoneTest.testSetTime": "41776", "org.postgresql.test.jdbc2.TimezoneTest.testSetTimestamp": "41776", - "org.postgresql.test.jdbc2.UpdateableResultTest.simpleAndUpdateableSameQuery": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.test2193": "unknown", "org.postgresql.test.jdbc2.UpdateableResultTest.testArray": "26925", - "org.postgresql.test.jdbc2.UpdateableResultTest.testBadColumnIndexes": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testCancelRowUpdates": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testDeleteRows": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testInsertRowIllegalMethods": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testMultiColumnUpdate": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testMultiColumnUpdateWithoutAllColumns": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testMultiColumnUpdateWithoutPrimaryKey": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testNoUniqueNotUpdateable": "unknown", "org.postgresql.test.jdbc2.UpdateableResultTest.testOidUpdatable": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testPositioning": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testPrimaryAndUniqueUpdateableByPrimary": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testPrimaryAndUniqueUpdateableByUnique": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testReturnSerial": "unknown", - 
"org.postgresql.test.jdbc2.UpdateableResultTest.testUniqueWithNotNullableColumnUpdateable": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUniqueWithNullAndNotNullableColumnUpdateable": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUniqueWithNullableColumnNotUpdateable": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUniqueWithNullableColumnsNotUpdatable": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateBoolean": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateDate": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateReadOnlyResultSet": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateSelectOnly": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateStreams": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateTimestamp": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateable": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testUpdateablePreparedStatement": "unknown", - "org.postgresql.test.jdbc2.UpdateableResultTest.testZeroRowResult": "unknown", "org.postgresql.test.jdbc3.CompositeTest.testComplexArgumentSelect": "27793", "org.postgresql.test.jdbc3.CompositeTest.testComplexSelect": "27793", "org.postgresql.test.jdbc3.CompositeTest.testComplexTableNameMetadata": "27793", From 7201cc9fd869fa9555f1ba7a3cfe2ed9e905a0c3 Mon Sep 17 00:00:00 2001 From: Rafi Shamim Date: Thu, 19 May 2022 15:47:09 -0400 Subject: [PATCH 3/6] roachtest: tolerate some transaction retry errors in typeorm Release note: None --- pkg/cmd/roachtest/tests/typeorm.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/roachtest/tests/typeorm.go b/pkg/cmd/roachtest/tests/typeorm.go index 46375ec65c6a..29d62388b591 100644 --- a/pkg/cmd/roachtest/tests/typeorm.go +++ b/pkg/cmd/roachtest/tests/typeorm.go @@ -166,8 +166,10 @@ func registerTypeORM(r registry.Registry) { rawResults := result.Stdout + result.Stderr t.L().Printf("Test Results: %s", rawResults) if err != nil { - if strings.Contains(rawResults, "1 failing") && - strings.Contains(rawResults, "Error: Cannot find connection better-sqlite3 because its not defined in any orm configuration files.") { + txnRetryErrCount := strings.Count(rawResults, "restart transaction") + if strings.Contains(rawResults, "1 failing") && txnRetryErrCount == 1 { + err = nil + } else if strings.Contains(rawResults, "2 failing") && txnRetryErrCount == 2 { err = nil } if err != nil { From 1cc25963812092dd2ebc6b102b7603dfa972d311 Mon Sep 17 00:00:00 2001 From: Jackson Owens Date: Tue, 17 May 2022 13:40:54 -0400 Subject: [PATCH 4/6] vendor: bump Pebble to e567fec84c6e This commit includes non-trivial changes to account for the change in the `vfs.WithDiskHealthChecks` function signature change. ``` e567fec8 db: ensure Open closes opened directories on error b8c9a560 internal/metamorphic: overwrite unused bounds buffers 7c5f0cbb db: copy user-provided bounds and optimize unchanged bounds f8897076 *: Add IterOption to optionally read L6 filter blocks. 
d79f9617 vfs: extend disk-health checking to filesystem metadata operations 5ae21746 db: remove newRangeKeyIter closure 6d975489 db: add BenchmarkIteratorScan 782d102e sstable: fix invariant check for sstable size estimation 738a7f0b db: fix NewIter regression resulting in extra memtable levels 37558663 *: Use keyspan.LevelIter for rangedels in compactions e6c60c71 db: use sublevel level iters for all compactions out of L0 d8f63bef db: extend documentation on ingested file overlap; improve test cases b9e970a8 internal/keyspan: correct and document MergingIter key stability 498177bb internal/keyspan: collapse fragmentBoundIterator into MergingIter ``` Release note (bug fix): Fix a gap in disk-stall detection. Previously, disk stalls during filesystem metadata operations could go undetected, inducing deadlocks. Now stalls during these types of operations will correctly fatal the process. --- DEPS.bzl | 6 +- build/bazelutil/distdir_files.bzl | 2 +- go.mod | 2 +- go.sum | 4 +- pkg/kv/kvserver/client_metrics_test.go | 10 ++-- pkg/server/config.go | 1 + pkg/storage/disk_map_test.go | 1 - pkg/storage/engine_test.go | 20 +++---- pkg/storage/open.go | 19 ++++--- pkg/storage/pebble.go | 49 +++++++++++++--- pkg/storage/temp_engine.go | 16 ++++-- pkg/testutils/colcontainerutils/BUILD.bazel | 1 - .../colcontainerutils/diskqueuecfg.go | 56 ++++++++++--------- vendor | 2 +- 14 files changed, 114 insertions(+), 75 deletions(-) diff --git a/DEPS.bzl b/DEPS.bzl index 77e1c9a60df8..ddb322c68332 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -1347,10 +1347,10 @@ def go_deps(): patches = [ "@com_github_cockroachdb_cockroach//build/patches:com_github_cockroachdb_pebble.patch", ], - sha256 = "dfa5ce136f7d8d40ddf24077323df27de81f0e1889c1058e4453c77cd8c2cb75", - strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20220426173801-b33d6e173cae", + sha256 = "65c359674e777445a63c2268e62d8fc740992c1aa86f042a07344371ba01e46b", + strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20220517003944-e567fec84c6e", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220426173801-b33d6e173cae.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220517003944-e567fec84c6e.zip", ], ) go_repository( diff --git a/build/bazelutil/distdir_files.bzl b/build/bazelutil/distdir_files.bzl index a695791587e9..60997e35127c 100644 --- a/build/bazelutil/distdir_files.bzl +++ b/build/bazelutil/distdir_files.bzl @@ -179,7 +179,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/go-test-teamcity/com_github_cockroachdb_go_test_teamcity-v0.0.0-20191211140407-cff980ad0a55.zip": "bac30148e525b79d004da84d16453ddd2d5cd20528e9187f1d7dac708335674b", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/gostdlib/com_github_cockroachdb_gostdlib-v1.13.0.zip": "b3d43d8f95edf65f73a5348f29e1159823cac64b148f8d3bb48340bf55d70872", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/logtags/com_github_cockroachdb_logtags-v0.0.0-20211118104740-dabe8e521a4f.zip": "1972c3f171f118add3fd9e64bcea6cbb9959a3b7fa0ada308e8a7310813fea74", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220426173801-b33d6e173cae.zip": "dfa5ce136f7d8d40ddf24077323df27de81f0e1889c1058e4453c77cd8c2cb75", + 
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220517003944-e567fec84c6e.zip": "65c359674e777445a63c2268e62d8fc740992c1aa86f042a07344371ba01e46b", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/redact/com_github_cockroachdb_redact-v1.1.3.zip": "7778b1e4485e4f17f35e5e592d87eb99c29e173ac9507801d000ad76dd0c261e", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/returncheck/com_github_cockroachdb_returncheck-v0.0.0-20200612231554-92cdbca611dd.zip": "ce92ba4352deec995b1f2eecf16eba7f5d51f5aa245a1c362dfe24c83d31f82b", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/sentry-go/com_github_cockroachdb_sentry_go-v0.6.1-cockroachdb.2.zip": "fbb2207d02aecfdd411b1357efe1192dbb827959e36b7cab7491731ac55935c9", diff --git a/go.mod b/go.mod index f38b10b9db00..4a18cd6dc432 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/cockroachdb/go-test-teamcity v0.0.0-20191211140407-cff980ad0a55 github.com/cockroachdb/gostdlib v1.13.0 github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f - github.com/cockroachdb/pebble v0.0.0-20220426173801-b33d6e173cae + github.com/cockroachdb/pebble v0.0.0-20220517003944-e567fec84c6e github.com/cockroachdb/redact v1.1.3 github.com/cockroachdb/returncheck v0.0.0-20200612231554-92cdbca611dd github.com/cockroachdb/stress v0.0.0-20220310203902-58fb4627376e diff --git a/go.sum b/go.sum index 90fb6b96e6cb..7fdf03c60f06 100644 --- a/go.sum +++ b/go.sum @@ -453,8 +453,8 @@ github.com/cockroachdb/gostdlib v1.13.0/go.mod h1:eXX95p9QDrYwJfJ6AgeN9QnRa/lqqi github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f h1:6jduT9Hfc0njg5jJ1DdKCFPdMBrp/mdZfCpa5h+WM74= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20220426173801-b33d6e173cae h1:7lGpwt2wTBh7FApXTEdTDX5OFWsEg9CCe8wMlHJZDwA= -github.com/cockroachdb/pebble v0.0.0-20220426173801-b33d6e173cae/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU= +github.com/cockroachdb/pebble v0.0.0-20220517003944-e567fec84c6e h1:PU73bIcAcMerOI+xzYa4f3Grrd4I5cxO2ffT9+OcRt0= +github.com/cockroachdb/pebble v0.0.0-20220517003944-e567fec84c6e/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU= github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= diff --git a/pkg/kv/kvserver/client_metrics_test.go b/pkg/kv/kvserver/client_metrics_test.go index 28575385104d..4ba8a48b007a 100644 --- a/pkg/kv/kvserver/client_metrics_test.go +++ b/pkg/kv/kvserver/client_metrics_test.go @@ -132,7 +132,7 @@ func verifyStats(t *testing.T, tc *testcluster.TestCluster, storeIdxSlice ...int } } -func verifyRocksDBStats(t *testing.T, s *kvserver.Store) { +func verifyStorageStats(t *testing.T, s *kvserver.Store) { if err := s.ComputeMetrics(context.Background(), 0); err != nil { t.Fatal(err) } @@ -146,8 +146,8 @@ func verifyRocksDBStats(t *testing.T, s *kvserver.Store) { {m.RdbBlockCacheMisses, 0}, {m.RdbBlockCacheUsage, 0}, {m.RdbBlockCachePinnedUsage, 0}, - {m.RdbBloomFilterPrefixChecked, 20}, - {m.RdbBloomFilterPrefixUseful, 20}, + 
{m.RdbBloomFilterPrefixChecked, 1}, + {m.RdbBloomFilterPrefixUseful, 1}, {m.RdbMemtableTotalSize, 5000}, {m.RdbFlushes, 1}, {m.RdbCompactions, 0}, @@ -356,8 +356,8 @@ func TestStoreMetrics(t *testing.T) { // Verify all stats on all stores after range is removed. verifyStats(t, tc, 1, 2) - verifyRocksDBStats(t, tc.GetFirstStoreFromServer(t, 1)) - verifyRocksDBStats(t, tc.GetFirstStoreFromServer(t, 2)) + verifyStorageStats(t, tc.GetFirstStoreFromServer(t, 1)) + verifyStorageStats(t, tc.GetFirstStoreFromServer(t, 2)) } // TestStoreMaxBehindNanosOnlyTracksEpochBasedLeases ensures that the metric diff --git a/pkg/server/config.go b/pkg/server/config.go index ce427f3f2f4b..4d6af93ffc8a 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -584,6 +584,7 @@ func (cfg *Config) CreateEngines(ctx context.Context) (Engines, error) { storage.CacheSize(cfg.CacheSize), storage.MaxSize(sizeInBytes), storage.EncryptionAtRest(spec.EncryptionOptions), + storage.DisableFilesystemMiddlewareTODO, storage.Settings(cfg.Settings)) if err != nil { return Engines{}, err diff --git a/pkg/storage/disk_map_test.go b/pkg/storage/disk_map_test.go index 4bc4bade791d..afde670d624d 100644 --- a/pkg/storage/disk_map_test.go +++ b/pkg/storage/disk_map_test.go @@ -179,7 +179,6 @@ func TestPebbleMap(t *testing.T) { defer e.Close() runTestForEngine(ctx, t, testutils.TestDataPath(t, "diskmap"), e) - } func TestPebbleMultiMap(t *testing.T) { diff --git a/pkg/storage/engine_test.go b/pkg/storage/engine_test.go index 82fdf731ca1d..03fec4795853 100644 --- a/pkg/storage/engine_test.go +++ b/pkg/storage/engine_test.go @@ -574,20 +574,16 @@ func TestEngineMustExist(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - test := func(errStr string) { - tempDir, dirCleanupFn := testutils.TempDir(t) - defer dirCleanupFn() + tempDir, dirCleanupFn := testutils.TempDir(t) + defer dirCleanupFn() - _, err := Open(context.Background(), Filesystem(tempDir), MustExist) - if err == nil { - t.Fatal("expected error related to missing directory") - } - if !strings.Contains(fmt.Sprint(err), errStr) { - t.Fatal(err) - } + _, err := Open(context.Background(), Filesystem(tempDir), MustExist) + if err == nil { + t.Fatal("expected error related to missing directory") + } + if !strings.Contains(fmt.Sprint(err), "no such file or directory") { + t.Fatal(err) } - - test("no such file or directory") } func TestEngineTimeBound(t *testing.T) { diff --git a/pkg/storage/open.go b/pkg/storage/open.go index e5943a0441ca..90fef7a5cc66 100644 --- a/pkg/storage/open.go +++ b/pkg/storage/open.go @@ -55,6 +55,15 @@ var DisableAutomaticCompactions ConfigOption = func(cfg *engineConfig) error { return nil } +// DisableFilesystemMiddlewareTODO configures an engine to not include +// filesystem middleware like disk-health checking and ENOSPC-detection. This is +// a temporary option while some units leak file descriptors, and by extension, +// disk-health checking goroutines. +var DisableFilesystemMiddlewareTODO = func(cfg *engineConfig) error { + cfg.DisableFilesystemMiddlewareTODO = true + return nil +} + // ForTesting configures the engine for use in testing. It may randomize some // config options to improve test coverage. 
var ForTesting ConfigOption = func(cfg *engineConfig) error { @@ -164,11 +173,7 @@ type Location struct { func Filesystem(dir string) Location { return Location{ dir: dir, - // fs is left nil intentionally, so that it will be left as the - // default of vfs.Default wrapped in vfs.WithDiskHealthChecks - // (initialized by DefaultPebbleOptions). - // TODO(jackson): Refactor to make it harder to accidentally remove - // disk health checks by setting your own VFS in a call to NewPebble. + fs: vfs.Default, } } @@ -196,9 +201,7 @@ func Open(ctx context.Context, loc Location, opts ...ConfigOption) (*Pebble, err var cfg engineConfig cfg.Dir = loc.dir cfg.Opts = DefaultPebbleOptions() - if loc.fs != nil { - cfg.Opts.FS = loc.fs - } + cfg.Opts.FS = loc.fs for _, opt := range opts { if err := opt(&cfg); err != nil { return nil, err diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go index f28bb73e732b..7011a28a511e 100644 --- a/pkg/storage/pebble.go +++ b/pkg/storage/pebble.go @@ -579,6 +579,7 @@ func DefaultPebbleOptions() *pebble.Options { opts := &pebble.Options{ Comparer: EngineComparer, + FS: vfs.Default, L0CompactionThreshold: 2, L0StopWritesThreshold: 1000, LBaseMaxBytes: 64 << 20, // 64 MB @@ -635,8 +636,15 @@ func DefaultPebbleOptions() *pebble.Options { // of the benefit of having bloom filters on every level for only 10% of the // memory cost. opts.Levels[6].FilterPolicy = nil + return opts +} - // Set disk health check interval to min(5s, maxSyncDurationDefault). This +// wrapFilesystemMiddleware wraps the Option's vfs.FS with disk-health checking +// and ENOSPC detection. It mutates the provided options to set the FS and +// returns a Closer that should be invoked when the filesystem will no longer be +// used. +func wrapFilesystemMiddleware(opts *pebble.Options) io.Closer { + // Set disk-health check interval to min(5s, maxSyncDurationDefault). This // is mostly to ease testing; the default of 5s is too infrequent to test // conveniently. See the disk-stalled roachtest for an example of how this // is used. @@ -644,9 +652,11 @@ func DefaultPebbleOptions() *pebble.Options { if diskHealthCheckInterval.Seconds() > maxSyncDurationDefault.Seconds() { diskHealthCheckInterval = maxSyncDurationDefault } - // Instantiate a file system with disk health checking enabled. This FS wraps - // vfs.Default, and can be wrapped for encryption-at-rest. - opts.FS = vfs.WithDiskHealthChecks(vfs.Default, diskHealthCheckInterval, + // Instantiate a file system with disk health checking enabled. This FS + // wraps the filesystem with a layer that times all write-oriented + // operations. + var closer io.Closer + opts.FS, closer = vfs.WithDiskHealthChecks(opts.FS, diskHealthCheckInterval, func(name string, duration time.Duration) { opts.EventListener.DiskSlow(pebble.DiskSlowInfo{ Path: name, @@ -657,7 +667,7 @@ func DefaultPebbleOptions() *pebble.Options { opts.FS = vfs.OnDiskFull(opts.FS, func() { exit.WithCode(exit.DiskFull()) }) - return opts + return closer } type pebbleLogger struct { @@ -682,6 +692,9 @@ type PebbleConfig struct { base.StorageConfig // Pebble specific options. Opts *pebble.Options + // Temporary option while there exist file descriptor leaks. See the + // DisableFilesystemMiddlewareTODO ConfigOption that sets this, and #81389. + DisableFilesystemMiddlewareTODO bool } // EncryptionStatsHandler provides encryption related stats. @@ -733,6 +746,9 @@ type Pebble struct { syncutil.Mutex flushCompletedCallback func() } + // closer is populated when the database is opened. 
The closer is associated + with the filesystem closer io.Closer wrappedIntentWriter intentDemuxWriter @@ -825,13 +841,26 @@ func ResolveEncryptedEnvOptions(cfg *PebbleConfig) (*PebbleFileRegistry, *Encryp } // NewPebble creates a new Pebble instance, at the specified path. -func NewPebble(ctx context.Context, cfg PebbleConfig) (*Pebble, error) { +func NewPebble(ctx context.Context, cfg PebbleConfig) (p *Pebble, err error) { // pebble.Open also calls EnsureDefaults, but only after doing a clone. Call // EnsureDefaults beforehand so we have a matching cfg here for when we save // cfg.FS and cfg.ReadOnly later on. if cfg.Opts == nil { cfg.Opts = DefaultPebbleOptions() } + + // Initialize the FS, wrapping it with disk health-checking and + // ENOSPC-detection. + var filesystemCloser io.Closer + if !cfg.DisableFilesystemMiddlewareTODO { + filesystemCloser = wrapFilesystemMiddleware(cfg.Opts) + defer func() { + if err != nil { + filesystemCloser.Close() + } + }() + } + cfg.Opts.EnsureDefaults() cfg.Opts.ErrorIfNotExists = cfg.MustExist if settings := cfg.Settings; settings != nil { @@ -853,8 +882,6 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (*Pebble, error) { // FS for those that need it. Some call sites need the unencrypted // FS for the purpose of atomic renames. unencryptedFS := cfg.Opts.FS - // TODO(jackson): Assert that unencryptedFS provides atomic renames. - fileRegistry, env, err := ResolveEncryptedEnvOptions(&cfg) if err != nil { return nil, err @@ -899,7 +926,7 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (*Pebble, error) { storeProps := computeStoreProperties(ctx, cfg.Dir, cfg.Opts.ReadOnly, env != nil /* encryptionEnabled */) - p := &Pebble{ + p = &Pebble{ readOnly: cfg.Opts.ReadOnly, path: cfg.Dir, auxDir: auxDir, @@ -915,6 +942,7 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (*Pebble, error) { unencryptedFS: unencryptedFS, logger: cfg.Opts.Logger, storeIDPebbleLog: storeIDContainer, + closer: filesystemCloser, } cfg.Opts.EventListener = pebble.TeeEventListener( pebble.MakeLoggingEventListener(pebbleLogger{ @@ -1030,6 +1058,9 @@ func (p *Pebble) Close() { if p.encryption != nil { _ = p.encryption.Closer.Close() } + if p.closer != nil { + _ = p.closer.Close() + } } // Closed implements the Engine interface. diff --git a/pkg/storage/temp_engine.go b/pkg/storage/temp_engine.go index e5fadfada634..bbdae5baf9ca 100644 --- a/pkg/storage/temp_engine.go +++ b/pkg/storage/temp_engine.go @@ -12,6 +12,7 @@ package storage import ( "context" + "io" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/diskmap" @@ -29,13 +30,16 @@ func NewTempEngine( } type pebbleTempEngine struct { - db *pebble.DB + db *pebble.DB + closer io.Closer } // Close implements the diskmap.Factory interface. func (r *pebbleTempEngine) Close() { - err := r.db.Close() - if err != nil { + if err := r.db.Close(); err != nil { + log.Fatalf(context.TODO(), "%v", err) + } + if err := r.closer.Close(); err != nil { log.Fatalf(context.TODO(), "%v", err) } } @@ -93,6 +97,8 @@ func newPebbleTempEngine( // Set store ID for the pebble engine.
p.SetStoreID(ctx, base.TempStoreID) - - return &pebbleTempEngine{db: p.db}, p, nil + return &pebbleTempEngine{ + db: p.db, + closer: p.closer, + }, p, nil } diff --git a/pkg/testutils/colcontainerutils/BUILD.bazel b/pkg/testutils/colcontainerutils/BUILD.bazel index eb846c900f58..7149a5b0087b 100644 --- a/pkg/testutils/colcontainerutils/BUILD.bazel +++ b/pkg/testutils/colcontainerutils/BUILD.bazel @@ -8,7 +8,6 @@ go_library( deps = [ "//pkg/sql/colcontainer", "//pkg/storage", - "//pkg/storage/fs", "//pkg/testutils", ], ) diff --git a/pkg/testutils/colcontainerutils/diskqueuecfg.go b/pkg/testutils/colcontainerutils/diskqueuecfg.go index c3fada0fdaba..b41f4ce9fe21 100644 --- a/pkg/testutils/colcontainerutils/diskqueuecfg.go +++ b/pkg/testutils/colcontainerutils/diskqueuecfg.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/storage" - "github.com/cockroachdb/cockroach/pkg/storage/fs" "github.com/cockroachdb/cockroach/pkg/testutils" ) @@ -27,41 +26,46 @@ func NewTestingDiskQueueCfg(t testing.TB, inMem bool) (colcontainer.DiskQueueCfg t.Helper() var ( - cfg colcontainer.DiskQueueCfg - cleanup func() - testingFS fs.FS - path string + cfg colcontainer.DiskQueueCfg + cleanup []func() + path string + loc storage.Location ) if inMem { - ngn := storage.NewDefaultInMemForTesting() - testingFS = ngn.(fs.FS) - if err := testingFS.MkdirAll(inMemDirName); err != nil { - t.Fatal(err) - } + loc = storage.InMemory() path = inMemDirName - cleanup = ngn.Close } else { - tempPath, dirCleanup := testutils.TempDir(t) - path = tempPath - ngn, err := storage.Open( - context.Background(), - storage.Filesystem(tempPath), - storage.CacheSize(0)) - if err != nil { + var cleanupFunc func() + path, cleanupFunc = testutils.TempDir(t) + loc = storage.Filesystem(path) + cleanup = append(cleanup, cleanupFunc) + } + + ngn, err := storage.Open( + context.Background(), + loc, + storage.ForTesting, + storage.CacheSize(0)) + if err != nil { + t.Fatal(err) + } + + if inMem { + if err := ngn.MkdirAll(inMemDirName); err != nil { t.Fatal(err) } - testingFS = ngn - cleanup = func() { - ngn.Close() - dirCleanup() - } } - cfg.FS = testingFS + + cleanup = append(cleanup, ngn.Close) + cfg.FS = ngn cfg.GetPather = colcontainer.GetPatherFunc(func(context.Context) string { return path }) if err := cfg.EnsureDefaults(); err != nil { t.Fatal(err) } - - return cfg, cleanup + return cfg, func() { + for _, f := range cleanup { + f() + } + } } diff --git a/vendor b/vendor index 73c75fbaf35e..c404bd45895f 160000 --- a/vendor +++ b/vendor @@ -1 +1 @@ -Subproject commit 73c75fbaf35e68220c2f5d3a2acfed24dcd9fbef +Subproject commit c404bd45895f1a010fb547a4f6c833d392fc4062 From 077025470af81b32634880be250b92964ec1db05 Mon Sep 17 00:00:00 2001 From: Yahor Yuzefovich Date: Thu, 19 May 2022 16:10:48 -0700 Subject: [PATCH 5/6] sql,stmtdiagnostics: remove some version gates This commit addresses several TODOs with my name on them about removing the version gates, mostly around the conditional statement diagnostics introduced in the 22.1 cycle.
Release note: None --- pkg/cli/clisqlclient/statement_diag.go | 62 +++++-------------- pkg/server/statement_diagnostics_requests.go | 36 ++++------- pkg/sql/row/BUILD.bazel | 1 - pkg/sql/row/kv_batch_streamer.go | 7 +-- pkg/sql/stmtdiagnostics/BUILD.bazel | 1 - .../stmtdiagnostics/statement_diagnostics.go | 59 +++++------------- 6 files changed, 44 insertions(+), 122 deletions(-) diff --git a/pkg/cli/clisqlclient/statement_diag.go b/pkg/cli/clisqlclient/statement_diag.go index 3c514f8094ca..6741950dbcff 100644 --- a/pkg/cli/clisqlclient/statement_diag.go +++ b/pkg/cli/clisqlclient/statement_diag.go @@ -13,7 +13,6 @@ package clisqlclient import ( "context" "database/sql/driver" - "fmt" "io" "os" "time" @@ -100,51 +99,22 @@ func StmtDiagListOutstandingRequests( return result, nil } -// TODO(yuzefovich): remove this in 22.2. -func isAtLeast22dot1ClusterVersion(ctx context.Context, conn Conn) (bool, error) { - // Check whether the upgrade to add the conditional diagnostics columns to - // the statement_diagnostics_requests system table has already been run. - row, err := conn.QueryRow(ctx, ` -SELECT - count(*) -FROM - [SHOW COLUMNS FROM system.statement_diagnostics_requests] -WHERE - column_name = 'min_execution_latency';`) - if err != nil { - return false, err - } - c, ok := row[0].(int64) - if !ok { - return false, nil - } - return c == 1, nil -} - func stmtDiagListOutstandingRequestsInternal( ctx context.Context, conn Conn, ) ([]StmtDiagActivationRequest, error) { - var extraColumns string - atLeast22dot1, err := isAtLeast22dot1ClusterVersion(ctx, conn) - if err != nil { - return nil, err - } - if atLeast22dot1 { - // Converting an INTERVAL to a number of milliseconds within that - // interval is a pain - we extract the number of seconds and multiply it - // by 1000, then we extract the number of milliseconds and add that up - // to the previous result; however, we have now double counted the - // seconds field, so we have to remove that times 1000. - getMilliseconds := `EXTRACT(epoch FROM min_execution_latency)::INT8 * 1000 + + // Converting an INTERVAL to a number of milliseconds within that interval + // is a pain - we extract the number of seconds and multiply it by 1000, + // then we extract the number of milliseconds and add that up to the + // previous result; however, we have now double counted the seconds field, + // so we have to remove that times 1000. 
+ getMilliseconds := `EXTRACT(epoch FROM min_execution_latency)::INT8 * 1000 + EXTRACT(millisecond FROM min_execution_latency)::INT8 - EXTRACT(second FROM min_execution_latency)::INT8 * 1000` - extraColumns = ", " + getMilliseconds + ", expires_at" - } rows, err := conn.Query(ctx, - fmt.Sprintf(`SELECT id, statement_fingerprint, requested_at%s - FROM system.statement_diagnostics_requests - WHERE NOT completed - ORDER BY requested_at DESC`, extraColumns), + "SELECT id, statement_fingerprint, requested_at, "+getMilliseconds+`, expires_at + FROM system.statement_diagnostics_requests + WHERE NOT completed + ORDER BY requested_at DESC`, ) if err != nil { return nil, err @@ -159,13 +129,11 @@ func stmtDiagListOutstandingRequestsInternal( } var minExecutionLatency time.Duration var expiresAt time.Time - if atLeast22dot1 { - if ms, ok := vals[3].(int64); ok { - minExecutionLatency = time.Millisecond * time.Duration(ms) - } - if e, ok := vals[4].(time.Time); ok { - expiresAt = e - } + if ms, ok := vals[3].(int64); ok { + minExecutionLatency = time.Millisecond * time.Duration(ms) + } + if e, ok := vals[4].(time.Time); ok { + expiresAt = e } info := StmtDiagActivationRequest{ ID: vals[0].(int64), diff --git a/pkg/server/statement_diagnostics_requests.go b/pkg/server/statement_diagnostics_requests.go index 268d3aa30c27..5584d81e46a5 100644 --- a/pkg/server/statement_diagnostics_requests.go +++ b/pkg/server/statement_diagnostics_requests.go @@ -12,10 +12,8 @@ package server import ( "context" - "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -131,27 +129,21 @@ func (s *statusServer) StatementDiagnosticsRequests( var err error - // TODO(yuzefovich): remove this version gating in 22.2. - var extraColumns string - if s.admin.server.st.Version.IsActive(ctx, clusterversion.AlterSystemStmtDiagReqs) { - extraColumns = `, - min_execution_latency, - expires_at` - } - // TODO(davidh): Add pagination to this request. it, err := s.internalExecutor.QueryIteratorEx(ctx, "stmt-diag-get-all", nil, /* txn */ sessiondata.InternalExecutorOverride{ User: username.RootUserName(), }, - fmt.Sprintf(`SELECT + `SELECT id, statement_fingerprint, completed, statement_diagnostics_id, - requested_at%s + requested_at, + min_execution_latency, + expires_at FROM - system.statement_diagnostics_requests`, extraColumns)) + system.statement_diagnostics_requests`) if err != nil { return nil, err } @@ -175,16 +167,14 @@ func (s *statusServer) StatementDiagnosticsRequests( if requestedAt, ok := row[4].(*tree.DTimestampTZ); ok { req.RequestedAt = requestedAt.Time } - if extraColumns != "" { - if minExecutionLatency, ok := row[5].(*tree.DInterval); ok { - req.MinExecutionLatency = time.Duration(minExecutionLatency.Duration.Nanos()) - } - if expiresAt, ok := row[6].(*tree.DTimestampTZ); ok { - req.ExpiresAt = expiresAt.Time - // Don't return already expired requests. - if req.ExpiresAt.Before(timeutil.Now()) { - continue - } + if minExecutionLatency, ok := row[5].(*tree.DInterval); ok { + req.MinExecutionLatency = time.Duration(minExecutionLatency.Duration.Nanos()) + } + if expiresAt, ok := row[6].(*tree.DTimestampTZ); ok { + req.ExpiresAt = expiresAt.Time + // Don't return already expired requests. 
+ if req.ExpiresAt.Before(timeutil.Now()) { + continue } } diff --git a/pkg/sql/row/BUILD.bazel b/pkg/sql/row/BUILD.bazel index 3c7cef433b7e..b4147f6458bf 100644 --- a/pkg/sql/row/BUILD.bazel +++ b/pkg/sql/row/BUILD.bazel @@ -23,7 +23,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/row", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", diff --git a/pkg/sql/row/kv_batch_streamer.go b/pkg/sql/row/kv_batch_streamer.go index 9349a99b143e..94330314d13a 100644 --- a/pkg/sql/row/kv_batch_streamer.go +++ b/pkg/sql/row/kv_batch_streamer.go @@ -13,7 +13,6 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvstreamer" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" @@ -24,13 +23,11 @@ import ( // CanUseStreamer returns whether the kvstreamer.Streamer API should be used. func CanUseStreamer(ctx context.Context, settings *cluster.Settings) bool { - // TODO(yuzefovich): remove the version gate in 22.2 cycle. - return settings.Version.IsActive(ctx, clusterversion.ScanWholeRows) && - useStreamerEnabled.Get(&settings.SV) + return useStreamerEnabled.Get(&settings.SV) } // useStreamerEnabled determines whether the Streamer API should be used. -// TODO(yuzefovich): remove this in 22.2. +// TODO(yuzefovich): remove this in 23.1. var useStreamerEnabled = settings.RegisterBoolSetting( settings.TenantReadOnly, "sql.distsql.use_streamer.enabled", diff --git a/pkg/sql/stmtdiagnostics/BUILD.bazel b/pkg/sql/stmtdiagnostics/BUILD.bazel index 49623d067569..203a7e623fed 100644 --- a/pkg/sql/stmtdiagnostics/BUILD.bazel +++ b/pkg/sql/stmtdiagnostics/BUILD.bazel @@ -6,7 +6,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/gossip", "//pkg/kv", "//pkg/roachpb", diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics.go b/pkg/sql/stmtdiagnostics/statement_diagnostics.go index 6b8950b5b7e1..a3b46e31188e 100644 --- a/pkg/sql/stmtdiagnostics/statement_diagnostics.go +++ b/pkg/sql/stmtdiagnostics/statement_diagnostics.go @@ -16,7 +16,6 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -195,11 +194,6 @@ func (r *Registry) poll(ctx context.Context) { } } -// TODO(yuzefovich): remove this in 22.2. -func (r *Registry) isMinExecutionLatencySupported(ctx context.Context) bool { - return r.st.Version.IsActive(ctx, clusterversion.AlterSystemStmtDiagReqs) -} - // RequestID is the ID of a diagnostics request, corresponding to the id // column in statement_diagnostics_requests. // A zero ID is invalid. @@ -285,29 +279,19 @@ func (r *Registry) insertRequestInternal( return 0, err } - if !r.isMinExecutionLatencySupported(ctx) { - if minExecutionLatency != 0 || expiresAfter != 0 { - return 0, errors.New( - "conditional statement diagnostics are only supported " + - "after 22.1 version migrations have completed", - ) - } - } - var reqID RequestID var expiresAt time.Time err = r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Check if there's already a pending request for this fingerprint. 
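+		// A request only counts as pending while it has not expired;
+		// a NULL expires_at means it never expires.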
- var extraConditions string - if r.isMinExecutionLatencySupported(ctx) { - extraConditions = " AND (expires_at IS NULL OR expires_at > now())" - } row, err := r.ie.QueryRowEx(ctx, "stmt-diag-check-pending", txn, sessiondata.InternalExecutorOverride{ User: username.RootUserName(), }, - fmt.Sprintf("SELECT count(1) FROM system.statement_diagnostics_requests "+ - "WHERE completed = false AND statement_fingerprint = $1%s", extraConditions), + `SELECT count(1) FROM system.statement_diagnostics_requests + WHERE + completed = false AND + statement_fingerprint = $1 AND + (expires_at IS NULL OR expires_at > now())`, stmtFingerprint) if err != nil { return err @@ -386,14 +370,6 @@ func (r *Registry) CancelRequest(ctx context.Context, requestID int64) error { return err } - if !r.isMinExecutionLatencySupported(ctx) { - // If conditional diagnostics are not supported for this cluster yet, - // then we cannot cancel the request. - return errors.New( - "statement diagnostics can only be canceled after 22.1 version migrations have completed", - ) - } - row, err := r.ie.QueryRowEx(ctx, "stmt-diag-cancel-request", nil, /* txn */ sessiondata.InternalExecutorOverride{ User: username.RootUserName(), @@ -636,25 +612,20 @@ func (r *Registry) InsertStatementDiagnostics( // updates r.mu.requests accordingly. func (r *Registry) pollRequests(ctx context.Context) error { var rows []tree.Datums - isMinExecutionLatencySupported := r.isMinExecutionLatencySupported(ctx) // Loop until we run the query without straddling an epoch increment. for { r.mu.Lock() epoch := r.mu.epoch r.mu.Unlock() - var extraColumns string - var extraConditions string - if isMinExecutionLatencySupported { - extraColumns = ", min_execution_latency, expires_at" - extraConditions = " AND (expires_at IS NULL OR expires_at > now())" - } it, err := r.ie.QueryIteratorEx(ctx, "stmt-diag-poll", nil, /* txn */ sessiondata.InternalExecutorOverride{ User: username.RootUserName(), }, - fmt.Sprintf("SELECT id, statement_fingerprint%s FROM system.statement_diagnostics_requests "+ - "WHERE completed = false%s", extraColumns, extraConditions)) + `SELECT id, statement_fingerprint, min_execution_latency, expires_at + FROM system.statement_diagnostics_requests + WHERE completed = false AND (expires_at IS NULL OR expires_at > now())`, + ) if err != nil { return err } @@ -686,13 +657,11 @@ func (r *Registry) pollRequests(ctx context.Context) error { stmtFingerprint := string(*row[1].(*tree.DString)) var minExecutionLatency time.Duration var expiresAt time.Time - if isMinExecutionLatencySupported { - if minExecLatency, ok := row[2].(*tree.DInterval); ok { - minExecutionLatency = time.Duration(minExecLatency.Nanos()) - } - if e, ok := row[3].(*tree.DTimestampTZ); ok { - expiresAt = e.Time - } + if minExecLatency, ok := row[2].(*tree.DInterval); ok { + minExecutionLatency = time.Duration(minExecLatency.Nanos()) + } + if e, ok := row[3].(*tree.DTimestampTZ); ok { + expiresAt = e.Time } ids.Add(int(id)) r.addRequestInternalLocked(ctx, id, stmtFingerprint, minExecutionLatency, expiresAt) From 01e5ea043dc87c4833bb465c17d71fedf708fa44 Mon Sep 17 00:00:00 2001 From: Renato Costa Date: Fri, 20 May 2022 10:37:16 -0400 Subject: [PATCH 6/6] roachtest: allow TC_BUILDTYPE_ID to be accessible by Docker In #81103, the process of generating TeamCity links in test failure reports started relying on the `TC_BUILDTYPE_ID` environment variable. While that variable was added to TeamCity builds, it was not being passed down to Docker where the tests actually run. 
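To illustrate the mechanism with a hypothetical invocation (not taken
from these scripts):

    docker run --rm -e TC_BUILDTYPE_ID ubuntu env | grep TC_BUILDTYPE_ID

This prints the variable only when it is both set on the host and
forwarded with `-e`; without the forwarding flag, the container never
sees it.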
As a result, links generated by the GitHub poster were broken (see, for example, #81572). This commit makes `TC_BUILDTYPE_ID` accessible by Docker for every build that was already passing `TC_BUILD_BRANCH`. This should be sufficient to cover all existing cases and more, in case having access to this variable becomes useful in the future. Release note: None --- build/teamcity/cockroach/nightlies/cloud_unit_tests.sh | 2 +- build/teamcity/cockroach/nightlies/lint_urls.sh | 2 +- build/teamcity/cockroach/nightlies/optimizer_tests.sh | 2 +- .../cockroach/nightlies/pebble_nightly_metamorphic.sh | 2 +- .../cockroach/nightlies/pebble_nightly_metamorphic_race.sh | 2 +- .../cockroach/nightlies/pebble_nightly_write_throughput.sh | 2 +- build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh | 2 +- .../teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh | 2 +- build/teamcity/cockroach/nightlies/random_syntax_tests.sh | 2 +- build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh | 2 +- build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh | 2 +- build/teamcity/cockroach/nightlies/roachtest_weekly.sh | 2 +- build/teamcity/cockroach/nightlies/sqlite_logic_test.sh | 2 +- .../cockroach/nightlies/sqllogic_hi_vmodule_nightly.sh | 2 +- build/teamcity/cockroach/nightlies/stress.sh | 2 +- build/teamcity/cockroach/nightlies/stress_trigger.sh | 2 +- build/teamcity/cockroach/post-merge/publish-bleeding-edge.sh | 2 +- .../release/process/make-and-publish-build-artifacts.sh | 2 +- .../internal/release/process/publish-cockroach-release.sh | 4 ++-- 19 files changed, 20 insertions(+), 20 deletions(-) diff --git a/build/teamcity/cockroach/nightlies/cloud_unit_tests.sh b/build/teamcity/cockroach/nightlies/cloud_unit_tests.sh index d37b140e781b..da132097608c 100755 --- a/build/teamcity/cockroach/nightlies/cloud_unit_tests.sh +++ b/build/teamcity/cockroach/nightlies/cloud_unit_tests.sh @@ -8,6 +8,6 @@ source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel tc_start_block "Run cloud unit tests" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e GOOGLE_EPHEMERAL_CREDENTIALS -e GOOGLE_KMS_KEY_NAME" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e GOOGLE_EPHEMERAL_CREDENTIALS -e GOOGLE_KMS_KEY_NAME" \ run_bazel build/teamcity/cockroach/nightlies/cloud_unit_tests_impl.sh "$@" tc_end_block "Run cloud unit tests" diff --git a/build/teamcity/cockroach/nightlies/lint_urls.sh b/build/teamcity/cockroach/nightlies/lint_urls.sh index cf51c9dfdf73..fc01628c9eb8 100755 --- a/build/teamcity/cockroach/nightlies/lint_urls.sh +++ b/build/teamcity/cockroach/nightlies/lint_urls.sh @@ -8,6 +8,6 @@ source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel tc_start_block "lint urls" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/lint_urls_impl.sh tc_end_block "lint urls" diff --git a/build/teamcity/cockroach/nightlies/optimizer_tests.sh b/build/teamcity/cockroach/nightlies/optimizer_tests.sh index 912dba858d01..2b7d43b12bc5 100755 --- 
a/build/teamcity/cockroach/nightlies/optimizer_tests.sh +++ b/build/teamcity/cockroach/nightlies/optimizer_tests.sh @@ -7,5 +7,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/optimizer_tests_impl.sh diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh index 85c68ce97551..56b7d661663d 100755 --- a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh @@ -26,5 +26,5 @@ echo "$NEW_DEPS_BZL_CONTENT" > DEPS.bzl PEBBLE_SHA=$(grep 'version =' DEPS.bzl | cut -d'"' -f2 | cut -d'-' -f3) echo "Pebble module Git SHA: $PEBBLE_SHA" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER=$PEBBLE_SHA -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER=$PEBBLE_SHA -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_impl.sh diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh index 5fcbd471ad9f..bb4fe0bacb70 100755 --- a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh @@ -26,5 +26,5 @@ echo "$NEW_DEPS_BZL_CONTENT" > DEPS.bzl PEBBLE_SHA=$(grep 'version =' DEPS.bzl | cut -d'"' -f2 | cut -d'-' -f3) echo "Pebble module Git SHA: $PEBBLE_SHA" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER=$PEBBLE_SHA -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER=$PEBBLE_SHA -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race_impl.sh diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh index 5208a7232b42..f0c87d3ceffa 100755 --- a/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh @@ -12,5 +12,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput_impl.sh diff 
--git a/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh index f7168f9396c3..d77d08bb0e13 100755 --- a/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh @@ -12,5 +12,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_impl.sh diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh index 970b7d6630cd..6820a7cb83bb 100755 --- a/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh @@ -14,5 +14,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race_impl.sh diff --git a/build/teamcity/cockroach/nightlies/random_syntax_tests.sh b/build/teamcity/cockroach/nightlies/random_syntax_tests.sh index 2d3e94e7a0b1..0d25c89ffe83 100755 --- a/build/teamcity/cockroach/nightlies/random_syntax_tests.sh +++ b/build/teamcity/cockroach/nightlies/random_syntax_tests.sh @@ -8,6 +8,6 @@ source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel tc_start_block "Run random syntax tests" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/random_syntax_tests_impl.sh tc_end_block "Run random syntax tests" diff --git a/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh b/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh index 8e33f25a97bd..40e71fd3eb6e 100755 --- a/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh @@ -7,5 +7,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_ACCESS_KEY_ID_ASSUME_ROLE -e AWS_KMS_KEY_ARN_A -e AWS_KMS_KEY_ARN_B -e AWS_KMS_REGION_A -e AWS_KMS_REGION_B -e 
AWS_ROLE_ARN -e AWS_SECRET_ACCESS_KEY -e AWS_SECRET_ACCESS_KEY_ASSUME_ROLE -e BUILD_TAG -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_ACCESS_KEY_ID_ASSUME_ROLE -e AWS_KMS_KEY_ARN_A -e AWS_KMS_KEY_ARN_B -e AWS_KMS_REGION_A -e AWS_KMS_REGION_B -e AWS_ROLE_ARN -e AWS_SECRET_ACCESS_KEY -e AWS_SECRET_ACCESS_KEY_ASSUME_ROLE -e BUILD_TAG -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh diff --git a/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh b/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh index 0e4a0552efdc..ebca9214a17f 100755 --- a/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh @@ -7,5 +7,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e BUILD_TAG -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e GOOGLE_KMS_KEY_A -e GOOGLE_KMS_KEY_B -e SLACK_TOKEN -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e BUILD_TAG -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e GOOGLE_KMS_KEY_A -e GOOGLE_KMS_KEY_B -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh diff --git a/build/teamcity/cockroach/nightlies/roachtest_weekly.sh b/build/teamcity/cockroach/nightlies/roachtest_weekly.sh index 9087ce4f673b..6697306b7b27 100755 --- a/build/teamcity/cockroach/nightlies/roachtest_weekly.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_weekly.sh @@ -7,5 +7,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_TAG -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_TAG -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/roachtest_weekly_impl.sh diff --git a/build/teamcity/cockroach/nightlies/sqlite_logic_test.sh b/build/teamcity/cockroach/nightlies/sqlite_logic_test.sh index 1a183d2306c8..1869f86a9b89 100755 --- a/build/teamcity/cockroach/nightlies/sqlite_logic_test.sh +++ 
b/build/teamcity/cockroach/nightlies/sqlite_logic_test.sh @@ -8,6 +8,6 @@ source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel tc_start_block "Run SQLite logic tests" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/sqlite_logic_test_impl.sh tc_end_block "Run SQLite logic tests" diff --git a/build/teamcity/cockroach/nightlies/sqllogic_hi_vmodule_nightly.sh b/build/teamcity/cockroach/nightlies/sqllogic_hi_vmodule_nightly.sh index 979db09bf1d1..ac728175ac97 100755 --- a/build/teamcity/cockroach/nightlies/sqllogic_hi_vmodule_nightly.sh +++ b/build/teamcity/cockroach/nightlies/sqllogic_hi_vmodule_nightly.sh @@ -8,6 +8,6 @@ source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel tc_start_block "Run SQL Logic Test High VModule" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e TC_BUILD_BRANCH -e GITHUB_API_TOKEN -e BUILD_VCS_NUMBER -e TC_BUILD_ID -e TC_SERVER_URL" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e GITHUB_API_TOKEN -e BUILD_VCS_NUMBER -e TC_BUILD_ID -e TC_SERVER_URL" \ run_bazel build/teamcity/cockroach/nightlies/sqllogic_hi_vmodule_nightly_impl.sh tc_end_block "Run SQL Logic Test High VModule" diff --git a/build/teamcity/cockroach/nightlies/stress.sh b/build/teamcity/cockroach/nightlies/stress.sh index 6f752f154074..a97c268b5baa 100755 --- a/build/teamcity/cockroach/nightlies/stress.sh +++ b/build/teamcity/cockroach/nightlies/stress.sh @@ -8,6 +8,6 @@ source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel tc_start_block "Run stress" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_TAG -e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e PKG -e TAGS -e STRESSFLAGS -e TESTTIMEOUTSECS -e EXTRA_BAZEL_FLAGS" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e BUILD_TAG -e BUILD_VCS_NUMBER -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e PKG -e TAGS -e STRESSFLAGS -e TESTTIMEOUTSECS -e EXTRA_BAZEL_FLAGS" \ run_bazel build/teamcity/cockroach/nightlies/stress_impl.sh tc_end_block "Run stress" diff --git a/build/teamcity/cockroach/nightlies/stress_trigger.sh b/build/teamcity/cockroach/nightlies/stress_trigger.sh index 7138bbc12aaa..b676c5ed02d7 100755 --- a/build/teamcity/cockroach/nightlies/stress_trigger.sh +++ b/build/teamcity/cockroach/nightlies/stress_trigger.sh @@ -8,6 +8,6 @@ source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel tc_start_block "Run stress trigger" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e TC_API_USER -e TC_API_PASSWORD -e TC_SERVER_URL -e TC_BUILD_BRANCH" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e TC_API_USER -e TC_API_PASSWORD -e TC_SERVER_URL -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH" \ run_bazel build/teamcity/cockroach/nightlies/stress_trigger_impl.sh "$@" tc_end_block "Run stress trigger" diff --git a/build/teamcity/cockroach/post-merge/publish-bleeding-edge.sh b/build/teamcity/cockroach/post-merge/publish-bleeding-edge.sh index 0adeebdfb208..1278f0dfe15a 100755 --- a/build/teamcity/cockroach/post-merge/publish-bleeding-edge.sh +++ 
b/build/teamcity/cockroach/post-merge/publish-bleeding-edge.sh @@ -9,7 +9,7 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" source "$dir/teamcity-bazel-support.sh" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILD_BRANCH" run_bazel << 'EOF' +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH" run_bazel << 'EOF' bazel build --config ci //pkg/cmd/publish-artifacts BAZEL_BIN=$(bazel info bazel-bin --config ci) $BAZEL_BIN/pkg/cmd/publish-artifacts/publish-artifacts_/publish-artifacts diff --git a/build/teamcity/internal/release/process/make-and-publish-build-artifacts.sh b/build/teamcity/internal/release/process/make-and-publish-build-artifacts.sh index 2bcec40c3569..52d07fd9c563 100755 --- a/build/teamcity/internal/release/process/make-and-publish-build-artifacts.sh +++ b/build/teamcity/internal/release/process/make-and-publish-build-artifacts.sh @@ -47,7 +47,7 @@ git tag "${build_name}" tc_end_block "Tag the release" tc_start_block "Compile and publish S3 artifacts" -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILD_BRANCH=$build_name -e bucket=$bucket" run_bazel << 'EOF' +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH=$build_name -e bucket=$bucket" run_bazel << 'EOF' bazel build --config ci //pkg/cmd/publish-provisional-artifacts BAZEL_BIN=$(bazel info bazel-bin --config ci) $BAZEL_BIN/pkg/cmd/publish-provisional-artifacts/publish-provisional-artifacts_/publish-provisional-artifacts -provisional -release -bucket "$bucket" diff --git a/build/teamcity/internal/release/process/publish-cockroach-release.sh b/build/teamcity/internal/release/process/publish-cockroach-release.sh index 5f8d6de8effa..21d0ce068a46 100755 --- a/build/teamcity/internal/release/process/publish-cockroach-release.sh +++ b/build/teamcity/internal/release/process/publish-cockroach-release.sh @@ -77,7 +77,7 @@ tc_end_block "Tag the release" tc_start_block "Make and publish release S3 artifacts" # Using publish-provisional-artifacts here is funky. We're directly publishing # the official binaries, not provisional ones. Legacy naming. To clean up... -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILD_BRANCH=$build_name -e bucket=$bucket" run_bazel << 'EOF' +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH=$build_name -e bucket=$bucket" run_bazel << 'EOF' bazel build --config ci //pkg/cmd/publish-provisional-artifacts BAZEL_BIN=$(bazel info bazel-bin --config ci) $BAZEL_BIN/pkg/cmd/publish-provisional-artifacts/publish-provisional-artifacts_/publish-provisional-artifacts -provisional -release -bucket "$bucket" @@ -127,7 +127,7 @@ tc_start_block "Publish S3 binaries and archive as latest" # Only push the "latest" for our most recent release branch. 
# https://github.com/cockroachdb/cockroach/issues/41067 if [[ -n "${PUBLISH_LATEST}" && -z "${PRE_RELEASE}" ]]; then - BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILD_BRANCH=$build_name -e bucket=$bucket" run_bazel << 'EOF' + BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH=$build_name -e bucket=$bucket" run_bazel << 'EOF' bazel build --config ci //pkg/cmd/publish-provisional-artifacts BAZEL_BIN=$(bazel info bazel-bin --config ci) $BAZEL_BIN/pkg/cmd/publish-provisional-artifacts/publish-provisional-artifacts_/publish-provisional-artifacts -bless -release -bucket "$bucket"