diff --git a/pkg/col/coldata/bytes.go b/pkg/col/coldata/bytes.go
index 2616ad60dbd4..da4488a72d5a 100644
--- a/pkg/col/coldata/bytes.go
+++ b/pkg/col/coldata/bytes.go
@@ -244,6 +244,14 @@ func (b *Bytes) copyElements(srcElementsToCopy []element, src *Bytes, destIdx in
 	// Optimize copying of the elements by copying all of them directly into the
 	// destination. This way all inlined values become correctly set, and we
 	// only need to set the non-inlined values separately.
+	//
+	// Note that this behavior results in losing the references to the old
+	// non-inlined values, even if they could be reused. If Bytes is not Reset,
+	// then that unused space in Bytes.buffer can accumulate. However, checking
+	// whether there are old non-inlined values with non-zero capacity leads to
+	// performance regressions, and in the production code we do reset the Bytes
+	// in all cases, so we accept this poor behavior in such a hypothetical /
+	// test-only scenario. See #78703 for more details.
 	copy(destElements, srcElementsToCopy)
 	// Early bounds checks.
 	_ = destElements[len(srcElementsToCopy)-1]
diff --git a/pkg/sql/colexecop/testutils.go b/pkg/sql/colexecop/testutils.go
index 886f3b8df646..e050c2611715 100644
--- a/pkg/sql/colexecop/testutils.go
+++ b/pkg/sql/colexecop/testutils.go
@@ -108,8 +108,9 @@ func (s *RepeatableBatchSource) Next() coldata.Batch {
 	if s.batchesToReturn != 0 && s.batchesReturned > s.batchesToReturn {
 		return coldata.ZeroBatch
 	}
-	s.output.SetSelection(s.sel != nil)
+	s.output.ResetInternalBatch()
 	if s.sel != nil {
+		s.output.SetSelection(true)
 		copy(s.output.Selection()[:s.batchLen], s.sel[:s.batchLen])
 	}
 	for i, colVec := range s.colVecs {
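
A minimal, self-contained sketch of the ordering introduced in the second hunk: reset the output batch's per-call state first, then enable and populate the selection vector only when a selection is actually present. It uses a hypothetical mockBatch and emit helper rather than the real coldata.Batch API; those names exist only for illustration.

package main

import "fmt"

// mockBatch is a hypothetical stand-in with just enough surface to show the
// reset-then-select ordering; it is not the CockroachDB coldata.Batch type.
type mockBatch struct {
	sel    []int // selection vector storage
	useSel bool  // whether the selection vector is active
	length int
}

// ResetInternalBatch clears per-call state, including the selection flag.
func (b *mockBatch) ResetInternalBatch() {
	b.useSel = false
	b.length = 0
}

// SetSelection toggles whether the selection vector is in use.
func (b *mockBatch) SetSelection(use bool) { b.useSel = use }

// Selection returns the underlying selection vector storage.
func (b *mockBatch) Selection() []int { return b.sel }

// emit mirrors the pattern from RepeatableBatchSource.Next: always reset the
// output batch, then copy the source selection only if one exists.
func emit(out *mockBatch, sel []int, batchLen int) {
	out.ResetInternalBatch()
	if sel != nil {
		out.SetSelection(true)
		copy(out.Selection()[:batchLen], sel[:batchLen])
	}
	out.length = batchLen
}

func main() {
	out := &mockBatch{sel: make([]int, 4)}
	emit(out, []int{0, 2, 3}, 3)
	fmt.Println(out.useSel, out.Selection()[:3]) // true [0 2 3]
	emit(out, nil, 4)
	fmt.Println(out.useSel) // false: the reset cleared the stale selection flag
}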