From 82716a18be006fab8ae5050ed257cdc6f8818ba4 Mon Sep 17 00:00:00 2001
From: Hangjie Mo
Date: Mon, 21 Aug 2023 15:18:33 +0800
Subject: [PATCH] This is an automated cherry-pick of #46111

Signed-off-by: ti-chi-bot
---
 executor/index_merge_reader.go      | 40 +++++++++++++++--------------
 executor/index_merge_reader_test.go | 13 ++++++++++
 2 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go
index a75e855851718..382fa7acccbda 100644
--- a/executor/index_merge_reader.go
+++ b/executor/index_merge_reader.go
@@ -677,7 +677,11 @@ func (w *partialTableWorker) getDatumRow(table table.Table, row chunk.Row, handl
 func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, handleCols plannercore.HandleCols) (
 	handles []kv.Handle, retChk *chunk.Chunk, err error) {
 	handles = make([]kv.Handle, 0, w.batchSize)
+	if len(w.byItems) != 0 {
+		retChk = chunk.NewChunkWithCapacity(w.getRetTpsForTableScan(), w.batchSize)
+	}
 	var memUsage int64
+	var chunkRowOffset int
 	defer w.memTracker.Consume(-memUsage)
 	tbl := w.tableReader.(*TableReaderExecutor).table
 	for len(handles) < w.batchSize {
@@ -692,7 +696,7 @@ func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.
 		start := time.Now()
 		err = errors.Trace(w.tableReader.Next(ctx, chk))
 		if err != nil {
-			return handles, nil, err
+			return nil, nil, err
 		}
 		if be := w.tableReader.base(); be != nil && be.runtimeStats != nil {
 			be.runtimeStats.Record(time.Since(start), chk.NumRows())
@@ -706,15 +710,15 @@ func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.
 		memDelta := chk.MemoryUsage()
 		memUsage += memDelta
 		w.memTracker.Consume(memDelta)
-		for i := 0; i < chk.NumRows(); i++ {
+		for chunkRowOffset = 0; chunkRowOffset < chk.NumRows(); chunkRowOffset++ {
 			if w.pushedLimit != nil {
 				w.scannedKeys++
 				if w.scannedKeys > (w.pushedLimit.Offset + w.pushedLimit.Count) {
 					// Skip the handles after Offset+Count.
-					return handles, retChk, nil
+					break
 				}
 			}
-			r := chk.GetRow(i)
+			r := chk.GetRow(chunkRowOffset)
 			handle, err := handleCols.BuildHandleFromIndexRow(r)
 			if err != nil {
 				return nil, nil, err
@@ -729,12 +733,9 @@ func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.
 			}
 			handles = append(handles, handle)
 		}
-		// used for limit embedded.
+		// used for order by
 		if len(w.byItems) != 0 {
-			if retChk == nil {
-				retChk = chunk.NewChunkWithCapacity(w.getRetTpsForTableScan(), w.batchSize)
-			}
-			retChk.Append(chk, 0, chk.NumRows())
+			retChk.Append(chk, 0, chunkRowOffset)
 		}
 	}
 	w.batchSize *= 2
@@ -1545,7 +1546,11 @@ func (w *partialIndexWorker) getRetTpsForIndexScan(handleCols plannercore.Handle
 func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult distsql.SelectResult, handleCols plannercore.HandleCols) (
 	handles []kv.Handle, retChk *chunk.Chunk, err error) {
 	handles = make([]kv.Handle, 0, w.batchSize)
+	if len(w.byItems) != 0 {
+		retChk = chunk.NewChunkWithCapacity(w.getRetTpsForIndexScan(handleCols, w.hasExtralPidCol()), w.batchSize)
+	}
 	var memUsage int64
+	var chunkRowOffset int
 	defer w.memTracker.Consume(-memUsage)
 	for len(handles) < w.batchSize {
 		requiredRows := w.batchSize - len(handles)
@@ -1559,7 +1564,7 @@ func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.
 		start := time.Now()
 		err = errors.Trace(idxResult.Next(ctx, chk))
 		if err != nil {
-			return handles, nil, err
+			return nil, nil, err
 		}
 		if w.stats != nil && w.idxID != 0 {
 			w.sc.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(w.idxID).Record(time.Since(start), chk.NumRows())
@@ -1573,31 +1578,28 @@ func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.
 		memDelta := chk.MemoryUsage()
 		memUsage += memDelta
 		w.memTracker.Consume(memDelta)
-		for i := 0; i < chk.NumRows(); i++ {
+		for chunkRowOffset = 0; chunkRowOffset < chk.NumRows(); chunkRowOffset++ {
 			if w.pushedLimit != nil {
 				w.scannedKeys++
 				if w.scannedKeys > (w.pushedLimit.Offset + w.pushedLimit.Count) {
 					// Skip the handles after Offset+Count.
-					return handles, retChk, nil
+					break
 				}
 			}
 			var handle kv.Handle
 			if w.hasExtralPidCol() {
-				handle, err = handleCols.BuildPartitionHandleFromIndexRow(chk.GetRow(i))
+				handle, err = handleCols.BuildPartitionHandleFromIndexRow(chk.GetRow(chunkRowOffset))
 			} else {
-				handle, err = handleCols.BuildHandleFromIndexRow(chk.GetRow(i))
+				handle, err = handleCols.BuildHandleFromIndexRow(chk.GetRow(chunkRowOffset))
 			}
 			if err != nil {
 				return nil, nil, err
 			}
 			handles = append(handles, handle)
 		}
-		// used for limit embedded.
+		// used for order by
 		if len(w.byItems) != 0 {
-			if retChk == nil {
-				retChk = chunk.NewChunkWithCapacity(w.getRetTpsForIndexScan(handleCols, w.hasExtralPidCol()), w.batchSize)
-			}
-			retChk.Append(chk, 0, chk.NumRows())
+			retChk.Append(chk, 0, chunkRowOffset)
 		}
 	}
 	w.batchSize *= 2
diff --git a/executor/index_merge_reader_test.go b/executor/index_merge_reader_test.go
index 8842064e63fa2..961222385a35f 100644
--- a/executor/index_merge_reader_test.go
+++ b/executor/index_merge_reader_test.go
@@ -1105,3 +1105,16 @@ func TestProcessInfoRaceWithIndexScan(t *testing.T) {
 	}
 	wg.Wait()
 }
+
+func TestIssues46005(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec("set tidb_index_lookup_size = 1024")
+	tk.MustExec("create table t(a int, b int, c int, index idx1(a, c), index idx2(b, c))")
+	for i := 0; i < 1500; i++ {
+		tk.MustExec(fmt.Sprintf("insert into t(a,b,c) values (1, 1, %d)", i))
+	}
+
+	tk.MustQuery("select /*+ USE_INDEX_MERGE(t, idx1, idx2) */ * from t where a = 1 or b = 1 order by c limit 1025")
+}
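For reference, below is a minimal standalone sketch of the loop shape the patch switches to: when a pushed-down limit is exceeded mid-chunk, the loop breaks instead of returning early, and only the prefix of the chunk that was actually consumed is retained, so the collected handles and the kept rows stay aligned. This is an illustrative approximation, not TiDB code; the names extractPrefix, rows, limit, and the int "handles" are hypothetical stand-ins for chk, pushedLimit, and kv.Handle.

package main

import "fmt"

// extractPrefix sketches the fixed pattern: break out of the row loop when the
// limit is hit, then keep only rows[0:offset], the prefix that was consumed,
// so the returned handles and kept rows have matching lengths and order.
func extractPrefix(rows []int, scanned, limit int) (handles, kept []int, newScanned int) {
	var offset int
	for offset = 0; offset < len(rows); offset++ {
		scanned++
		if scanned > limit {
			break // skip rows after the limit; offset marks the consumed prefix
		}
		handles = append(handles, rows[offset])
	}
	kept = append(kept, rows[:offset]...) // append only rows [0, offset)
	return handles, kept, scanned
}

func main() {
	handles, kept, _ := extractPrefix([]int{10, 11, 12, 13}, 0, 2)
	fmt.Println(handles, kept) // [10 11] [10 11]
}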