Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[DNM]try fix #52178

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion pkg/ddl/backfilling_scheduler.go
Original file line number Diff line number Diff line change
Expand Up @@ -464,7 +464,7 @@ func (b *ingestBackfillScheduler) createWorker() workerpool.Worker[IndexRecordCh
worker, err := newAddIndexIngestWorker(
b.ctx, b.tbl, reorgInfo.d, engines, b.resultCh, job.ID,
reorgInfo.SchemaName, indexIDs, b.writerMaxID,
b.copReqSenderPool, sessCtx, b.checkpointMgr, b.distribute)
b.copReqSenderPool, sessCtx, bcCtx, b.checkpointMgr, b.distribute)
if err != nil {
// Return an error only if it is the first worker.
if b.writerMaxID == 0 {
Expand Down Expand Up @@ -574,6 +574,11 @@ func (w *addIndexIngestWorker) HandleTask(rs IndexRecordChunk, _ func(workerpool
result.addedCount = count
result.scanCount = count
result.nextKey = nextKey
// needs to flush and import to avoid too much use of disk.
flushed, _, _, err := ingest.TryFlushAllIndexes(w.backendCtx, ingest.FlushModeAuto, w.indexIDs)
if !flushed || err != nil {
result.err = err
}
}
if ResultCounterForTest != nil && result.err == nil {
ResultCounterForTest.Add(1)
Expand Down
5 changes: 5 additions & 0 deletions pkg/ddl/index.go
Original file line number Diff line number Diff line change
Expand Up @@ -1701,8 +1701,10 @@ type addIndexIngestWorker struct {

tbl table.PhysicalTable
indexes []table.Index
indexIDs []int64
writers []ingest.Writer
copReqSenderPool *copReqSenderPool
backendCtx ingest.BackendCtx
checkpointMgr *ingest.CheckpointManager

resultCh chan *backfillResult
Expand All @@ -1722,6 +1724,7 @@ func newAddIndexIngestWorker(
writerID int,
copReqSenderPool *copReqSenderPool,
sessCtx sessionctx.Context,
backendCtx ingest.BackendCtx,
checkpointMgr *ingest.CheckpointManager,
distribute bool,
) (*addIndexIngestWorker, error) {
Expand All @@ -1745,11 +1748,13 @@ func newAddIndexIngestWorker(
metricCounter: metrics.BackfillTotalCounter.WithLabelValues(
metrics.GenerateReorgLabel("add_idx_rate", schemaName, t.Meta().Name.O)),
tbl: t,
indexIDs: indexIDs,
indexes: indexes,
writers: writers,
copReqSenderPool: copReqSenderPool,
resultCh: resultCh,
jobID: jobID,
backendCtx: backendCtx,
checkpointMgr: checkpointMgr,
distribute: distribute,
}, nil
Expand Down
1 change: 1 addition & 0 deletions pkg/ddl/ingest/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ go_library(
"engine.go",
"engine_mgr.go",
"env.go",
"flush.go",
"mem_root.go",
"message.go",
"mock.go",
Expand Down
18 changes: 2 additions & 16 deletions pkg/ddl/ingest/checkpoint.go
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ func (s *CheckpointManager) UpdateCurrent(taskID int, added int) error {
cp.currentKeys += added
s.mu.Unlock()

flushed, imported, err := s.tryFlushAllIndexes(FlushModeAuto)
flushed, imported, _, err := TryFlushAllIndexes(s.flushCtrl, FlushModeAuto, s.indexIDs)
if !flushed || err != nil {
return err
}
Expand All @@ -194,20 +194,6 @@ func (s *CheckpointManager) UpdateCurrent(taskID int, added int) error {
return nil
}

func (s *CheckpointManager) tryFlushAllIndexes(mode FlushMode) (flushed, imported bool, err error) {
allFlushed := true
allImported := true
for _, idxID := range s.indexIDs {
flushed, imported, err := s.flushCtrl.Flush(idxID, mode)
if err != nil {
return false, false, err
}
allFlushed = allFlushed && flushed
allImported = allImported && imported
}
return allFlushed, allImported, nil
}

func (s *CheckpointManager) progressLocalSyncMinKey() {
for {
cp := s.checkpoints[s.minTaskIDSynced+1]
Expand All @@ -232,7 +218,7 @@ func (s *CheckpointManager) Close() {

// Sync syncs the checkpoint.
func (s *CheckpointManager) Sync() {
_, _, err := s.tryFlushAllIndexes(FlushModeForceLocal)
_, _, _, err := TryFlushAllIndexes(s.flushCtrl, FlushModeForceLocal, s.indexIDs)
if err != nil {
logutil.BgLogger().Warn("flush local engine failed", zap.String("category", "ddl-ingest"), zap.Error(err))
}
Expand Down
30 changes: 30 additions & 0 deletions pkg/ddl/ingest/flush.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ingest

// TryFlushAllIndexes flushes every index in indexIDs through flushCtrl with the
// given mode. It reports whether all indexes were flushed and whether all were
// imported. On the first Flush error it stops immediately and returns the ID of
// the index that failed; on success failedIdxID is -1.
func TryFlushAllIndexes(flushCtrl FlushController, mode FlushMode, indexIDs []int64) (flushed, imported bool, failedIdxID int64, err error) {
	flushed, imported = true, true
	for _, id := range indexIDs {
		var oneFlushed, oneImported bool
		oneFlushed, oneImported, err = flushCtrl.Flush(id, mode)
		if err != nil {
			return false, false, id, err
		}
		// Aggregate with AND: a single index that is not yet
		// flushed/imported makes the overall result false.
		if !oneFlushed {
			flushed = false
		}
		if !oneImported {
			imported = false
		}
	}
	return flushed, imported, -1, nil
}
5 changes: 3 additions & 2 deletions pkg/sessionctx/variable/sysvar.go
Original file line number Diff line number Diff line change
Expand Up @@ -2458,10 +2458,11 @@ var defaultSysVars = []*SysVar{
return nil
}},
// This system var is set disk quota for lightning sort dir, from 100 GB to 1PB.
{Scope: ScopeGlobal, Name: TiDBDDLDiskQuota, Value: strconv.Itoa(DefTiDBDDLDiskQuota), Type: TypeInt, MinValue: DefTiDBDDLDiskQuota, MaxValue: 1024 * 1024 * DefTiDBDDLDiskQuota / 100, GetGlobal: func(_ context.Context, sv *SessionVars) (string, error) {
{Scope: ScopeGlobal, Name: TiDBDDLDiskQuota, Value: strconv.Itoa(DefTiDBDDLDiskQuota), Type: TypeInt, MinValue: 0, MaxValue: 1024 * 1024 * DefTiDBDDLDiskQuota / 100, GetGlobal: func(_ context.Context, sv *SessionVars) (string, error) {
return strconv.FormatUint(DDLDiskQuota.Load(), 10), nil
}, SetGlobal: func(_ context.Context, s *SessionVars, val string) error {
DDLDiskQuota.Store(TidbOptUint64(val, DefTiDBDDLDiskQuota))
DDLDiskQuota.Store(TidbOptUint64("10", DefTiDBDDLDiskQuota))
logutil.BgLogger().Info("ywq test disk quota", zap.Any("val", DDLDiskQuota.Load()))
return nil
}},
// can't assign validate function here. Because validation function will run after GetGlobal function
Expand Down
7 changes: 7 additions & 0 deletions tests/realtikvtest/addindextest/add_index_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,11 +129,15 @@ func TestIssue51162(t *testing.T) {
PRIMARY KEY (col_47,col_46(2)) /*T![clustered_index] CLUSTERED */
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;`)

tk.MustExec(`set global tidb_ddl_disk_quota="10";`)
tk.MustExec(`set global tidb_enable_dist_task=on;`)

tk.MustExec(`INSERT INTO tl VALUES
('[\"1\"]',0,'1','[1]','Wxup81','1','10:14:20');`)

tk.MustExec("alter table tl add index idx_16(`col_48`,(cast(`col_45` as signed array)),`col_46`(5));")
tk.MustExec("admin check table tl")
tk.MustExec(`set global tidb_enable_dist_task=off;`)
}

func TestAddUKWithSmallIntHandles(t *testing.T) {
Expand All @@ -142,8 +146,11 @@ func TestAddUKWithSmallIntHandles(t *testing.T) {
tk.MustExec("drop database if exists small;")
tk.MustExec("create database small;")
tk.MustExec("use small;")
tk.MustExec(`set global tidb_enable_dist_task=on;`)
tk.MustExec(`set global tidb_ddl_enable_fast_reorg=1;`)
tk.MustExec("create table t (a bigint, b int, primary key (a) clustered)")
tk.MustExec("insert into t values (-9223372036854775808, 1),(-9223372036854775807, 1)")
tk.MustContainErrMsg("alter table t add unique index uk(b)", "Duplicate entry '1' for key 't.uk'")
tk.MustExec(`set global tidb_enable_dist_task=off;`)
tk.MustContainErrMsg("alter table t add unique index uk(b)", "Duplicate entry '1' for key 't.uk'")
}