From aaca081cec39c6a120085c840acaf32c38885ab2 Mon Sep 17 00:00:00 2001
From: tangenta
Date: Wed, 31 Jul 2024 19:07:28 +0800
Subject: [PATCH] ddl: record get owner TS and compare it before runReorgJob
 quit (#55049)

close pingcap/tidb#54897
---
 pkg/ddl/ddl.go           | 15 ++++++++++++++-
 pkg/ddl/job_scheduler.go |  1 +
 pkg/ddl/reorg.go         | 24 +++++++++++++++++++++---
 3 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/pkg/ddl/ddl.go b/pkg/ddl/ddl.go
index 48a021793cc73..236dc403a2ec6 100644
--- a/pkg/ddl/ddl.go
+++ b/pkg/ddl/ddl.go
@@ -519,6 +519,19 @@ type reorgContexts struct {
 	sync.RWMutex
 	// reorgCtxMap maps job ID to reorg context.
 	reorgCtxMap map[int64]*reorgCtx
+	beOwnerTS   int64
+}
+
+func (r *reorgContexts) getOwnerTS() int64 {
+	r.RLock()
+	defer r.RUnlock()
+	return r.beOwnerTS
+}
+
+func (r *reorgContexts) setOwnerTS(ts int64) {
+	r.Lock()
+	r.beOwnerTS = ts
+	r.Unlock()
 }
 
 func (dc *ddlCtx) getReorgCtx(jobID int64) *reorgCtx {
@@ -536,7 +549,7 @@ func (dc *ddlCtx) newReorgCtx(jobID int64, rowCount int64) *reorgCtx {
 		return existedRC
 	}
 	rc := &reorgCtx{}
-	rc.doneCh = make(chan error, 1)
+	rc.doneCh = make(chan reorgFnResult, 1)
 	// initial reorgCtx
 	rc.setRowCount(rowCount)
 	rc.mu.warnings = make(map[errors.ErrorID]*terror.Error)
diff --git a/pkg/ddl/job_scheduler.go b/pkg/ddl/job_scheduler.go
index 683af6bff20ef..b97c9de5cb3b8 100644
--- a/pkg/ddl/job_scheduler.go
+++ b/pkg/ddl/job_scheduler.go
@@ -112,6 +112,7 @@ func (l *ownerListener) OnBecomeOwner() {
 		sessPool:    l.ddl.sessPool,
 		delRangeMgr: l.ddl.delRangeMgr,
 	}
+	l.ddl.reorgCtx.setOwnerTS(time.Now().Unix())
 	l.scheduler.start()
 }
 
diff --git a/pkg/ddl/reorg.go b/pkg/ddl/reorg.go
index c5295281ee686..2154974826a5a 100644
--- a/pkg/ddl/reorg.go
+++ b/pkg/ddl/reorg.go
@@ -65,7 +65,7 @@ type reorgCtx struct {
 	// If the reorganization job is done, we will use this channel to notify outer.
 	// TODO: Now we use goroutine to simulate reorganization jobs, later we may
 	// use a persistent job list.
-	doneCh chan error
+	doneCh chan reorgFnResult
 	// rowCount is used to simulate a job's row count.
 	rowCount int64
 	jobState model.JobState
@@ -80,6 +80,13 @@ type reorgCtx struct {
 	references atomicutil.Int32
 }
 
+// reorgFnResult records the DDL owner TS before executing reorg function, in order to help
+// receiver determine if the result is from reorg function of previous DDL owner in this instance.
+type reorgFnResult struct {
+	ownerTS int64
+	err     error
+}
+
 func newReorgExprCtx() exprctx.ExprContext {
 	evalCtx := contextstatic.NewStaticEvalContext(
 		contextstatic.WithSQLMode(mysql.ModeNone),
@@ -251,11 +258,13 @@ func (w *worker) runReorgJob(
 			return dbterror.ErrCancelledDDLJob
 		}
 
+		beOwnerTS := w.ddlCtx.reorgCtx.getOwnerTS()
 		rc = w.newReorgCtx(reorgInfo.Job.ID, reorgInfo.Job.GetRowCount())
 		w.wg.Add(1)
 		go func() {
 			defer w.wg.Done()
-			rc.doneCh <- reorgFn()
+			err := reorgFn()
+			rc.doneCh <- reorgFnResult{ownerTS: beOwnerTS, err: err}
 		}()
 	}
 
@@ -271,7 +280,16 @@
 	// wait reorganization job done or timeout
 	select {
-	case err := <-rc.doneCh:
+	case res := <-rc.doneCh:
+		err := res.err
+		curTS := w.ddlCtx.reorgCtx.getOwnerTS()
+		if res.ownerTS != curTS {
+			d.removeReorgCtx(job.ID)
+			logutil.DDLLogger().Warn("owner ts mismatch, return timeout error and retry",
+				zap.Int64("prevTS", res.ownerTS),
+				zap.Int64("curTS", curTS))
+			return dbterror.ErrWaitReorgTimeout
+		}
 		// Since job is cancelled,we don't care about its partial counts.
 		if rc.isReorgCanceled() || terror.ErrorEqual(err, dbterror.ErrCancelledDDLJob) {
 			d.removeReorgCtx(job.ID)
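
Note: the patch applies a common "ownership term" guard: stamp a timestamp when this
instance becomes owner, capture it before launching the reorg goroutine, and compare it
when the result arrives, so a result produced under a previous ownership term is
discarded rather than completing the job. Below is a minimal standalone sketch of that
pattern; all names here (ownerTS, result, doneCh) are hypothetical stand-ins for the
patch's reorgContexts.beOwnerTS, reorgFnResult, and reorgCtx.doneCh, not TiDB code.

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	// ownerTS guards a "became owner" timestamp, playing the role of
	// reorgContexts.beOwnerTS in the patch.
	type ownerTS struct {
		mu sync.RWMutex
		ts int64
	}

	func (o *ownerTS) get() int64 {
		o.mu.RLock()
		defer o.mu.RUnlock()
		return o.ts
	}

	func (o *ownerTS) set(ts int64) {
		o.mu.Lock()
		o.ts = ts
		o.mu.Unlock()
	}

	// result carries the owner TS captured before the work started,
	// mirroring reorgFnResult.
	type result struct {
		ownerTS int64
		err     error
	}

	func main() {
		owner := &ownerTS{}
		owner.set(time.Now().Unix()) // what OnBecomeOwner does in the patch

		doneCh := make(chan result, 1)
		startTS := owner.get() // captured before spawning the worker goroutine
		go func() {
			// ... long-running reorg work would happen here ...
			doneCh <- result{ownerTS: startTS, err: nil}
		}()

		// Simulate losing and regaining ownership while the worker runs.
		owner.set(owner.get() + 1)

		res := <-doneCh
		if res.ownerTS != owner.get() {
			// Stale result from a previous ownership term: discard it and retry,
			// as runReorgJob does by returning dbterror.ErrWaitReorgTimeout.
			fmt.Println("owner ts mismatch, retry")
			return
		}
		fmt.Println("result accepted, err =", res.err)
	}

The buffered channel of size 1 matters in both the sketch and the patch: the goroutine
can deliver its result and exit even if no receiver is currently waiting.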