ddl: support dropping columns with composite indices #19196
After some discussions with @blacktear23, we have some raw ideas about this. Considering table
@bb7133 I just wrote some pseudo code for dropping a column covered by composite indices. We can discuss based on this code.

func (w *worker) onDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	tblInfo, colInfo, sidxInfos, cidxInfos, err := checkDropColumn(t, job)
	if err != nil {
		return ver, errors.Trace(err)
	}
	colOriginalState := colInfo.State
	if len(cidxInfos) > 0 && job.IsRollingback() {
		// Handle the rolling back job.
		ver, err = onDropCompositeIndices(t, job, cidxInfos)
		if err != nil {
			return ver, errors.Trace(err)
		}
		return ver, nil
	}
	if len(cidxInfos) > 0 && colOriginalState == model.StatePublic {
		fallThrough := false
		ctidxInfos := getOrCreateTempIndices(tblInfo, colInfo, cidxInfos)
		idxOriginalState := ctidxInfos[0].State
		switch ctidxInfos[0].State {
		case model.StateNone:
			// none -> delete only
			job.SchemaState = model.StateDeleteOnly
			setIndicesState(ctidxInfos, model.StateDeleteOnly)
			ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, idxOriginalState != ctidxInfos[0].State)
			metrics.AddIndexProgress.Set(0)
		case model.StateDeleteOnly:
			// delete only -> write only
			job.SchemaState = model.StateWriteOnly
			setIndicesState(ctidxInfos, model.StateWriteOnly)
			ver, err = updateVersionAndTableInfo(t, job, tblInfo, idxOriginalState != ctidxInfos[0].State)
		case model.StateWriteOnly:
			// write only -> reorganization
			job.SchemaState = model.StateWriteReorganization
			setIndicesState(ctidxInfos, model.StateWriteReorganization)
			// Initialize SnapshotVer to 0 for the later reorganization check.
			job.SnapshotVer = 0
			ver, err = updateVersionAndTableInfo(t, job, tblInfo, idxOriginalState != ctidxInfos[0].State)
		case model.StateWriteReorganization:
			tbl, err := getTable(d.store, job.SchemaID, tblInfo)
			if err != nil {
				return ver, errors.Trace(err)
			}
			for _, idxInfo := range ctidxInfos {
				// Run the reorg job for this temp index.
				ver, err = func(indexInfo *model.IndexInfo) (int64, error) {
					reorgInfo, err := getReorgInfo(d, t, job, tbl)
					if err != nil || reorgInfo.first {
						// If we run reorg for the first time, we should update the job snapshot version
						// and then run the reorg next time.
						return ver, errors.Trace(err)
					}
					err = w.runReorgJob(t, reorgInfo, tbl.Meta(), d.lease, func() (addIndexErr error) {
						defer util.Recover(metrics.LabelDDL, "onDropColumn",
							func() {
								addIndexErr = errCancelledDDLJob.GenWithStack("add table `%v` index `%v` panic", tblInfo.Name, indexInfo.Name)
							}, false)
						return w.addTableIndex(tbl, indexInfo, reorgInfo)
					})
					if err != nil {
						if errWaitReorgTimeout.Equal(err) {
							// If it timed out, we should return, check for the owner, and re-wait for the job to be done.
							return ver, nil
						}
						if kv.ErrKeyExists.Equal(err) || errCancelledDDLJob.Equal(err) || errCantDecodeIndex.Equal(err) {
							logutil.BgLogger().Warn("[ddl] run add index job failed, convert job to rollback", zap.String("job", job.String()), zap.Error(err))
							ver, err = convertDropColumnWithCompositeIdxJob2RollbackJob(t, job, tblInfo, err)
						}
						// Clean up the channel of notifyCancelReorgJob. Make sure it can't affect other jobs.
						w.reorgCtx.cleanNotifyReorgCancel()
						return ver, errors.Trace(err)
					}
					// Clean up the channel of notifyCancelReorgJob. Make sure it can't affect other jobs.
					w.reorgCtx.cleanNotifyReorgCancel()
					return ver, nil
				}(idxInfo)
				if err != nil {
					return ver, errors.Trace(err)
				}
				// Set the column index flag.
				addIndexColumnFlag(tblInfo, idxInfo)
			}
			ver, err = updateVersionAndTableInfo(t, job, tblInfo, idxOriginalState != ctidxInfos[0].State)
			if err != nil {
				return ver, errors.Trace(err)
			}
			fallThrough = true
		default:
			err = ErrInvalidDDLState.GenWithStackByArgs("index", tblInfo.State)
		}
		if !fallThrough {
			return ver, errors.Trace(err)
		}
	}
	// Drop the column.
	switch colInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		colInfo.State = model.StateWriteOnly
		setIndicesState(sidxInfos, model.StateWriteOnly)
		setIndicesState(cidxInfos, model.StateWriteOnly)
		err = checkDropColumnForStatePublic(tblInfo, colInfo)
		if err != nil {
			return ver, errors.Trace(err)
		}
		ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, colOriginalState != colInfo.State)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		colInfo.State = model.StateDeleteOnly
		setIndicesState(sidxInfos, model.StateDeleteOnly)
		setIndicesState(cidxInfos, model.StateDeleteOnly)
		ver, err = updateVersionAndTableInfo(t, job, tblInfo, colOriginalState != colInfo.State)
	case model.StateDeleteOnly:
		// delete only -> reorganization
		job.SchemaState = model.StateDeleteReorganization
		colInfo.State = model.StateDeleteReorganization
		setIndicesState(sidxInfos, model.StateDeleteReorganization)
		setIndicesState(cidxInfos, model.StateDeleteReorganization)
		ver, err = updateVersionAndTableInfo(t, job, tblInfo, colOriginalState != colInfo.State)
	case model.StateDeleteReorganization:
		// reorganization -> absent
		// All reorganization jobs are done, drop this column.
		if len(sidxInfos) > 0 {
			newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices))
			for _, idx := range tblInfo.Indices {
				if !indexInfoContains(idx.ID, sidxInfos) {
					newIndices = append(newIndices, idx)
				}
			}
			tblInfo.Indices = newIndices
		}
		indexIDs := indexInfosToIDList(sidxInfos)
		if len(cidxInfos) > 0 {
			ctidxInfos := getOrCreateTempIndices(tblInfo, colInfo, cidxInfos)
			// Drop the origin composite indices.
			newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices))
			for _, idx := range tblInfo.Indices {
				if !indexInfoContains(idx.ID, cidxInfos) {
					newIndices = append(newIndices, idx)
				}
			}
			tblInfo.Indices = newIndices
			for _, idx := range ctidxInfos {
				// Rename the temp index back to the origin name.
				idx.Name = renameToOrigin(idx, cidxInfos)
				// Set its state to public.
				idx.State = model.StatePublic
			}
			indexIDs = append(indexIDs, indexInfosToIDList(cidxInfos)...)
		}
		tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-1]
		colInfo.State = model.StateNone
		ver, err = updateVersionAndTableInfo(t, job, tblInfo, colOriginalState != colInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Finish this job.
		if job.IsRollingback() {
			job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
		} else {
			// We should set the related index IDs for the job.
			job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
			job.Args = append(job.Args, indexIDs, getPartitionIDs(tblInfo))
		}
	default:
		err = errInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State)
	}
	return ver, errors.Trace(err)
}
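The POC above leans on a few helpers that do not exist yet, most importantly getOrCreateTempIndices. Below is a minimal sketch of what I have in mind, assumed to live in the ddl package next to the code above; the _tmp_cidx_ prefix, the clone-and-trim details, and the reuse of allocateIndexID and IndexInfo.Clone are all assumptions for illustration, not a final design.

// tempIdxPrefix marks the hidden temp indices created while dropping a column
// that is covered by composite indices. The concrete prefix is a placeholder.
const tempIdxPrefix = "_tmp_cidx_"

// getOrCreateTempIndices returns the temp indices that will replace the
// composite indices covering the dropped column. On the first call it clones
// every composite index, removes the dropped column from the clone, gives it a
// prefixed name and a fresh ID, and registers it on tblInfo; on later calls it
// simply looks the clones up again by the prefixed name.
func getOrCreateTempIndices(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, cidxInfos []*model.IndexInfo) []*model.IndexInfo {
	result := make([]*model.IndexInfo, 0, len(cidxInfos))
	for _, origin := range cidxInfos {
		tempName := model.NewCIStr(tempIdxPrefix + origin.Name.O)
		var tempIdx *model.IndexInfo
		for _, idx := range tblInfo.Indices {
			if idx.Name.L == tempName.L {
				tempIdx = idx
				break
			}
		}
		if tempIdx == nil {
			tempIdx = origin.Clone()
			tempIdx.Name = tempName
			tempIdx.ID = allocateIndexID(tblInfo)
			tempIdx.State = model.StateNone
			// Exclude the dropped column from the cloned index columns.
			cols := make([]*model.IndexColumn, 0, len(tempIdx.Columns))
			for _, ic := range tempIdx.Columns {
				if ic.Name.L != colInfo.Name.L {
					cols = append(cols, ic)
				}
			}
			tempIdx.Columns = cols
			tblInfo.Indices = append(tblInfo.Indices, tempIdx)
		}
		result = append(result, tempIdx)
	}
	return result
}

The "get or create" shape matters because onDropColumn is re-entered once per schema state: later runs must find the same temp indices instead of cloning new ones.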
@bb7133 And there are still some unclear areas:
And there is another point: we need to add a field to IndexInfo that tracks the relationship between the original composite index and the newly created temp composite index.
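For illustration only, here is a self-contained toy sketch of that relationship. indexMeta is a stand-in, not the real model.IndexInfo; in practice this would just be one extra field on IndexInfo plus whatever serialization the parser needs.

package main

import "fmt"

// indexMeta stands in for model.IndexInfo with only the fields that matter
// here. OriginIndexID is the proposed new field: for a temp composite index it
// records the ID of the original composite index it will replace, and it stays
// zero for ordinary indices.
type indexMeta struct {
	ID            int64
	Name          string
	OriginIndexID int64
}

// originOf resolves which original index a temp index was cloned from.
func originOf(temp *indexMeta, indices []*indexMeta) *indexMeta {
	for _, idx := range indices {
		if idx.ID == temp.OriginIndexID {
			return idx
		}
	}
	return nil
}

func main() {
	origin := &indexMeta{ID: 1, Name: "idx_b_c"}
	temp := &indexMeta{ID: 2, Name: "_tmp_cidx_idx_b_c", OriginIndexID: origin.ID}
	if o := originOf(temp, []*indexMeta{origin, temp}); o != nil {
		fmt.Printf("temp index %s replaces %s\n", temp.Name, o.Name)
	}
}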
@bb7133 Below is the rollback POC:

func rollingbackDropColumn(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
	tblInfo, colInfo, sidxInfos, cidxInfos, err := checkDropColumn(t, job)
	if err != nil {
		return ver, errors.Trace(err)
	}
	if len(cidxInfos) > 0 && colInfo.State == model.StatePublic {
		ctidxInfos := getOrCreateTempIndices(tblInfo, colInfo, cidxInfos)
		switch ctidxInfos[0].State {
		case model.StateWriteReorganization:
			if job.SnapshotVer != 0 {
				// The add-index workers have started; ask them to exit.
				logutil.Logger(w.logCtx).Info("[ddl] run the cancelling DDL job", zap.String("job", job.String()))
				w.reorgCtx.notifyReorgCancel()
				return w.onDropColumn(d, t, job)
			}
			return convertDropColumnWithCompositeIdxJob2RollbackJob(t, job, tblInfo, errCancelledDDLJob)
		case model.StateNone, model.StateDeleteOnly, model.StateWriteOnly:
			return convertDropColumnWithCompositeIdxJob2RollbackJob(t, job, tblInfo, errCancelledDDLJob)
		}
	}
	for _, indexInfo := range sidxInfos {
		switch indexInfo.State {
		case model.StateWriteOnly, model.StateDeleteOnly, model.StateDeleteReorganization, model.StateNone:
			// We can not roll back now, so just continue to drop the index.
			// isJobRollbackable lets the job roll back when the state is StateNone.
			// When there is no index related to the drop-column job that is OK, but when there are indices,
			// we should make sure the job is not rolled back.
			job.State = model.JobStateRunning
			return ver, nil
		case model.StatePublic:
		default:
			return ver, ErrInvalidDDLState.GenWithStackByArgs("index", indexInfo.State)
		}
	}
	// StatePublic means the job has not started running yet.
	if colInfo.State == model.StatePublic {
		job.State = model.JobStateCancelled
		job.FinishTableJob(model.JobStateRollbackDone, model.StatePublic, ver, tblInfo)
		return ver, errCancelledDDLJob
	}
	// In the drop-column states `write only -> delete only -> reorganization`,
	// we can not roll back, so just continue to drop the column.
	job.State = model.JobStateRunning
	return ver, nil
}
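Both POCs call convertDropColumnWithCompositeIdxJob2RollbackJob without showing it. Roughly what I have in mind is below, assuming it lives in the ddl package, reuses the existing setIndicesState and updateVersionAndTableInfo helpers, and finds the temp indices via the placeholder name prefix from the earlier sketch; every detail here is open for discussion.

// convertDropColumnWithCompositeIdxJob2RollbackJob turns the running drop-column
// job into a rollback job. At this point the column and the original composite
// indices are still public and untouched, so rolling back only means tearing
// down the half-built temp indices.
func convertDropColumnWithCompositeIdxJob2RollbackJob(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, cause error) (ver int64, err error) {
	// Collect the temp indices by the reserved name prefix (or by the
	// origin-index link, if we add such a field to IndexInfo).
	tmpIdxInfos := make([]*model.IndexInfo, 0, len(tblInfo.Indices))
	for _, idx := range tblInfo.Indices {
		if strings.HasPrefix(idx.Name.L, tempIdxPrefix) {
			tmpIdxInfos = append(tmpIdxInfos, idx)
		}
	}
	// From here on the temp indices follow the normal drop-index teardown.
	setIndicesState(tmpIdxInfos, model.StateDeleteOnly)
	job.SchemaState = model.StateDeleteOnly
	ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
	if err != nil {
		return ver, errors.Trace(err)
	}
	// Mark the job as rolling back so that onDropColumn takes its
	// IsRollingback branch on the next run.
	job.State = model.JobStateRollingback
	return ver, errors.Trace(cause)
}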
In fact, we've already used this pattern (adding a specific prefix), for example in https://github.com/pingcap/tidb/pull/19059/files#diff-edfa54cbe2fdb418f0a77bd0ec4a56f2R64. Can we follow the same style?
I think we can try to reuse
Yes, we can follow this style.
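Following that prefix style, the renameToOrigin helper used in the POC could then be as simple as the sketch below; the prefix value is still a placeholder, and whatever reserved-prefix convention #19059 settles on would be reused.

// renameToOrigin maps a temp index back to the name of the composite index it
// replaces, falling back to stripping the reserved prefix if no match is found.
func renameToOrigin(tempIdx *model.IndexInfo, cidxInfos []*model.IndexInfo) model.CIStr {
	for _, origin := range cidxInfos {
		if tempIdx.Name.L == tempIdxPrefix+origin.Name.L {
			return origin.Name
		}
	}
	return model.NewCIStr(strings.TrimPrefix(tempIdx.Name.O, tempIdxPrefix))
}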
@bb7133 I added [DNM] to pingcap/parser#974. After this feature is implemented I will close it.
Feature Request
Is your feature request related to a problem? Please describe:
After PR #18852, #18812 is supported in TiDB.
Now we'd like to go on improving the ALTER TABLE ... DROP COLUMN statement to support dropping columns with composite indices like:

Describe the feature you'd like:
See #3364
Describe alternatives you've considered:
Manually, the effect of ALTER TABLE ... DROP COLUMN in TiDB can be achieved in 2 separate steps:

However, some third-party frameworks rely on dropping columns directly, so workarounds have to be made for this limitation. This can be disappointing.
Teachability, Documentation, Adoption, Migration Strategy:
More help is available by referring to:
The internal algorithm of TiDB online DDL, more specifically the AddColumn and AddIndex actions.
The key schema state change codes: