From 8ee5acbae1792b1ded328c2fd0871f16382aa389 Mon Sep 17 00:00:00 2001
From: lance6716
Date: Mon, 23 Oct 2023 13:31:38 +0800
Subject: [PATCH] fix git conflict

Signed-off-by: lance6716
---
 executor/builder.go       |   6 +
 executor/executor.go      |   4 -
 executor/insert_common.go |  38 ++--
 executor/load_data.go     | 354 +-------------------------------------
 executor/replace.go       |  56 ------
 executor/utils_test.go    |  16 --
 6 files changed, 21 insertions(+), 453 deletions(-)

diff --git a/executor/builder.go b/executor/builder.go
index 582a0fd0c75e3..a8be45751f012 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -926,6 +926,8 @@ func (b *executorBuilder) buildLoadData(v *plannercore.LoadData) Executor {
 		isLoadData:   true,
 		txnInUse:     sync.Mutex{},
 	}
+	restrictive := b.ctx.GetSessionVars().SQLMode.HasStrictMode() &&
+		v.OnDuplicate != ast.OnDuplicateKeyHandlingIgnore
 	loadDataInfo := &LoadDataInfo{
 		row:          make([]types.Datum, 0, len(insertVal.insertColumns)),
 		InsertValues: insertVal,
@@ -937,6 +939,10 @@ func (b *executorBuilder) buildLoadData(v *plannercore.LoadData) Executor {
 		ColumnAssignments:  v.ColumnAssignments,
 		ColumnsAndUserVars: v.ColumnsAndUserVars,
 		Ctx:                b.ctx,
+		restrictive:        restrictive,
+	}
+	if !restrictive {
+		b.ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
 	}
 	columnNames := loadDataInfo.initFieldMappings()
 	err := loadDataInfo.initLoadColumns(columnNames)
diff --git a/executor/executor.go b/executor/executor.go
index 8354a957e02ec..011a6bb231730 100644
--- a/executor/executor.go
+++ b/executor/executor.go
@@ -2100,15 +2100,11 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
 		sc.NoZeroDate = vars.SQLMode.HasNoZeroDateMode()
 		sc.TruncateAsWarning = !vars.StrictSQLMode
 	case *ast.LoadDataStmt:
-<<<<<<< HEAD
-		sc.DupKeyAsWarning = true
 		sc.BadNullAsWarning = true
 		// With IGNORE or LOCAL, data-interpretation errors become warnings and the load operation continues,
 		// even if the SQL mode is restrictive. For details: https://dev.mysql.com/doc/refman/8.0/en/load-data.html
 		// TODO: since TiDB only support the LOCAL by now, so the TruncateAsWarning are always true here.
 		sc.TruncateAsWarning = true
-=======
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 		sc.InLoadDataStmt = true
 		// return warning instead of error when load data meet no partition for value
 		sc.IgnoreNoPartition = true
diff --git a/executor/insert_common.go b/executor/insert_common.go
index 9f8939e8e76b3..8b955900b3bcc 100644
--- a/executor/insert_common.go
+++ b/executor/insert_common.go
@@ -1183,18 +1183,6 @@ func (e *InsertValues) batchCheckAndInsert(ctx context.Context, rows [][]types.D
 		for _, uk := range r.uniqueKeys {
 			_, err := txn.Get(ctx, uk.newKey)
 			if err == nil {
-<<<<<<< HEAD
-				// If duplicate keys were found in BatchGet, mark row = nil.
-				e.ctx.GetSessionVars().StmtCtx.AppendWarning(uk.dupErr)
-				if txnCtx := e.ctx.GetSessionVars().TxnCtx; txnCtx.IsPessimistic {
-					// lock duplicated unique key on insert-ignore
-					txnCtx.AddUnchangedRowKey(uk.newKey)
-				}
-				skip = true
-				break
-			}
-			if !kv.IsErrNotFound(err) {
-=======
 				if replace {
 					_, handle, err := tables.FetchDuplicatedHandle(
 						ctx,
@@ -1217,11 +1205,14 @@ func (e *InsertValues) batchCheckAndInsert(ctx context.Context, rows [][]types.D
 				} else {
 					// If duplicate keys were found in BatchGet, mark row = nil.
 					e.ctx.GetSessionVars().StmtCtx.AppendWarning(uk.dupErr)
+					if txnCtx := e.ctx.GetSessionVars().TxnCtx; txnCtx.IsPessimistic {
+						// lock duplicated unique key on insert-ignore
+						txnCtx.AddUnchangedRowKey(uk.newKey)
+					}
 					skip = true
 					break
 				}
 			} else if !kv.IsErrNotFound(err) {
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 				return err
 			}
 		}
@@ -1270,28 +1261,25 @@ func (e *InsertValues) removeRow(
 		return false, err
 	}
 	if identical {
-<<<<<<< HEAD
-		_, err := appendUnchangedRowForLock(e.ctx, r.t, handle, oldRow)
-		if err != nil {
-			return err
-		}
-		return nil
-=======
 		if inReplace {
 			e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1)
 		}
+		_, err := appendUnchangedRowForLock(e.ctx, r.t, handle, oldRow)
+		if err != nil {
+			return false, err
+		}
 		return true, nil
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 	}
 
 	err = r.t.RemoveRecord(e.ctx, handle, oldRow)
 	if err != nil {
 		return false, err
 	}
-	err = onRemoveRowForFK(e.ctx, oldRow, e.fkChecks, e.fkCascades)
-	if err != nil {
-		return false, err
-	}
+	// need https://github.com/pingcap/tidb/pull/40069
+	//err = onRemoveRowForFK(e.ctx, oldRow, e.fkChecks, e.fkCascades)
+	//if err != nil {
+	//	return false, err
+	//}
 	if inReplace {
 		e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1)
 	} else {
diff --git a/executor/load_data.go b/executor/load_data.go
index b5e5c87865e95..491035f804b19 100644
--- a/executor/load_data.go
+++ b/executor/load_data.go
@@ -117,7 +117,6 @@ type LoadDataInfo struct {
 	rows    [][]types.Datum
 	Drained bool
 
-<<<<<<< HEAD
 	ColumnAssignments     []*ast.Assignment
 	ColumnAssignmentExprs []expression.Expression
 	// sessionCtx generate warnings when rewrite AST node into expression.
@@ -125,19 +124,11 @@ type LoadDataInfo struct {
 	exprWarnings          []stmtctx.SQLWarn
 	ColumnsAndUserVars    []*ast.ColumnNameOrUserVar
 	FieldMappings         []*FieldMapping
-=======
-	format             string
-	schemaName         string
-	columnAssignments  []*ast.Assignment
-	columnsAndUserVars []*ast.ColumnNameOrUserVar
-	fieldMappings      []*FieldMapping
-	onDuplicate        ast.OnDuplicateKeyHandlingType
 	// Data interpretation is restrictive if the SQL mode is restrictive and neither
 	// the IGNORE nor the LOCAL modifier is specified. Errors terminate the load
 	// operation.
 	// ref https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-column-assignments
 	restrictive bool
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 
 	commitTaskQueue chan CommitTask
 	StopCh          chan struct{}
@@ -145,237 +136,7 @@ type LoadDataInfo struct {
 	OnDuplicate ast.OnDuplicateKeyHandlingType
 }
 
-<<<<<<< HEAD
 // FieldMapping inticates the relationship between input field and table column or user variable
-=======
-// NewLoadDataWorker creates a new LoadDataWorker that is ready to work.
-func NewLoadDataWorker(
-	sctx sessionctx.Context,
-	plan *plannercore.LoadData,
-	tbl table.Table,
-	getSysSessionFn func() (sessionctx.Context, error),
-	putSysSessionFn func(context.Context, sessionctx.Context),
-) (*LoadDataWorker, error) {
-	insertVal := &InsertValues{
-		baseExecutor:   newBaseExecutor(sctx, nil, plan.ID()),
-		Table:          tbl,
-		Columns:        plan.Columns,
-		GenExprs:       plan.GenCols.Exprs,
-		isLoadData:     true,
-		txnInUse:       sync.Mutex{},
-		maxRowsInBatch: uint64(sctx.GetSessionVars().DMLBatchSize),
-	}
-	restrictive := sctx.GetSessionVars().SQLMode.HasStrictMode() &&
-		plan.OnDuplicate != ast.OnDuplicateKeyHandlingIgnore
-	loadDataWorker := &LoadDataWorker{
-		row:                make([]types.Datum, 0, len(insertVal.insertColumns)),
-		commitTaskQueue:    make(chan commitTask, taskQueueSize),
-		InsertValues:       insertVal,
-		Path:               plan.Path,
-		format:             plan.Format,
-		schemaName:         plan.Table.Schema.O,
-		table:              tbl,
-		FieldsInfo:         plan.FieldsInfo,
-		LinesInfo:          plan.LinesInfo,
-		NullInfo:           plan.NullInfo,
-		IgnoreLines:        plan.IgnoreLines,
-		columnAssignments:  plan.ColumnAssignments,
-		columnsAndUserVars: plan.ColumnsAndUserVars,
-		onDuplicate:        plan.OnDuplicate,
-		Ctx:                sctx,
-		restrictive:        restrictive,
-		getSysSessionFn:    getSysSessionFn,
-		putSysSessionFn:    putSysSessionFn,
-	}
-	if !restrictive {
-		// TODO: DupKeyAsWarning represents too many "ignore error" paths, the
-		// meaning of this flag is not clear. I can only reuse it here.
-		sctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
-		sctx.GetSessionVars().StmtCtx.TruncateAsWarning = true
-		sctx.GetSessionVars().StmtCtx.BadNullAsWarning = true
-	}
-
-	if err := loadDataWorker.initOptions(plan.Options); err != nil {
-		return nil, err
-	}
-	columnNames := loadDataWorker.initFieldMappings()
-	if err := loadDataWorker.initLoadColumns(columnNames); err != nil {
-		return nil, err
-	}
-	loadDataWorker.ResetBatch()
-	return loadDataWorker, nil
-}
-
-func (e *LoadDataWorker) initDefaultOptions() {
-	threadCnt := runtime.NumCPU()
-	if e.format == LoadDataFormatParquet {
-		threadCnt = int(math.Max(1, float64(threadCnt)*0.75))
-	}
-
-	e.importMode = logicalImportMode
-	_ = e.diskQuota.UnmarshalText([]byte("50GiB")) // todo confirm with pm
-	e.checksum = config.OpLevelRequired
-	e.addIndex = true
-	e.analyze = config.OpLevelOptional
-	e.threadCnt = int64(threadCnt)
-	_ = e.batchSize.UnmarshalText([]byte("100MiB")) // todo confirm with pm
-	e.maxWriteSpeed = unlimitedWriteSpeed
-	e.splitFile = false
-	e.maxRecordedErrors = 100
-	e.detached = false
-}
-
-func (e *LoadDataWorker) initOptions(options []*plannercore.LoadDataOpt) error {
-	e.initDefaultOptions()
-
-	specifiedOptions := map[string]*plannercore.LoadDataOpt{}
-	for _, opt := range options {
-		hasValue, ok := supportedOptions[opt.Name]
-		if !ok {
-			return ErrUnknownOption.FastGenByArgs(opt.Name)
-		}
-		if hasValue && opt.Value == nil || !hasValue && opt.Value != nil {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		if _, ok = specifiedOptions[opt.Name]; ok {
-			return ErrDuplicateOption.FastGenByArgs(opt.Name)
-		}
-		specifiedOptions[opt.Name] = opt
-	}
-
-	var (
-		v      string
-		err    error
-		isNull bool
-	)
-	if opt, ok := specifiedOptions[importModeOption]; ok {
-		v, isNull, err = opt.Value.EvalString(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		v = strings.ToLower(v)
-		if v != logicalImportMode && v != physicalImportMode {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		e.importMode = v
-	}
-
-	if e.importMode == logicalImportMode {
-		// some options are only allowed in physical mode
-		for _, opt := range specifiedOptions {
-			if _, ok := optionsForPhysicalImport[opt.Name]; ok {
-				return ErrLoadDataUnsupportedOption.FastGenByArgs(opt.Name, e.importMode)
-			}
-		}
-	}
-	if opt, ok := specifiedOptions[diskQuotaOption]; ok {
-		v, isNull, err = opt.Value.EvalString(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		if err = e.diskQuota.UnmarshalText([]byte(v)); err != nil || e.diskQuota <= 0 {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-	}
-	if opt, ok := specifiedOptions[checksumOption]; ok {
-		v, isNull, err = opt.Value.EvalString(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		if err = e.checksum.FromStringValue(v); err != nil {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-	}
-	if opt, ok := specifiedOptions[addIndexOption]; ok {
-		var vInt int64
-		if !mysql.HasIsBooleanFlag(opt.Value.GetType().GetFlag()) {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		vInt, isNull, err = opt.Value.EvalInt(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		e.addIndex = vInt == 1
-	}
-	if opt, ok := specifiedOptions[analyzeOption]; ok {
-		v, isNull, err = opt.Value.EvalString(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		if err = e.analyze.FromStringValue(v); err != nil {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-	}
-	if opt, ok := specifiedOptions[threadOption]; ok {
-		// boolean true will be taken as 1
-		e.threadCnt, isNull, err = opt.Value.EvalInt(e.Ctx, chunk.Row{})
-		if err != nil || isNull || e.threadCnt <= 0 {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-	}
-	if opt, ok := specifiedOptions[batchSizeOption]; ok {
-		v, isNull, err = opt.Value.EvalString(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		if err = e.batchSize.UnmarshalText([]byte(v)); err != nil || e.batchSize <= 0 {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-	}
-	if opt, ok := specifiedOptions[maxWriteSpeedOption]; ok {
-		v, isNull, err = opt.Value.EvalString(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		if err = e.maxWriteSpeed.UnmarshalText([]byte(v)); err != nil || e.maxWriteSpeed <= 0 {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-	}
-	if opt, ok := specifiedOptions[splitFileOption]; ok {
-		if !mysql.HasIsBooleanFlag(opt.Value.GetType().GetFlag()) {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		var vInt int64
-		vInt, isNull, err = opt.Value.EvalInt(e.Ctx, chunk.Row{})
-		if err != nil || isNull {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		e.splitFile = vInt == 1
-	}
-	if opt, ok := specifiedOptions[recordErrorsOption]; ok {
-		e.maxRecordedErrors, isNull, err = opt.Value.EvalInt(e.Ctx, chunk.Row{})
-		if err != nil || isNull || e.maxRecordedErrors < -1 {
-			return ErrInvalidOptionVal.FastGenByArgs(opt.Name)
-		}
-		// todo: set a max value for this param?
-	}
-	if _, ok := specifiedOptions[detachedOption]; ok {
-		e.detached = true
-	}
-
-	e.adjustOptions()
-	return nil
-}
-
-func (e *LoadDataWorker) adjustOptions() {
-	if e.diskQuota < minDiskQuota {
-		e.diskQuota = minDiskQuota
-	}
-	// max value is cpu-count
-	numCPU := int64(runtime.NumCPU())
-	if e.threadCnt > numCPU {
-		e.threadCnt = numCPU
-	}
-	if e.batchSize < minBatchSize {
-		e.batchSize = minBatchSize
-	}
-	if e.maxWriteSpeed < minWriteSpeed {
-		e.maxWriteSpeed = minWriteSpeed
-	}
-}
-
-// FieldMapping indicates the relationship between input field and table column or user variable
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 type FieldMapping struct {
 	Column  *table.Column
 	UserVar *ast.VariableExpr
@@ -866,12 +627,7 @@ func (e *LoadDataInfo) CheckAndInsertOneBatch(ctx context.Context, rows [][]type
 	}
 	e.ctx.GetSessionVars().StmtCtx.AddRecordRows(cnt)
 
-<<<<<<< HEAD
-	replace := false
-	if e.OnDuplicate == ast.OnDuplicateKeyHandlingReplace {
-		replace = true
-=======
-	switch e.onDuplicate {
+	switch e.OnDuplicate {
 	case ast.OnDuplicateKeyHandlingReplace:
 		return e.batchCheckAndInsert(ctx, rows[0:cnt], e.addRecordLD, true)
 	case ast.OnDuplicateKeyHandlingIgnore:
@@ -896,8 +652,7 @@ func (e *LoadDataInfo) CheckAndInsertOneBatch(ctx context.Context, rows [][]type
 		}
 		return nil
 	default:
-		return errors.Errorf("unknown on duplicate key handling: %v", e.onDuplicate)
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
+		return errors.Errorf("unknown on duplicate key handling: %v", e.OnDuplicate)
 	}
 }
 
@@ -1136,115 +891,10 @@ func (w *fieldWriter) GetField() (bool, field) {
 				w.OutputBuf = append(w.OutputBuf, w.escapeChar)
 				w.OutputBuf = append(w.OutputBuf, ch)
 			}
-<<<<<<< HEAD
-=======
-			return ErrLoadDataCantRead.GenWithStackByArgs(
-				err.Error(),
-				"Only the following formats delimited text file (csv, tsv), parquet, sql are supported. Please provide the valid source file(s)",
-			)
-		}
-		// rowCount will be used in fillRow(), last insert ID will be assigned according to the rowCount = 1.
-		// So should add first here.
-		e.rowCount++
-		r, err := e.parserData2TableData(ctx, parser.LastRow().Row)
-		if err != nil {
-			return err
-		}
-		e.rows = append(e.rows, r)
-		e.curBatchCnt++
-		if e.maxRowsInBatch != 0 && e.rowCount%e.maxRowsInBatch == 0 {
-			logutil.Logger(ctx).Info("batch limit hit when inserting rows", zap.Int("maxBatchRows", e.maxChunkSize),
-				zap.Uint64("totalRows", e.rowCount))
-			return nil
-		}
-	}
-}
-
-// parserData2TableData encodes the data of parser output.
-func (e *LoadDataWorker) parserData2TableData(
-	ctx context.Context,
-	parserData []types.Datum,
-) ([]types.Datum, error) {
-	var errColNumMismatch error
-	switch {
-	case len(parserData) < len(e.fieldMappings):
-		errColNumMismatch = ErrWarnTooFewRecords.GenWithStackByArgs(e.rowCount)
-	case len(parserData) > len(e.fieldMappings):
-		errColNumMismatch = ErrWarnTooManyRecords.GenWithStackByArgs(e.rowCount)
-	}
-
-	if errColNumMismatch != nil {
-		if e.restrictive {
-			return nil, errColNumMismatch
-		}
-		e.handleWarning(errColNumMismatch)
-	}
-
-	row := make([]types.Datum, 0, len(e.insertColumns))
-	sessionVars := e.Ctx.GetSessionVars()
-	setVar := func(name string, col *types.Datum) {
-		// User variable names are not case-sensitive
-		// https://dev.mysql.com/doc/refman/8.0/en/user-variables.html
-		name = strings.ToLower(name)
-		if col == nil || col.IsNull() {
-			sessionVars.UnsetUserVar(name)
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 			} else {
 				w.OutputBuf = append(w.OutputBuf, ch)
 			}
 		}
-<<<<<<< HEAD
-=======
-
-	for i := 0; i < len(e.fieldMappings); i++ {
-		if i >= len(parserData) {
-			if e.fieldMappings[i].Column == nil {
-				setVar(e.fieldMappings[i].UserVar.Name, nil)
-				continue
-			}
-
-			// If some columns is missing and their type is time and has not null flag, they should be set as current time.
-			if types.IsTypeTime(e.fieldMappings[i].Column.GetType()) && mysql.HasNotNullFlag(e.fieldMappings[i].Column.GetFlag()) {
-				row = append(row, types.NewTimeDatum(types.CurrentTime(e.fieldMappings[i].Column.GetType())))
-				continue
-			}
-
-			row = append(row, types.NewDatum(nil))
-			continue
-		}
-
-		if e.fieldMappings[i].Column == nil {
-			setVar(e.fieldMappings[i].UserVar.Name, &parserData[i])
-			continue
-		}
-
-		row = append(row, parserData[i])
-	}
-	for i := 0; i < len(e.columnAssignments); i++ {
-		// eval expression of `SET` clause
-		d, err := expression.EvalAstExpr(e.Ctx, e.columnAssignments[i].Expr)
-		if err != nil {
-			if e.restrictive {
-				return nil, err
-			}
-			e.handleWarning(err)
-		}
-		row = append(row, d)
-	}
-
-	// a new row buffer will be allocated in getRow
-	newRow, err := e.getRow(ctx, row)
-	if err != nil {
-		if e.restrictive {
-			return nil, err
-		}
-		e.handleWarning(err)
-		// TODO: should not return nil! caller will panic when lookup index
-		return nil, nil
-	}
-
-	return newRow, nil
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 }
 
 // getFieldsFromLine splits line according to fieldsInfo.
diff --git a/executor/replace.go b/executor/replace.go
index 5388a1755bdc5..f04a8decae47f 100644
--- a/executor/replace.go
+++ b/executor/replace.go
@@ -60,62 +60,6 @@ func (e *ReplaceExec) Open(ctx context.Context) error {
 	return nil
 }
 
-<<<<<<< HEAD
-// removeRow removes the duplicate row and cleanup its keys in the key-value map,
-// but if the to-be-removed row equals to the to-be-added row, no remove or add things to do.
-func (e *ReplaceExec) removeRow(ctx context.Context, txn kv.Transaction, handle kv.Handle, r toBeCheckedRow) (bool, error) {
-	newRow := r.row
-	oldRow, err := getOldRow(ctx, e.ctx, txn, r.t, handle, e.GenExprs)
-	if err != nil {
-		logutil.BgLogger().Error("get old row failed when replace",
-			zap.String("handle", handle.String()),
-			zap.String("toBeInsertedRow", types.DatumsToStrNoErr(r.row)))
-		if kv.IsErrNotFound(err) {
-			err = errors.NotFoundf("can not be duplicated row, due to old row not found. handle %s", handle)
-		}
-		return false, err
-	}
-
-	rowUnchanged, err := e.EqualDatumsAsBinary(e.ctx.GetSessionVars().StmtCtx, oldRow, newRow)
-	if err != nil {
-		return false, err
-	}
-	if rowUnchanged {
-		e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1)
-		_, err := appendUnchangedRowForLock(e.ctx, r.t, handle, oldRow)
-		if err != nil {
-			return false, err
-		}
-		return true, nil
-	}
-
-	err = r.t.RemoveRecord(e.ctx, handle, oldRow)
-	if err != nil {
-		return false, err
-	}
-	e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1)
-	return false, nil
-}
-
-// EqualDatumsAsBinary compare if a and b contains the same datum values in binary collation.
-func (e *ReplaceExec) EqualDatumsAsBinary(sc *stmtctx.StatementContext, a []types.Datum, b []types.Datum) (bool, error) {
-	if len(a) != len(b) {
-		return false, nil
-	}
-	for i, ai := range a {
-		v, err := ai.Compare(sc, &b[i], collate.GetBinaryCollator())
-		if err != nil {
-			return false, errors.Trace(err)
-		}
-		if v != 0 {
-			return false, nil
-		}
-	}
-	return true, nil
-}
-
-=======
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 // replaceRow removes all duplicate rows for one row, then inserts it.
 func (e *ReplaceExec) replaceRow(ctx context.Context, r toBeCheckedRow) error {
 	txn, err := e.ctx.Txn(true)
diff --git a/executor/utils_test.go b/executor/utils_test.go
index 416d4d04bbf2f..8afcddb16d781 100644
--- a/executor/utils_test.go
+++ b/executor/utils_test.go
@@ -18,12 +18,8 @@ import (
 	"testing"
 
 	"github.com/pingcap/errors"
-<<<<<<< HEAD
-=======
-	berrors "github.com/pingcap/tidb/br/pkg/errors"
 	"github.com/pingcap/tidb/planner/core"
 	"github.com/pingcap/tidb/types"
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))
 	"github.com/stretchr/testify/require"
 )
 
@@ -99,17 +95,6 @@ func TestBatchRetrieverHelper(t *testing.T) {
 	require.Equal(t, rangeStarts, []int{0})
 	require.Equal(t, rangeEnds, []int{10})
 }
-<<<<<<< HEAD
-=======
-
-func TestGetMsgFromBRError(t *testing.T) {
-	var berr error = berrors.ErrStorageInvalidConfig
-	require.Equal(t, "[BR:ExternalStorage:ErrStorageInvalidConfig]invalid external storage config", berr.Error())
-	require.Equal(t, "invalid external storage config", getMsgFromBRError(berr))
-	berr = errors.Annotatef(berr, "some message about error reason")
-	require.Equal(t, "some message about error reason: [BR:ExternalStorage:ErrStorageInvalidConfig]invalid external storage config", berr.Error())
-	require.Equal(t, "some message about error reason", getMsgFromBRError(berr))
-}
 
 func TestEqualDatumsAsBinary(t *testing.T) {
 	tests := []struct {
@@ -142,4 +127,3 @@ func TestEqualDatumsAsBinary(t *testing.T) {
 		require.Equal(t, tt.same, res)
 	}
 }
->>>>>>> 25770ffc6b9 (executor: unify replace into logic for InsertValues and ReplaceExec (#41947))