diff --git a/.golangci.yml b/.golangci.yml
index accb83fab1e37..40e21a04e593e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -23,6 +23,8 @@ linters:
     - durationcheck
     - prealloc
     - predeclared
+    - revive
+    - lll
 
 linters-settings:
   staticcheck:
@@ -35,6 +37,358 @@ linters-settings:
     excludes:
       - G101
       - G112
+  revive:
+    # Maximum number of open files at the same time.
+    # See https://github.com/mgechev/revive#command-line-flags
+    # Defaults to unlimited.
+    max-open-files: 2048
+
+    # When set to true, ignores files with a "GENERATED" header, similar to golint.
+    # See https://github.com/mgechev/revive#available-rules for details.
+    # Default: false
+    ignore-generated-header: true
+
+    # Sets the default severity.
+    # See https://github.com/mgechev/revive#configuration
+    # Default: warning
+    severity: error
+
+    # Enable all available rules.
+    # Default: false
+    enable-all-rules: true
+
+    # Sets the default failure confidence.
+    # Findings with a confidence below this value are ignored; at 0.1 almost every finding is reported.
+    # Default: 0.8
+    confidence: 0.1
+
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#add-constant
+      - name: add-constant
+        severity: warning
+        disabled: false
+        arguments:
+          - maxLitCount: "3"
+            allowStrs: '""'
+            allowInts: "0,1,2"
+            allowFloats: "0.0,0.,1.0,1.,2.0,2."
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#argument-limit
+      - name: argument-limit
+        severity: warning
+        disabled: false
+        arguments: [ 4 ]
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#atomic
+      - name: atomic
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#banned-characters
+      - name: banned-characters
+        severity: warning
+        disabled: false
+        arguments: [ "Ω","Σ","σ", "7" ]
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bare-return
+      - name: bare-return
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
+      - name: blank-imports
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
+      - name: bool-literal-in-expr
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#call-to-gc
+      - name: call-to-gc
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cognitive-complexity
+      - name: cognitive-complexity
+        severity: warning
+        disabled: false
+        arguments: [ 7 ]
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#comment-spacings
+      - name: comment-spacings
+        severity: warning
+        disabled: false
+        arguments:
+          - mypragma
+          - otherpragma
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-naming
+      - name: confusing-naming
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-results
+      - name: confusing-results
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
+      - name: constant-logical-expr
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
+      - name: context-as-argument
+        severity: warning
+        disabled: false
+        arguments:
+          - allowTypesBefore: "*testing.T,*github.com/user/repo/testing.Harness"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
+      - name: context-keys-type
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cyclomatic
+      - name: cyclomatic
+        severity: warning
+        disabled: false
+        arguments: [ 3 ]
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#datarace
+      - name: datarace
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
+      - name: deep-exit
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
+      - name: defer
+        severity: warning
+        disabled: false
+        arguments:
+          - [ "call-chain", "loop" ]
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
+      - name: dot-imports
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
+      - name: duplicated-imports
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
+      - name: early-return
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
+      - name: empty-block
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
+      - name: empty-lines
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
+      - name: error-naming
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
+      - name: error-return
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
+      - name: error-strings
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
+      - name: errorf
+        severity: warning
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
+      - name: exported
+        severity: warning
+        disabled: false
+        arguments:
+          - "checkPrivateReceivers"
+          - "sayRepetitiveInsteadOfStutters"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#file-header
+      - name: file-header
+        severity: warning
+        disabled: false
+        arguments:
+          - This is the text that must appear at the top of source files.
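+      # For illustration only (these comment lines are not revive configuration):
+      # with the thresholds configured above, a hypothetical function such as
+      #
+      #   func dial(host string, port, retries, timeout, backoff int) error { ... }
+      #
+      # is reported by argument-limit (more than 4 parameters), and a literal
+      # comparison like `retries > 7` is reported by add-constant, since 7 is
+      # not in allowInts ("0,1,2"); revive suggests a named constant instead.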
+ # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#function-result-limit + - name: function-result-limit + severity: warning + disabled: false + arguments: [ 2 ] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#function-length + - name: function-length + severity: warning + disabled: false + arguments: [ 10, 0 ] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#get-return + - name: get-return + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#imports-blacklist + - name: imports-blacklist + severity: warning + disabled: false + arguments: + - "crypto/md5" + - "crypto/sha1" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#line-length-limit + - name: line-length-limit + severity: warning + disabled: false + arguments: [ 80 ] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#max-public-structs + - name: max-public-structs + severity: warning + disabled: false + arguments: [ 3 ] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-parameter + - name: modifies-parameter + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-value-receiver + - name: modifies-value-receiver + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#nested-structs + - name: nested-structs + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#optimize-operands-order + - name: optimize-operands-order + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#receiver-naming + - name: receiver-naming + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + severity: warning + disabled: false + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-of-int + - name: string-of-int + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + severity: warning + disabled: false + arguments: + - - 'core.WriteError[1].Message' + - '/^([^A-Z]|$)/' + - must not start with a capital letter + - - 'fmt.Errorf[0]' + - '/(^|[^\.!?])$/' + - must not end in punctuation + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + arguments: + - "json,inline" + - "bson,outline,gnu" + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-naming + - name: time-naming + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + severity: warning + disabled: false + arguments: + - [ "ID" ] # AllowList + - [ "VM" ] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-naming + - name: unexported-naming + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + severity: warning + disabled: false + arguments: + - "fmt.Printf" + - "myFunction" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unreachable-code + - name: unreachable-code + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + severity: warning + disabled: false + - name: unused-receiver + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + severity: warning + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + severity: warning + disabled: false issues: exclude-rules: - path: _test\.go diff --git a/build/nogo_config.json b/build/nogo_config.json index 60f6fbd85e714..3272e5ab42b51 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -501,14 +501,7 @@ "autoid_service/": "autoid_service", "bindinfo/": "bindinfo", "br/pkg/lightning/": "br/pkg/lightning/", - "executor/aggregate.go": "executor/aggregate.go", - "executor/a": "executor/a", - "executor/change.go": "executor/change.go", - "executor/concurrent_map.go": 
"executor/concurrent_map.go", - "executor/internal/": "executor/internal/", - "executor/aggfuncs/": "executor/aggfuncs/", - "executor/asyncloaddata/": "executor/asyncloaddata/", - "executor/importer/": "executor/importer/", + "executor/": "executor/", "types/json_binary_functions.go": "types/json_binary_functions.go", "types/json_binary_test.go": "types/json_binary_test.go", "ddl/": "ddl", diff --git a/executor/BUILD.bazel b/executor/BUILD.bazel index 8b6908c7709a5..09e741b5e22ed 100644 --- a/executor/BUILD.bazel +++ b/executor/BUILD.bazel @@ -191,6 +191,7 @@ go_library( "//util/disk", "//util/etcd", "//util/execdetails", + "//util/filter", "//util/format", "//util/gcutil", "//util/globalconn", diff --git a/executor/batch_checker.go b/executor/batch_checker.go index 838c6af7bace0..fb8a36ec6b3b6 100644 --- a/executor/batch_checker.go +++ b/executor/batch_checker.go @@ -53,7 +53,7 @@ type toBeCheckedRow struct { // getKeysNeedCheck gets keys converted from to-be-insert rows to record keys and unique index keys, // which need to be checked whether they are duplicate keys. -func getKeysNeedCheck(ctx context.Context, sctx sessionctx.Context, t table.Table, rows [][]types.Datum) ([]toBeCheckedRow, error) { +func getKeysNeedCheck(sctx sessionctx.Context, t table.Table, rows [][]types.Datum) ([]toBeCheckedRow, error) { nUnique := 0 for _, v := range t.Indices() { if !tables.IsIndexWritable(v) { diff --git a/executor/bind.go b/executor/bind.go index 53fd6c7a664ad..375f554e25b64 100644 --- a/executor/bind.go +++ b/executor/bind.go @@ -45,7 +45,7 @@ type SQLBindExec struct { } // Next implements the Executor Next interface. -func (e *SQLBindExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *SQLBindExec) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() switch e.sqlBindOp { case plannercore.OpSQLBindCreate: diff --git a/executor/brie.go b/executor/brie.go index 76d470ea41320..237c8b2466255 100644 --- a/executor/brie.go +++ b/executor/brie.go @@ -394,8 +394,7 @@ func (b *executorBuilder) buildBRIE(s *ast.BRIEStmt, schema *expression.Schema) case ast.BRIEKindRestore: e.restoreCfg = &task.RestoreConfig{Config: cfg} for _, opt := range s.Options { - switch opt.Tp { - case ast.BRIEOptionOnline: + if opt.Tp == ast.BRIEOptionOnline { e.restoreCfg.Online = opt.UintValue != 0 } } @@ -437,7 +436,7 @@ type showQueryExec struct { targetID uint64 } -func (s *showQueryExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (s *showQueryExec) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() tsk, ok := globalBRIEQueue.queryTask(s.targetID) @@ -455,7 +454,7 @@ type cancelJobExec struct { targetID uint64 } -func (s cancelJobExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (s cancelJobExec) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() if !globalBRIEQueue.cancelTask(s.targetID) { s.Ctx().GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrLoadDataJobNotFound.FastGenByArgs(s.targetID)) @@ -647,12 +646,12 @@ func (gs *tidbGlueSession) GetSessionCtx() sessionctx.Context { } // GetDomain implements glue.Glue -func (gs *tidbGlueSession) GetDomain(store kv.Storage) (*domain.Domain, error) { +func (gs *tidbGlueSession) GetDomain(_ kv.Storage) (*domain.Domain, error) { return domain.GetDomain(gs.se), nil } // CreateSession implements glue.Glue -func (gs *tidbGlueSession) CreateSession(store kv.Storage) (glue.Session, error) { +func (gs *tidbGlueSession) CreateSession(_ kv.Storage) (glue.Session, error) { return gs, nil } @@ -674,7 +673,7 @@ func 
(gs *tidbGlueSession) ExecuteInternal(ctx context.Context, sql string, args } // CreateDatabase implements glue.Session -func (gs *tidbGlueSession) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { +func (gs *tidbGlueSession) CreateDatabase(_ context.Context, schema *model.DBInfo) error { d := domain.GetDomain(gs.se).DDL() // 512 is defaultCapOfCreateTable. result := bytes.NewBuffer(make([]byte, 0, 512)) @@ -690,7 +689,7 @@ func (gs *tidbGlueSession) CreateDatabase(ctx context.Context, schema *model.DBI } // CreateTable implements glue.Session -func (gs *tidbGlueSession) CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo, cs ...ddl.CreateTableWithInfoConfigurier) error { +func (gs *tidbGlueSession) CreateTable(_ context.Context, dbName model.CIStr, table *model.TableInfo, cs ...ddl.CreateTableWithInfoConfigurier) error { d := domain.GetDomain(gs.se).DDL() // 512 is defaultCapOfCreateTable. @@ -714,7 +713,7 @@ func (gs *tidbGlueSession) CreateTable(ctx context.Context, dbName model.CIStr, } // CreatePlacementPolicy implements glue.Session -func (gs *tidbGlueSession) CreatePlacementPolicy(ctx context.Context, policy *model.PolicyInfo) error { +func (gs *tidbGlueSession) CreatePlacementPolicy(_ context.Context, policy *model.PolicyInfo) error { gs.se.SetValue(sessionctx.QueryString, ConstructResultOfShowCreatePlacementPolicy(policy)) d := domain.GetDomain(gs.se).DDL() // the default behaviour is ignoring duplicated policy during restore. @@ -722,7 +721,7 @@ func (gs *tidbGlueSession) CreatePlacementPolicy(ctx context.Context, policy *mo } // Close implements glue.Session -func (gs *tidbGlueSession) Close() { +func (*tidbGlueSession) Close() { } // GetGlobalVariables implements glue.Session. @@ -736,12 +735,12 @@ func (gs *tidbGlueSession) Open(string, pd.SecurityOption) (kv.Storage, error) { } // OwnsStorage implements glue.Glue -func (gs *tidbGlueSession) OwnsStorage() bool { +func (*tidbGlueSession) OwnsStorage() bool { return false } // StartProgress implements glue.Glue -func (gs *tidbGlueSession) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress { +func (gs *tidbGlueSession) StartProgress(_ context.Context, cmdName string, total int64, _ bool) glue.Progress { gs.progress.lock.Lock() gs.progress.cmd = cmdName gs.progress.total = total @@ -762,12 +761,12 @@ func (gs *tidbGlueSession) Record(name string, value uint64) { } } -func (gs *tidbGlueSession) GetVersion() string { +func (*tidbGlueSession) GetVersion() string { return "TiDB\n" + printer.GetTiDBInfo() } // UseOneShotSession implements glue.Glue -func (gs *tidbGlueSession) UseOneShotSession(store kv.Storage, closeDomain bool, fn func(se glue.Session) error) error { +func (gs *tidbGlueSession) UseOneShotSession(_ kv.Storage, _ bool, fn func(se glue.Session) error) error { // in SQL backup. we don't need to close domain. 
return fn(gs) } diff --git a/executor/builder.go b/executor/builder.go index e15b44c4238ca..a123ad5ccdac8 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -446,7 +446,7 @@ func buildIndexLookUpChecker(b *executorBuilder, p *plannercore.PhysicalIndexLoo is := p.IndexPlans[0].(*plannercore.PhysicalIndexScan) fullColLen := len(is.Index.Columns) + len(p.CommonHandleCols) if !e.isCommonHandle() { - fullColLen += 1 + fullColLen++ } e.dagPB.OutputOffsets = make([]uint32, fullColLen) for i := 0; i < fullColLen; i++ { @@ -889,7 +889,7 @@ func (b *executorBuilder) buildSimple(v *plannercore.Simple) exec.Executor { if b.Ti.AccountLockTelemetry == nil { b.Ti.AccountLockTelemetry = &AccountLockTelemetryInfo{} } - b.Ti.AccountLockTelemetry.CreateOrAlterUser += 1 + b.Ti.AccountLockTelemetry.CreateOrAlterUser++ if stmt, ok := v.Statement.(*ast.CreateUserStmt); ok { lockOptions = stmt.PasswordOrLockOptions } else if stmt, ok := v.Statement.(*ast.AlterUserStmt); ok { @@ -899,10 +899,10 @@ func (b *executorBuilder) buildSimple(v *plannercore.Simple) exec.Executor { // Multiple lock options are supported for the parser, but only the last one option takes effect. for i := len(lockOptions) - 1; i >= 0; i-- { if lockOptions[i].Type == ast.Lock { - b.Ti.AccountLockTelemetry.LockUser += 1 + b.Ti.AccountLockTelemetry.LockUser++ break } else if lockOptions[i].Type == ast.Unlock { - b.Ti.AccountLockTelemetry.UnlockUser += 1 + b.Ti.AccountLockTelemetry.UnlockUser++ break } } @@ -1140,7 +1140,7 @@ func (b *executorBuilder) buildPlanReplayer(v *plannercore.PlanReplayer) exec.Ex return e } -func (b *executorBuilder) buildReplace(vals *InsertValues) exec.Executor { +func (*executorBuilder) buildReplace(vals *InsertValues) exec.Executor { replaceExec := &ReplaceExec{ InsertValues: vals, } @@ -1467,7 +1467,7 @@ func (us *UnionScanExec) handleCachedTable(b *executorBuilder, x bypassDataSourc x.setDummy() us.cacheTable = cacheData } else if loading { - // continue + return } else { if !b.inUpdateStmt && !b.inDeleteStmt && !b.inInsertStmt && !vars.StmtCtx.InExplainStmt { store := b.ctx.GetStore() @@ -2137,7 +2137,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) exec.Ex return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, - retriever: buildStmtSummaryRetriever(b.ctx, v.Table, v.Columns, extractor), + retriever: buildStmtSummaryRetriever(v.Table, v.Columns, extractor), } case strings.ToLower(infoschema.TableColumns): return &MemTableReaderExec{ @@ -3099,7 +3099,7 @@ func markChildrenUsedCols(outputCols []*expression.Column, childSchemas ...*expr return } -func (b *executorBuilder) corColInDistPlan(plans []plannercore.PhysicalPlan) bool { +func (*executorBuilder) corColInDistPlan(plans []plannercore.PhysicalPlan) bool { for _, p := range plans { x, ok := p.(*plannercore.PhysicalSelection) if !ok { @@ -3115,7 +3115,7 @@ func (b *executorBuilder) corColInDistPlan(plans []plannercore.PhysicalPlan) boo } // corColInAccess checks whether there's correlated column in access conditions. 
-func (b *executorBuilder) corColInAccess(p plannercore.PhysicalPlan) bool { +func (*executorBuilder) corColInAccess(p plannercore.PhysicalPlan) bool { var access []expression.Expression switch x := p.(type) { case *plannercore.PhysicalTableScan: @@ -3710,7 +3710,7 @@ func getPartitionKeyColOffsets(keyColIDs []int64, pt table.PartitionedTable) []i return keyColOffsets } -func (builder *dataReaderBuilder) prunePartitionForInnerExecutor(tbl table.Table, schema *expression.Schema, partitionInfo *plannercore.PartitionInfo, +func (builder *dataReaderBuilder) prunePartitionForInnerExecutor(tbl table.Table, partitionInfo *plannercore.PartitionInfo, lookUpContent []*indexJoinLookUpContent) (usedPartition []table.PhysicalTable, canPrune bool, contentPos []int64, err error) { partitionTbl := tbl.(table.PartitionedTable) @@ -4281,26 +4281,26 @@ type mockPhysicalIndexReader struct { } // MemoryUsage of mockPhysicalIndexReader is only for testing -func (p *mockPhysicalIndexReader) MemoryUsage() (sum int64) { +func (*mockPhysicalIndexReader) MemoryUsage() (sum int64) { return } func (builder *dataReaderBuilder) buildExecutorForIndexJoin(ctx context.Context, lookUpContents []*indexJoinLookUpContent, - IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) { - return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) + indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) { + return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) } func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context.Context, plan plannercore.Plan, lookUpContents []*indexJoinLookUpContent, - IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) { + indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) { switch v := plan.(type) { case *plannercore.PhysicalTableReader: - return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) + return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) case *plannercore.PhysicalIndexReader: - return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) + return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) case *plannercore.PhysicalIndexLookUpReader: - return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) + return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) case *plannercore.PhysicalUnionScan: - return 
builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) + return builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) // The inner child of IndexJoin might be Projection when a combination of the following conditions is true: // 1. The inner child fetch data using indexLookupReader // 2. PK is not handle @@ -4308,11 +4308,11 @@ func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context. // In this case, an extra column tidb_rowid will be appended in the output result of IndexLookupReader(see copTask.doubleReadNeedProj). // Then we need a Projection upon IndexLookupReader to prune the redundant column. case *plannercore.PhysicalProjection: - return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) + return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal) // Need to support physical selection because after PR 16389, TiDB will push down all the expr supported by TiKV or TiFlash // in predicate push down stage, so if there is an expr which only supported by TiFlash, a physical selection will be added after index read case *plannercore.PhysicalSelection: - childExec, err := builder.buildExecutorForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) + childExec, err := builder.buildExecutorForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal) if err != nil { return nil, err } @@ -4655,7 +4655,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte } tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID) - usedPartition, canPrune, contentPos, err := builder.prunePartitionForInnerExecutor(tbl, e.Schema(), &v.PartitionInfo, lookUpContents) + usedPartition, canPrune, contentPos, err := builder.prunePartitionForInnerExecutor(tbl, &v.PartitionInfo, lookUpContents) if err != nil { return nil, err } @@ -4730,7 +4730,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context } tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID) - usedPartition, canPrune, contentPos, err := builder.prunePartitionForInnerExecutor(tbl, e.Schema(), &v.PartitionInfo, lookUpContents) + usedPartition, canPrune, contentPos, err := builder.prunePartitionForInnerExecutor(tbl, &v.PartitionInfo, lookUpContents) if err != nil { return nil, err } @@ -5085,7 +5085,7 @@ func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleE return shuffle } -func (b *executorBuilder) buildShuffleReceiverStub(v *plannercore.PhysicalShuffleReceiverStub) *shuffleReceiver { +func (*executorBuilder) buildShuffleReceiverStub(v *plannercore.PhysicalShuffleReceiverStub) *shuffleReceiver { return (*shuffleReceiver)(v.Receiver) } @@ -5406,11 +5406,11 @@ func fullRangePartition(idxArr []int) bool { type emptySampler struct{} -func (s *emptySampler) writeChunk(_ *chunk.Chunk) error { +func (*emptySampler) writeChunk(_ *chunk.Chunk) error { return nil } -func (s *emptySampler) finished() bool { +func (*emptySampler) finished() bool { return true } @@ -5613,7 +5613,7 @@ func (b *executorBuilder) getCacheTable(tblInfo *model.TableInfo, startTS uint64 
sessVars.StmtCtx.ReadFromTableCache = true return cacheData } else if loading { - // continue + return nil } else { if !b.ctx.GetSessionVars().StmtCtx.InExplainStmt && !b.inDeleteStmt && !b.inUpdateStmt { tbl.(table.CachedTable).UpdateLockForRead(context.Background(), b.ctx.GetStore(), startTS, leaseDuration) diff --git a/executor/compact_table.go b/executor/compact_table.go index 2d88dd0293e23..69a8bbb247ba2 100644 --- a/executor/compact_table.go +++ b/executor/compact_table.go @@ -237,7 +237,7 @@ func (task *storeCompactTask) logProgressOptionally() { // // Returns: (stopAllTasks, err) func (task *storeCompactTask) compactOnePhysicalTable(physicalTableID int64) (bool, error) { - var startKey []byte = nil + var startKey []byte for { // This loop is to compact incrementally for all data. Each RPC request will only compact a partial of data. if task.ctx.Err() != nil { return true, task.ctx.Err() diff --git a/executor/coprocessor.go b/executor/coprocessor.go index c700dc01e0795..402039c6359c8 100644 --- a/executor/coprocessor.go +++ b/executor/coprocessor.go @@ -228,7 +228,7 @@ func (h *CoprocessorDAGHandler) buildStreamResponse(chunk *tipb.Chunk) *coproces return resp } -func (h *CoprocessorDAGHandler) buildErrorResponse(err error) *coprocessor.Response { +func (*CoprocessorDAGHandler) buildErrorResponse(err error) *coprocessor.Response { return &coprocessor.Response{ OtherError: err.Error(), } @@ -268,7 +268,7 @@ func (h *CoprocessorDAGHandler) encodeDefault(chk *chunk.Chunk, tps []*types.Fie const rowsPerChunk = 64 -func (h *CoprocessorDAGHandler) appendRow(chunks []tipb.Chunk, data []byte, rowCnt int) []tipb.Chunk { +func (*CoprocessorDAGHandler) appendRow(chunks []tipb.Chunk, data []byte, rowCnt int) []tipb.Chunk { if rowCnt%rowsPerChunk == 0 { chunks = append(chunks, tipb.Chunk{}) } diff --git a/executor/cte_table_reader.go b/executor/cte_table_reader.go index 2eb7aa91eb7c7..3c93eb6d12393 100644 --- a/executor/cte_table_reader.go +++ b/executor/cte_table_reader.go @@ -39,7 +39,7 @@ func (e *CTETableReaderExec) Open(ctx context.Context) error { } // Next implements the Executor interface. -func (e *CTETableReaderExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { +func (e *CTETableReaderExec) Next(_ context.Context, req *chunk.Chunk) (err error) { req.Reset() // We should read `iterInTbl` from the beginning when the next iteration starts. diff --git a/executor/ddl.go b/executor/ddl.go index 0a23696e188a5..556f11f22bddb 100644 --- a/executor/ddl.go +++ b/executor/ddl.go @@ -473,12 +473,12 @@ func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, dom *domain.Do // GetDropOrTruncateTableInfoFromJobs gets the dropped/truncated table information from DDL jobs, // it will use the `start_ts` of DDL job as snapshot to get the dropped/truncated table information. 
func GetDropOrTruncateTableInfoFromJobs(jobs []*model.Job, gcSafePoint uint64, dom *domain.Domain, fn func(*model.Job, *model.TableInfo) (bool, error)) (bool, error) { - getTable := func(StartTS uint64, SchemaID int64, TableID int64) (*model.TableInfo, error) { - snapMeta, err := dom.GetSnapshotMeta(StartTS) + getTable := func(startTS uint64, schemaID int64, tableID int64) (*model.TableInfo, error) { + snapMeta, err := dom.GetSnapshotMeta(startTS) if err != nil { return nil, err } - tbl, err := snapMeta.GetTable(SchemaID, TableID) + tbl, err := snapMeta.GetTable(schemaID, tableID) return tbl, err } return ddl.GetDropOrTruncateTableInfoFromJobsByStore(jobs, gcSafePoint, getTable, fn) diff --git a/executor/distsql.go b/executor/distsql.go index 71d24a78848af..a93e8e8a6c976 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -301,7 +301,7 @@ func (e *IndexReaderExecutor) Open(ctx context.Context) error { return e.open(ctx, kvRanges) } -func (e *IndexReaderExecutor) buildKVReq(ctx context.Context, r []kv.KeyRange) (*kv.Request, error) { +func (e *IndexReaderExecutor) buildKVReq(r []kv.KeyRange) (*kv.Request, error) { var builder distsql.RequestBuilder builder.SetKeyRanges(r). SetDAGRequest(e.dagPB). @@ -388,7 +388,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) }) // use sortedSelectResults only when byItems pushed down and partition numbers > 1 if e.byItems == nil || len(e.partitions) <= 1 { - kvReq, err := e.buildKVReq(ctx, kvRanges) + kvReq, err := e.buildKVReq(kvRanges) if err != nil { e.feedback.Invalidate() return err @@ -401,7 +401,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) } else { kvReqs := make([]*kv.Request, 0, len(kvRanges)) for _, kvRange := range kvRanges { - kvReq, err := e.buildKVReq(ctx, []kv.KeyRange{kvRange}) + kvReq, err := e.buildKVReq([]kv.KeyRange{kvRange}) if err != nil { e.feedback.Invalidate() return err @@ -1336,7 +1336,7 @@ func (e *IndexLookUpRunTimeStats) Merge(other execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. -func (e *IndexLookUpRunTimeStats) Tp() int { +func (*IndexLookUpRunTimeStats) Tp() int { return execdetails.TpIndexLookUpRunTimeStats } diff --git a/executor/executor.go b/executor/executor.go index 621984f0c25a6..4c2bab5b644f1 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -201,7 +201,7 @@ func (a *globalPanicOnExceed) Action(t *memory.Tracker) { } // GetPriority get the priority of the Action -func (a *globalPanicOnExceed) GetPriority() int64 { +func (*globalPanicOnExceed) GetPriority() int64 { return memory.DefPanicPriority } @@ -281,7 +281,7 @@ type CommandDDLJobsExec struct { // Open implements the Executor for all Cancel/Pause/Resume command on DDL jobs // just with different processes. And, it should not be called directly by the // Executor. -func (e *CommandDDLJobsExec) Open(ctx context.Context) error { +func (e *CommandDDLJobsExec) Open(context.Context) error { // We want to use a global transaction to execute the admin command, so we don't use e.Ctx() here. 
newSess, err := e.GetSysSession() if err != nil { @@ -293,7 +293,7 @@ func (e *CommandDDLJobsExec) Open(ctx context.Context) error { } // Next implements the Executor Next interface for Cancel/Pause/Resume -func (e *CommandDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *CommandDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error { req.GrowAndReset(e.MaxChunkSize()) if e.cursor >= len(e.jobIDs) { return nil @@ -334,7 +334,7 @@ type ShowNextRowIDExec struct { } // Next implements the Executor Next interface. -func (e *ShowNextRowIDExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *ShowNextRowIDExec) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() if e.done { return nil @@ -665,7 +665,7 @@ func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error { } // Next implements the Executor Next interface. -func (e *ShowDDLJobQueriesExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *ShowDDLJobQueriesExec) Next(_ context.Context, req *chunk.Chunk) error { req.GrowAndReset(e.MaxChunkSize()) if e.cursor >= len(e.jobs) { return nil @@ -757,7 +757,7 @@ func (e *ShowDDLJobQueriesWithRangeExec) Open(ctx context.Context) error { } // Next implements the Executor Next interface. -func (e *ShowDDLJobQueriesWithRangeExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *ShowDDLJobQueriesWithRangeExec) Next(_ context.Context, req *chunk.Chunk) error { req.GrowAndReset(e.MaxChunkSize()) if e.cursor >= len(e.jobs) { return nil @@ -806,7 +806,7 @@ func (e *ShowDDLJobsExec) Open(ctx context.Context) error { } // Next implements the Executor Next interface. -func (e *ShowDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *ShowDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error { req.GrowAndReset(e.MaxChunkSize()) if (e.cursor - len(e.runningJobs)) >= e.jobNumber { return nil @@ -849,9 +849,9 @@ func (e *ShowDDLJobsExec) Close() error { func getSchemaName(is infoschema.InfoSchema, id int64) string { var schemaName string - DBInfo, ok := is.SchemaByID(id) + dbInfo, ok := is.SchemaByID(id) if ok { - schemaName = DBInfo.Name.O + schemaName = dbInfo.Name.O return schemaName } @@ -954,7 +954,7 @@ func (e *CheckTableExec) handlePanic(r interface{}) { } // Next implements the Executor Next interface. -func (e *CheckTableExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *CheckTableExec) Next(ctx context.Context, _ *chunk.Chunk) error { if e.done || len(e.srcs) == 0 { return nil } @@ -1088,7 +1088,7 @@ func (e *ShowSlowExec) Open(ctx context.Context) error { } // Next implements the Executor Next interface. -func (e *ShowSlowExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *ShowSlowExec) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() if e.cursor >= len(e.result) { return nil @@ -1529,13 +1529,13 @@ type TableDualExec struct { } // Open implements the Executor Open interface. -func (e *TableDualExec) Open(ctx context.Context) error { +func (e *TableDualExec) Open(context.Context) error { e.numReturned = 0 return nil } // Next implements the Executor Next interface. 
-func (e *TableDualExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *TableDualExec) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() if e.numReturned >= e.numDualRows { return nil @@ -1578,7 +1578,7 @@ func (e *SelectionExec) Open(ctx context.Context) error { return e.open(ctx) } -func (e *SelectionExec) open(ctx context.Context) error { +func (e *SelectionExec) open(context.Context) error { if e.memTracker != nil { e.memTracker.Reset() } else { @@ -1722,7 +1722,7 @@ func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chu } // Open implements the Executor Open interface. -func (e *TableScanExec) Open(ctx context.Context) error { +func (e *TableScanExec) Open(context.Context) error { e.virtualTableChunkList = nil return nil } @@ -1834,7 +1834,7 @@ func (e *UnionExec) waitAllFinished() { } // Open implements the Executor Open interface. -func (e *UnionExec) Open(ctx context.Context) error { +func (e *UnionExec) Open(context.Context) error { e.stopFetchData.Store(false) e.initialized = false e.finished = make(chan struct{}) @@ -2312,7 +2312,6 @@ type checkIndexWorker struct { table table.Table indexInfos []*model.IndexInfo e *FastCheckTableExec - ctx context.Context } type groupByChecksum struct { @@ -2686,15 +2685,14 @@ func (w *checkIndexWorker) HandleTask(task checkIndexTask) { } // Close implements the Worker interface. -func (w *checkIndexWorker) Close() { -} +func (*checkIndexWorker) Close() {} func (e *FastCheckTableExec) createWorker() workerpool.Worker[checkIndexTask] { return &checkIndexWorker{sctx: e.Ctx(), dbName: e.dbName, table: e.table, indexInfos: e.indexInfos, e: e} } // Next implements the Executor Next interface. -func (e *FastCheckTableExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *FastCheckTableExec) Next(context.Context, *chunk.Chunk) error { if e.done || len(e.indexInfos) == 0 { return nil } diff --git a/executor/explain.go b/executor/explain.go index 8683d141ae538..bdb8e0eefad2a 100644 --- a/executor/explain.go +++ b/executor/explain.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/memory" + "github.com/pingcap/tidb/util/size" clientutil "github.com/tikv/client-go/v2/util" "go.uber.org/zap" ) @@ -178,7 +179,7 @@ type memoryDebugModeHandler struct { func (h *memoryDebugModeHandler) fetchCurrentMemoryUsage(gc bool) (heapInUse, trackedMem uint64) { if gc { - runtime.GC() + runtime.GC() //nolint: revive } instanceStats := memory.ForceReadMemStats() heapInUse = instanceStats.HeapInuse @@ -214,10 +215,9 @@ func (h *memoryDebugModeHandler) getTrackerTreeMemUseLogs() []zap.Field { } func updateTriggerIntervalByHeapInUse(heapInUse uint64) (time.Duration, int) { - const GB uint64 = 1 << 30 - if heapInUse < 30*GB { + if heapInUse < 30*size.GB { return 5 * time.Second, 6 - } else if heapInUse < 40*GB { + } else if heapInUse < 40*size.GB { return 15 * time.Second, 2 } else { return 30 * time.Second, 1 @@ -356,6 +356,6 @@ func (e *ruRuntimeStats) Merge(other execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. 
-func (e *ruRuntimeStats) Tp() int { +func (*ruRuntimeStats) Tp() int { return execdetails.TpRURuntimeStats } diff --git a/executor/foreign_key.go b/executor/foreign_key.go index df32b95210f81..6b6f27624b9c6 100644 --- a/executor/foreign_key.go +++ b/executor/foreign_key.go @@ -316,7 +316,7 @@ func (fkc *FKCheckExec) checkKeys(ctx context.Context, txn kv.Transaction) error return nil } -func (fkc *FKCheckExec) prefetchKeys(ctx context.Context, txn kv.Transaction, keys []kv.Key) error { +func (*FKCheckExec) prefetchKeys(ctx context.Context, txn kv.Transaction, keys []kv.Key) error { // Fill cache using BatchGet _, err := txn.BatchGet(ctx, keys) if err != nil { @@ -407,7 +407,7 @@ func (fkc *FKCheckExec) checkPrefixKeyExist(key kv.Key, value []byte) error { return nil } -func (fkc *FKCheckExec) getIndexKeyValueInTable(ctx context.Context, memBuffer kv.MemBuffer, snap kv.Snapshot, key kv.Key) (k []byte, v []byte, _ error) { +func (*FKCheckExec) getIndexKeyValueInTable(ctx context.Context, memBuffer kv.MemBuffer, snap kv.Snapshot, key kv.Key) (k []byte, v []byte, _ error) { select { case <-ctx.Done(): return nil, nil, ctx.Err() @@ -487,7 +487,7 @@ func (h *fkValueHelper) fetchFKValues(row []types.Datum) ([]types.Datum, error) return vals, nil } -func (h *fkValueHelper) hasNullValue(vals []types.Datum) bool { +func (*fkValueHelper) hasNullValue(vals []types.Datum) bool { // If any foreign key column value is null, no need to check this row. // test case: // create table t1 (id int key,a int, b int, index(a, b)); @@ -933,7 +933,7 @@ func (s *FKCheckRuntimeStats) Merge(other execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. -func (s *FKCheckRuntimeStats) Tp() int { +func (*FKCheckRuntimeStats) Tp() int { return execdetails.TpFKCheckRuntimeStats } @@ -969,6 +969,6 @@ func (s *FKCascadeRuntimeStats) Merge(other execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. -func (s *FKCascadeRuntimeStats) Tp() int { +func (*FKCascadeRuntimeStats) Tp() int { return execdetails.TpFKCascadeRuntimeStats } diff --git a/executor/grant.go b/executor/grant.go index 6193982594f8a..34f25e70d003e 100644 --- a/executor/grant.go +++ b/executor/grant.go @@ -65,7 +65,7 @@ type GrantExec struct { } // Next implements the Executor Next interface. -func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *GrantExec) Next(ctx context.Context, _ *chunk.Chunk) error { if e.done { return nil } @@ -196,7 +196,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } switch e.Level.Level { case ast.GrantLevelDB: - err := checkAndInitDBPriv(internalSession, dbName, e.is, user.User.Username, user.User.Hostname) + err := checkAndInitDBPriv(internalSession, dbName, user.User.Username, user.User.Hostname) if err != nil { return err } @@ -273,7 +273,7 @@ func checkAndInitGlobalPriv(ctx sessionctx.Context, user string, host string) er // checkAndInitDBPriv checks if DB scope privilege entry exists in mysql.DB. // If unexists, insert a new one. -func checkAndInitDBPriv(ctx sessionctx.Context, dbName string, is infoschema.InfoSchema, user string, host string) error { +func checkAndInitDBPriv(ctx sessionctx.Context, dbName string, user string, host string) error { ok, err := dbUserExists(ctx, user, host, dbName) if err != nil { return err @@ -287,7 +287,7 @@ func checkAndInitDBPriv(ctx sessionctx.Context, dbName string, is infoschema.Inf // checkAndInitTablePriv checks if table scope privilege entry exists in mysql.Tables_priv. 
// If unexists, insert a new one. -func checkAndInitTablePriv(ctx sessionctx.Context, dbName, tblName string, is infoschema.InfoSchema, user string, host string) error { +func checkAndInitTablePriv(ctx sessionctx.Context, dbName, tblName string, _ infoschema.InfoSchema, user string, host string) error { ok, err := tableUserExists(ctx, user, host, dbName, tblName) if err != nil { return err @@ -495,7 +495,7 @@ func (e *GrantExec) grantDynamicPriv(privName string, user *ast.UserSpec, intern } // grantGlobalLevel manipulates mysql.user table. -func (e *GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error { +func (*GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error { sql := new(strings.Builder) sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET `, mysql.SystemDB, mysql.UserTable) err := composeGlobalPrivUpdate(sql, priv.Priv, "Y") @@ -709,13 +709,12 @@ func columnPrivEntryExists(ctx sessionctx.Context, name string, host string, db // getTablePriv gets current table scope privilege set from mysql.Tables_priv. // Return Table_priv and Column_priv. -func getTablePriv(sctx sessionctx.Context, name string, host string, db string, tbl string) (string, string, error) { +func getTablePriv(sctx sessionctx.Context, name string, host string, db string, tbl string) (tPriv, cPriv string, err error) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `SELECT Table_priv, Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?`, mysql.SystemDB, mysql.TablePrivTable, name, host, db, tbl) if err != nil { return "", "", err } - var tPriv, cPriv string rows, fields, err := getRowsAndFields(sctx, rs) if err != nil { return "", "", errors.Errorf("get table privilege fail for %s %s %s %s: %v", name, host, db, tbl, err) diff --git a/executor/hash_table.go b/executor/hash_table.go index d6d7d12112352..e852ea18138af 100644 --- a/executor/hash_table.go +++ b/executor/hash_table.go @@ -242,10 +242,10 @@ func (c *hashRowContainer) GetMatchedRowsAndPtrs(probeKey uint64, probeRow chunk // Some variables used for memTracker. var ( - matchedDataSize = int64(cap(matched))*rowSize + int64(cap(matchedPtrs))*rowPtrSize - lastChunkBufPointer *chunk.Chunk = nil - memDelta int64 = 0 - needTrackMemUsage = cap(innerPtrs) > signalCheckpointForJoinMask + matchedDataSize = int64(cap(matched))*rowSize + int64(cap(matchedPtrs))*rowPtrSize + needTrackMemUsage = cap(innerPtrs) > signalCheckpointForJoinMask + lastChunkBufPointer *chunk.Chunk + memDelta int64 ) c.chkBuf = nil c.memTracker.Consume(-c.chkBufSizeForOneProbe) diff --git a/executor/index_advise.go b/executor/index_advise.go index 42b0a4209b8e4..a0b6d773a667a 100644 --- a/executor/index_advise.go +++ b/executor/index_advise.go @@ -37,7 +37,7 @@ type IndexAdviseExec struct { } // Next implements the Executor Next interface. -func (e *IndexAdviseExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *IndexAdviseExec) Next(context.Context, *chunk.Chunk) error { if !e.IsLocal { return errors.New("Index Advise: don't support load file without local field") } @@ -57,12 +57,12 @@ func (e *IndexAdviseExec) Next(ctx context.Context, req *chunk.Chunk) error { } // Close implements the Executor Close interface. -func (e *IndexAdviseExec) Close() error { +func (*IndexAdviseExec) Close() error { return nil } // Open implements the Executor Open interface. 
-func (e *IndexAdviseExec) Open(ctx context.Context) error { +func (*IndexAdviseExec) Open(context.Context) error { return nil } @@ -121,7 +121,7 @@ func (e *IndexAdviseInfo) prepareInfo(data []byte) error { } // GetIndexAdvice gets the index advice by workload file. -func (e *IndexAdviseInfo) GetIndexAdvice(ctx context.Context, data []byte) error { +func (e *IndexAdviseInfo) GetIndexAdvice(data []byte) error { if err := e.prepareInfo(data); err != nil { return err } @@ -138,7 +138,7 @@ type IndexAdvice struct { type IndexAdviseVarKeyType int // String defines a Stringer function for debugging and pretty printing. -func (k IndexAdviseVarKeyType) String() string { +func (IndexAdviseVarKeyType) String() string { return "index_advise_var" } diff --git a/executor/index_lookup_hash_join.go b/executor/index_lookup_hash_join.go index e850f472c9c9c..f26ae8fca5f06 100644 --- a/executor/index_lookup_hash_join.go +++ b/executor/index_lookup_hash_join.go @@ -382,7 +382,7 @@ func (ow *indexHashJoinOuterWorker) buildTask(ctx context.Context) (*indexHashJo }, nil } -func (ow *indexHashJoinOuterWorker) pushToChan(ctx context.Context, task *indexHashJoinTask, dst chan<- *indexHashJoinTask) bool { +func (*indexHashJoinOuterWorker) pushToChan(ctx context.Context, task *indexHashJoinTask, dst chan<- *indexHashJoinTask) bool { select { case <-ctx.Done(): return true @@ -549,7 +549,7 @@ func (iw *indexHashJoinInnerWorker) getNewJoinResult(ctx context.Context) (*inde return joinResult, ok } -func (iw *indexHashJoinInnerWorker) buildHashTableForOuterResult(ctx context.Context, task *indexHashJoinTask, h hash.Hash64) { +func (iw *indexHashJoinInnerWorker) buildHashTableForOuterResult(task *indexHashJoinTask, h hash.Hash64) { failpoint.Inject("IndexHashJoinBuildHashTablePanic", nil) failpoint.Inject("ConsumeRandomPanic", nil) if iw.stats != nil { @@ -641,7 +641,7 @@ func (iw *indexHashJoinInnerWorker) handleTask(ctx context.Context, task *indexH // TODO(XuHuaiyu): we may always use the smaller side to build the hashtable. 
go util.WithRecovery( func() { - iw.buildHashTableForOuterResult(ctx, task, h) + iw.buildHashTableForOuterResult(task, h) }, func(r interface{}) { var err error @@ -772,7 +772,7 @@ func (iw *indexHashJoinInnerWorker) joinMatchedInnerRow2Chunk(ctx context.Contex return true, joinResult } -func (iw *indexHashJoinInnerWorker) collectMatchedInnerPtrs4OuterRows(ctx context.Context, innerRow chunk.Row, innerRowPtr chunk.RowPtr, +func (iw *indexHashJoinInnerWorker) collectMatchedInnerPtrs4OuterRows(innerRow chunk.Row, innerRowPtr chunk.RowPtr, task *indexHashJoinTask, h hash.Hash64, buf []byte) error { _, matchedOuterRowIdx, err := iw.getMatchedOuterRows(innerRow, task, h, buf) if err != nil { @@ -809,7 +809,7 @@ func (iw *indexHashJoinInnerWorker) doJoinInOrder(ctx context.Context, task *ind for j, chk := 0, task.innerResult.GetChunk(i); j < chk.NumRows(); j++ { row := chk.GetRow(j) ptr := chunk.RowPtr{ChkIdx: uint32(i), RowIdx: uint32(j)} - err = iw.collectMatchedInnerPtrs4OuterRows(ctx, row, ptr, task, h, iw.joinKeyBuf) + err = iw.collectMatchedInnerPtrs4OuterRows(row, ptr, task, h, iw.joinKeyBuf) failpoint.Inject("TestIssue31129", func() { err = errors.New("TestIssue31129") }) diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index 0c19fe4ea5bd4..49a0e7eb13aa9 100644 --- a/executor/index_lookup_join.go +++ b/executor/index_lookup_join.go @@ -396,7 +396,7 @@ func (ow *outerWorker) run(ctx context.Context, wg *sync.WaitGroup) { } } -func (ow *outerWorker) pushToChan(ctx context.Context, task *lookUpJoinTask, dst chan<- *lookUpJoinTask) bool { +func (*outerWorker) pushToChan(ctx context.Context, task *lookUpJoinTask, dst chan<- *lookUpJoinTask) bool { select { case <-ctx.Done(): return true @@ -849,6 +849,6 @@ func (e *indexLookUpJoinRuntimeStats) Merge(rs execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. 
-func (e *indexLookUpJoinRuntimeStats) Tp() int { +func (*indexLookUpJoinRuntimeStats) Tp() int { return execdetails.TpIndexLookUpJoinRuntimeStats } diff --git a/executor/index_lookup_merge_join.go b/executor/index_lookup_merge_join.go index ed90c9b7baa99..64a4abab1b7d2 100644 --- a/executor/index_lookup_merge_join.go +++ b/executor/index_lookup_merge_join.go @@ -330,7 +330,7 @@ func (omw *outerMergeWorker) run(ctx context.Context, wg *sync.WaitGroup, cancel } } -func (omw *outerMergeWorker) pushToChan(ctx context.Context, task *lookUpMergeJoinTask, dst chan<- *lookUpMergeJoinTask) (finished bool) { +func (*outerMergeWorker) pushToChan(ctx context.Context, task *lookUpMergeJoinTask, dst chan<- *lookUpMergeJoinTask) (finished bool) { select { case <-ctx.Done(): return true diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index 3e76ae47deb57..fd26e066f0160 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -43,6 +43,7 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/channel" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/logutil" @@ -160,7 +161,7 @@ func (e *IndexMergeReaderExecutor) Table() table.Table { } // Open implements the Executor Open interface -func (e *IndexMergeReaderExecutor) Open(ctx context.Context) (err error) { +func (e *IndexMergeReaderExecutor) Open(_ context.Context) (err error) { e.keyRanges = make([][]kv.KeyRange, 0, len(e.partialPlans)) e.initRuntimeStats() if e.isCorColInTableFilter { @@ -1060,8 +1061,7 @@ func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderByAndPushedLimit(ctx co if len(uselessMap) == len(w.indexMerge.partialPlans) { // consume reset tasks go func() { - for range fetchCh { - } + channel.Clear(fetchCh) }() break } @@ -1252,7 +1252,7 @@ func (w *intersectionProcessWorker) doIntersectionPerPartition(ctx context.Conte } else { cnt := 1 mapDelta += hMap.Set(h, &cnt) + int64(h.ExtraMemSize()) - rowDelta += 1 + rowDelta++ } } @@ -1673,7 +1673,7 @@ func (w *indexMergeTableScanWorker) pickAndExecTask(ctx context.Context, task ** } } -func (w *indexMergeTableScanWorker) handleTableScanWorkerPanic(ctx context.Context, finished <-chan struct{}, task **indexMergeTableTask, worker string) func(r interface{}) { +func (*indexMergeTableScanWorker) handleTableScanWorkerPanic(ctx context.Context, finished <-chan struct{}, task **indexMergeTableTask, worker string) func(r interface{}) { return func(r interface{}) { if r == nil { logutil.BgLogger().Debug("worker finish without panic", zap.Any("worker", worker)) @@ -1814,6 +1814,6 @@ func (e *IndexMergeRuntimeStat) Merge(other execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. 
-func (e *IndexMergeRuntimeStat) Tp() int { +func (*IndexMergeRuntimeStat) Tp() int { return execdetails.TpIndexMergeRunTimeStats } diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index 3e74fd152e0e8..c7e7d75680157 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -119,7 +119,7 @@ func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Contex case infoschema.TableTables: err = e.setDataFromTables(ctx, sctx, dbs) case infoschema.TableReferConst: - err = e.setDataFromReferConst(ctx, sctx, dbs) + err = e.setDataFromReferConst(sctx, dbs) case infoschema.TableSequences: e.setDataFromSequences(sctx, dbs) case infoschema.TablePartitions: @@ -141,7 +141,7 @@ func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Contex case infoschema.TableKeyColumn: e.setDataFromKeyColumnUsage(sctx, dbs) case infoschema.TableMetricTables: - e.setDataForMetricTables(sctx) + e.setDataForMetricTables() case infoschema.TableProfiling: e.setDataForPseudoProfiling(sctx) case infoschema.TableCollationCharacterSetApplicability: @@ -183,11 +183,11 @@ func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Contex case infoschema.TableUserAttributes: err = e.setDataForUserAttributes(ctx, sctx) case infoschema.TableMemoryUsage: - err = e.setDataForMemoryUsage(sctx) + err = e.setDataForMemoryUsage() case infoschema.ClusterTableMemoryUsage: err = e.setDataForClusterMemoryUsage(sctx) case infoschema.TableMemoryUsageOpsHistory: - err = e.setDataForMemoryUsageOpsHistory(sctx) + err = e.setDataForMemoryUsageOpsHistory() case infoschema.ClusterTableMemoryUsageOpsHistory: err = e.setDataForClusterMemoryUsageOpsHistory(sctx) case infoschema.TableResourceGroups: @@ -443,7 +443,7 @@ func (e *memtableRetriever) setDataForStatisticsInTable(schema *model.DBInfo, ta e.rows = append(e.rows, rows...) } -func (e *memtableRetriever) setDataFromReferConst(ctx context.Context, sctx sessionctx.Context, schemas []*model.DBInfo) error { +func (e *memtableRetriever) setDataFromReferConst(sctx sessionctx.Context, schemas []*model.DBInfo) error { checker := privilege.GetPrivilegeManager(sctx) var rows [][]types.Datum for _, schema := range schemas { @@ -1246,7 +1246,7 @@ func (e *DDLJobsReaderExec) Open(ctx context.Context) error { } // Next implements the Executor Next interface. 
-func (e *DDLJobsReaderExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *DDLJobsReaderExec) Next(_ context.Context, req *chunk.Chunk) error { req.GrowAndReset(e.MaxChunkSize()) checker := privilege.GetPrivilegeManager(e.Ctx()) count := 0 @@ -1446,7 +1446,7 @@ func (e *memtableRetriever) setDataFromUserPrivileges(ctx sessionctx.Context) { e.rows = pm.UserPrivilegesTable(ctx.GetSessionVars().ActiveRoles, ctx.GetSessionVars().User.Username, ctx.GetSessionVars().User.Hostname) } -func (e *memtableRetriever) setDataForMetricTables(ctx sessionctx.Context) { +func (e *memtableRetriever) setDataForMetricTables() { tables := make([]string, 0, len(infoschema.MetricTableMap)) for name := range infoschema.MetricTableMap { tables = append(tables, name) @@ -1638,7 +1638,7 @@ func (e *memtableRetriever) getRegionsInfoForTable(h *helper.Helper, is infosche return allRegionsInfo, nil } -func (e *memtableRetriever) getRegionsInfoForSingleTable(helper *helper.Helper, tableID int64) (*helper.RegionsInfo, error) { +func (*memtableRetriever) getRegionsInfoForSingleTable(helper *helper.Helper, tableID int64) (*helper.RegionsInfo, error) { sk, ek := tablecodec.GetTableHandleKeyRange(tableID) sRegion, err := helper.GetRegionByKey(codec.EncodeBytes(nil, sk)) if err != nil { @@ -1824,7 +1824,7 @@ type tableStorageStatsRetriever struct { stats helper.PDRegionStats } -func (e *tableStorageStatsRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { +func (e *tableStorageStatsRetriever) retrieve(_ context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { if e.retrieved { return nil, nil } @@ -1839,7 +1839,7 @@ func (e *tableStorageStatsRetriever) retrieve(ctx context.Context, sctx sessionc return nil, nil } - rows, err := e.setDataForTableStorageStats(sctx) + rows, err := e.setDataForTableStorageStats() if err != nil { return nil, err } @@ -1927,7 +1927,7 @@ func (e *tableStorageStatsRetriever) initialize(sctx sessionctx.Context) error { return nil } -func (e *tableStorageStatsRetriever) setDataForTableStorageStats(ctx sessionctx.Context) ([][]types.Datum, error) { +func (e *tableStorageStatsRetriever) setDataForTableStorageStats() ([][]types.Datum, error) { rows := make([][]types.Datum, 0, 1024) count := 0 for e.curTable < len(e.initialTables) && count < 1024 { @@ -2022,14 +2022,14 @@ func dataForAnalyzeStatusHelper(ctx context.Context, sctx sessionctx.Context) (r if !ok { return nil, errors.New("invalid start time") } - RemainingDuration, progress, estimatedRowCnt, RemainDurationErr := + remainingDuration, progress, estimatedRowCnt, remainDurationErr := getRemainDurationForAnalyzeStatusHelper(ctx, sctx, &startTime, dbName, tableName, partitionName, processedRows) - if RemainDurationErr != nil { - logutil.BgLogger().Warn("get remaining duration failed", zap.Error(RemainDurationErr)) + if remainDurationErr != nil { + logutil.BgLogger().Warn("get remaining duration failed", zap.Error(remainDurationErr)) } - if RemainingDuration != nil { - remainDurationStr = execdetails.FormatDuration(*RemainingDuration) + if remainingDuration != nil { + remainDurationStr = execdetails.FormatDuration(*remainingDuration) } progressDouble = progress estimatedRowCntStr = int64(estimatedRowCnt) @@ -2059,7 +2059,7 @@ func getRemainDurationForAnalyzeStatusHelper( ctx context.Context, sctx sessionctx.Context, startTime *types.Time, dbName, tableName, partitionName string, processedRows int64) (*time.Duration, float64, float64, error) { - var RemainingDuration = time.Duration(0) 
+ var remainingDuration = time.Duration(0) var percentage = 0.0 var totalCnt = float64(0) if startTime != nil { @@ -2070,8 +2070,8 @@ func getRemainDurationForAnalyzeStatusHelper( duration := time.Now().UTC().Sub(start) if intest.InTest { if val := ctx.Value(AnalyzeProgressTest); val != nil { - RemainingDuration, percentage = calRemainInfoForAnalyzeStatus(ctx, int64(totalCnt), processedRows, duration) - return &RemainingDuration, percentage, totalCnt, nil + remainingDuration, percentage = calRemainInfoForAnalyzeStatus(ctx, int64(totalCnt), processedRows, duration) + return &remainingDuration, percentage, totalCnt, nil } } var tid int64 @@ -2099,9 +2099,9 @@ func getRemainDurationForAnalyzeStatusHelper( if tid > 0 && totalCnt == 0 { totalCnt, _ = pdhelper.GlobalPDHelper.GetApproximateTableCountFromStorage(sctx, tid, dbName, tableName, partitionName) } - RemainingDuration, percentage = calRemainInfoForAnalyzeStatus(ctx, int64(totalCnt), processedRows, duration) + remainingDuration, percentage = calRemainInfoForAnalyzeStatus(ctx, int64(totalCnt), processedRows, duration) } - return &RemainingDuration, percentage, totalCnt, nil + return &remainingDuration, percentage, totalCnt, nil } func calRemainInfoForAnalyzeStatus(ctx context.Context, totalCnt int64, processedRows int64, duration time.Duration) (time.Duration, float64) { @@ -2361,7 +2361,7 @@ func (e *memtableRetriever) setDataForClusterTrxSummary(ctx sessionctx.Context) return nil } -func (e *memtableRetriever) setDataForMemoryUsage(ctx sessionctx.Context) error { +func (e *memtableRetriever) setDataForMemoryUsage() error { r := memory.ReadMemStats() currentOps, sessionKillLastDatum := types.NewDatum(nil), types.NewDatum(nil) if memory.TriggerMemoryLimitGC.Load() || servermemorylimit.IsKilling.Load() { @@ -2391,7 +2391,7 @@ func (e *memtableRetriever) setDataForMemoryUsage(ctx sessionctx.Context) error } func (e *memtableRetriever) setDataForClusterMemoryUsage(ctx sessionctx.Context) error { - err := e.setDataForMemoryUsage(ctx) + err := e.setDataForMemoryUsage() if err != nil { return err } @@ -2403,13 +2403,13 @@ func (e *memtableRetriever) setDataForClusterMemoryUsage(ctx sessionctx.Context) return nil } -func (e *memtableRetriever) setDataForMemoryUsageOpsHistory(ctx sessionctx.Context) error { +func (e *memtableRetriever) setDataForMemoryUsageOpsHistory() error { e.rows = servermemorylimit.GlobalMemoryOpsHistoryManager.GetRows() return nil } func (e *memtableRetriever) setDataForClusterMemoryUsageOpsHistory(ctx sessionctx.Context) error { - err := e.setDataForMemoryUsageOpsHistory(ctx) + err := e.setDataForMemoryUsageOpsHistory() if err != nil { return err } @@ -2465,8 +2465,7 @@ func (e *tidbTrxTableRetriever) retrieve(ctx context.Context, sctx sessionctx.Co // The current TiDB node's address is needed by the CLUSTER_TIDB_TRX table. 
var err error var instanceAddr string - switch e.table.Name.O { - case infoschema.ClusterTableTiDBTrx: + if e.table.Name.O == infoschema.ClusterTableTiDBTrx { instanceAddr, err = infoschema.GetInstanceAddr(sctx) if err != nil { return nil, err @@ -2636,7 +2635,7 @@ func (r *dataLockWaitsTableRetriever) retrieve(ctx context.Context, sctx session row = append(row, types.NewDatum(strings.ToUpper(hex.EncodeToString(lockWait.Key)))) case infoschema.DataLockWaitsColumnKeyInfo: infoSchema := sctx.GetInfoSchema().(infoschema.InfoSchema) - var decodedKeyStr interface{} = nil + var decodedKeyStr interface{} decodedKey, err := keydecoder.DecodeKey(lockWait.Key, infoSchema) if err == nil { decodedKeyBytes, err := json.Marshal(decodedKey) @@ -2683,7 +2682,7 @@ func (r *dataLockWaitsTableRetriever) retrieve(ctx context.Context, sctx session row = append(row, types.NewDatum(strings.ToUpper(hex.EncodeToString(resolving.Key)))) case infoschema.DataLockWaitsColumnKeyInfo: infoSchema := domain.GetDomain(sctx).InfoSchema() - var decodedKeyStr interface{} = nil + var decodedKeyStr interface{} decodedKey, err := keydecoder.DecodeKey(resolving.Key, infoSchema) if err == nil { decodedKeyBytes, err := json.Marshal(decodedKey) @@ -2740,7 +2739,7 @@ type deadlocksTableRetriever struct { // nextIndexPair advances an index pair (where `idx` is the index of the DeadlockRecord, and `waitChainIdx` is the index // of the wait chain item in the `idx`-th DeadlockRecord). This function helps iterate over each wait chain item // in all DeadlockRecords. -func (r *deadlocksTableRetriever) nextIndexPair(idx, waitChainIdx int) (int, int) { +func (r *deadlocksTableRetriever) nextIndexPair(idx, waitChainIdx int) (a, b int) { waitChainIdx++ if waitChainIdx >= len(r.deadlocks[idx].WaitChain) { waitChainIdx = 0 @@ -2775,8 +2774,7 @@ func (r *deadlocksTableRetriever) retrieve(ctx context.Context, sctx sessionctx. // The current TiDB node's address is needed by the CLUSTER_DEADLOCKS table. var err error var instanceAddr string - switch r.table.Name.O { - case infoschema.ClusterTableDeadlocks: + if r.table.Name.O == infoschema.ClusterTableDeadlocks { instanceAddr, err = infoschema.GetInstanceAddr(sctx) if err != nil { return nil, err @@ -2906,8 +2904,7 @@ func (e *hugeMemTableRetriever) retrieve(ctx context.Context, sctx sessionctx.Co } var err error - switch e.table.Name.O { - case infoschema.TableColumns: + if e.table.Name.O == infoschema.TableColumns { err = e.setDataForColumns(ctx, sctx, e.extractor) } if err != nil { @@ -2992,7 +2989,7 @@ func (e *TiFlashSystemTableRetriever) initialize(sctx sessionctx.Context, tiflas return errors.Errorf("node status addr: %s format illegal", info.StatusAddr) } e.instanceIds = append(e.instanceIds, info.Address) - e.instanceCount += 1 + e.instanceCount++ } e.initialized = true return nil @@ -3104,7 +3101,7 @@ func (e *TiFlashSystemTableRetriever) dataForTiFlashSystemTables(ctx context.Con } e.rowIdx += len(outputRows) if len(outputRows) < maxCount { - e.instanceIdx += 1 + e.instanceIdx++ e.rowIdx = 0 } return outputRows, nil diff --git a/executor/insert.go b/executor/insert.go index 302b2bdd5ac67..bd8f269670993 100644 --- a/executor/insert.go +++ b/executor/insert.go @@ -186,7 +186,7 @@ func (e *InsertValues) prefetchDataCache(ctx context.Context, txn kv.Transaction } // updateDupRow updates a duplicate row to a new row.
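Review note: the two decodedKeyStr hunks above (and `var value uint32` later in this diff) apply the redundant-initializer cleanup: every Go variable declaration already yields the type's zero value, so spelling out `= nil` or `= 0` adds nothing. A tiny illustration:

package main

import "fmt"

func main() {
	var decoded interface{} // interface zero value is already nil
	var count uint32        // numeric zero value is already 0
	fmt.Println(decoded == nil, count) // true 0
}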
-func (e *InsertExec) updateDupRow(ctx context.Context, idxInBatch int, txn kv.Transaction, row toBeCheckedRow, handle kv.Handle, onDuplicate []*expression.Assignment) error { +func (e *InsertExec) updateDupRow(ctx context.Context, idxInBatch int, txn kv.Transaction, row toBeCheckedRow, handle kv.Handle, _ []*expression.Assignment) error { oldRow, err := getOldRow(ctx, e.Ctx(), txn, row.t, handle, e.GenExprs) if err != nil { return err @@ -210,7 +210,7 @@ func (e *InsertExec) updateDupRow(ctx context.Context, idxInBatch int, txn kv.Tr func (e *InsertExec) batchUpdateDupRows(ctx context.Context, newRows [][]types.Datum) error { // Get keys need to be checked. start := time.Now() - toBeCheckedRows, err := getKeysNeedCheck(ctx, e.Ctx(), e.Table, newRows) + toBeCheckedRows, err := getKeysNeedCheck(e.Ctx(), e.Table, newRows) if err != nil { return err } diff --git a/executor/insert_common.go b/executor/insert_common.go index 5ff765019784b..1625941b7b1cd 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -113,7 +113,7 @@ func (e *InsertValues) insertCommon() *InsertValues { return e } -func (e *InsertValues) exec(_ context.Context, _ [][]types.Datum) error { +func (*InsertValues) exec(context.Context, [][]types.Datum) error { panic("derived should overload exec function") } @@ -1185,7 +1185,7 @@ func (e *InsertValues) batchCheckAndInsert( defer tracing.StartRegion(ctx, "InsertValues.batchCheckAndInsert").End() start := time.Now() // Get keys need to be checked. - toBeCheckedRows, err := getKeysNeedCheck(ctx, e.Ctx(), e.Table, rows) + toBeCheckedRows, err := getKeysNeedCheck(e.Ctx(), e.Table, rows) if err != nil { return err } @@ -1556,6 +1556,6 @@ func (e *InsertRuntimeStat) Merge(other execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. -func (e *InsertRuntimeStat) Tp() int { +func (*InsertRuntimeStat) Tp() int { return execdetails.TpInsertRuntimeStat } diff --git a/executor/inspection_common.go b/executor/inspection_common.go index f30e8d26e3413..42ffb3da390a7 100644 --- a/executor/inspection_common.go +++ b/executor/inspection_common.go @@ -34,7 +34,7 @@ const ( inspectionRuleTypeSummary string = "summary" ) -func (e *inspectionRuleRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { +func (e *inspectionRuleRetriever) retrieve(context.Context, sessionctx.Context) ([][]types.Datum, error) { if e.retrieved || e.extractor.SkipRequest { return nil, nil } diff --git a/executor/inspection_profile.go b/executor/inspection_profile.go index e8eac1f24b73c..9670f4ac626d0 100644 --- a/executor/inspection_profile.go +++ b/executor/inspection_profile.go @@ -587,7 +587,7 @@ func (pb *profileBuilder) formatValueByTp(value float64) string { } // dotColor function is copy from https://github.com/google/pprof. 
-func (pb *profileBuilder) dotColor(score float64, isBackground bool) string { +func (*profileBuilder) dotColor(score float64, isBackground bool) string { // A float between 0.0 and 1.0, indicating the extent to which // colors should be shifted away from grey (to make positive and // negative values easier to distinguish, and to make more use of @@ -638,7 +638,7 @@ func (pb *profileBuilder) dotColor(score float64, isBackground bool) string { return fmt.Sprintf("#%02x%02x%02x", uint8(r*255.0), uint8(g*255.0), uint8(b*255.0)) } -func (pb *profileBuilder) genTiDBGCTree() *metricNode { +func (*profileBuilder) genTiDBGCTree() *metricNode { tidbGC := &metricNode{ table: "tidb_gc", isPartOfParent: true, @@ -653,7 +653,7 @@ func (pb *profileBuilder) genTiDBGCTree() *metricNode { return tidbGC } -func (pb *profileBuilder) genTiDBQueryTree() *metricNode { +func (*profileBuilder) genTiDBQueryTree() *metricNode { tidbKVRequest := &metricNode{ table: "tidb_kv_request", isPartOfParent: true, diff --git a/executor/inspection_result.go b/executor/inspection_result.go index 55793a7729274..a120bd22897ee 100644 --- a/executor/inspection_result.go +++ b/executor/inspection_result.go @@ -474,7 +474,7 @@ func (versionInspection) inspect(ctx context.Context, sctx sessionctx.Context, f return results } -func (c nodeLoadInspection) inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult { +func (nodeLoadInspection) inspect(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult { var rules = []ruleChecker{ inspectCPULoad{item: "load1", tbl: "node_load1"}, inspectCPULoad{item: "load5", tbl: "node_load5"}, @@ -493,7 +493,7 @@ func (inspectVirtualMemUsage) genSQL(timeRange plannercore.QueryTimeRange) strin return sql } -func (i inspectVirtualMemUsage) genResult(sql string, row chunk.Row) inspectionResult { +func (i inspectVirtualMemUsage) genResult(_ string, row chunk.Row) inspectionResult { return inspectionResult{ tp: "node", instance: row.GetString(0), @@ -516,7 +516,7 @@ func (inspectSwapMemoryUsed) genSQL(timeRange plannercore.QueryTimeRange) string return sql } -func (i inspectSwapMemoryUsed) genResult(sql string, row chunk.Row) inspectionResult { +func (i inspectSwapMemoryUsed) genResult(_ string, row chunk.Row) inspectionResult { return inspectionResult{ tp: "node", instance: row.GetString(0), @@ -538,7 +538,7 @@ func (inspectDiskUsage) genSQL(timeRange plannercore.QueryTimeRange) string { return sql } -func (i inspectDiskUsage) genResult(sql string, row chunk.Row) inspectionResult { +func (i inspectDiskUsage) genResult(_ string, row chunk.Row) inspectionResult { return inspectionResult{ tp: "node", instance: row.GetString(0), @@ -567,7 +567,7 @@ func (i inspectCPULoad) genSQL(timeRange plannercore.QueryTimeRange) string { return sql } -func (i inspectCPULoad) genResult(sql string, row chunk.Row) inspectionResult { +func (i inspectCPULoad) genResult(_ string, row chunk.Row) inspectionResult { return inspectionResult{ tp: "node", instance: row.GetString(0), @@ -1095,7 +1095,7 @@ func (c compareStoreStatus) getItem() string { type checkRegionHealth struct{} -func (c checkRegionHealth) genSQL(timeRange plannercore.QueryTimeRange) string { +func (checkRegionHealth) genSQL(timeRange plannercore.QueryTimeRange) string { condition := timeRange.Condition() return fmt.Sprintf(`select instance, sum(value) as sum_value from metrics_schema.pd_region_health %s and type in 
('extra-peer-region-count','learner-peer-region-count','pending-peer-region-count') having sum_value>100`, condition) @@ -1117,18 +1117,18 @@ func (c checkRegionHealth) genResult(_ string, row chunk.Row) inspectionResult { } } -func (c checkRegionHealth) getItem() string { +func (checkRegionHealth) getItem() string { return "region-health" } type checkStoreRegionTooMuch struct{} -func (c checkStoreRegionTooMuch) genSQL(timeRange plannercore.QueryTimeRange) string { +func (checkStoreRegionTooMuch) genSQL(timeRange plannercore.QueryTimeRange) string { condition := timeRange.Condition() return fmt.Sprintf(`select address, max(value) from metrics_schema.pd_scheduler_store_status %s and type='region_count' and value > 20000 group by address`, condition) } -func (c checkStoreRegionTooMuch) genResult(sql string, row chunk.Row) inspectionResult { +func (c checkStoreRegionTooMuch) genResult(_ string, row chunk.Row) inspectionResult { actual := fmt.Sprintf("%.2f", row.GetFloat64(1)) degree := math.Abs(row.GetFloat64(1)-20000) / math.Max(row.GetFloat64(1), 20000) return inspectionResult{ @@ -1143,7 +1143,7 @@ func (c checkStoreRegionTooMuch) genResult(sql string, row chunk.Row) inspection } } -func (c checkStoreRegionTooMuch) getItem() string { +func (checkStoreRegionTooMuch) getItem() string { return "region-count" } @@ -1190,7 +1190,7 @@ func checkRules(ctx context.Context, sctx sessionctx.Context, filter inspectionF return results } -func (c thresholdCheckInspection) inspectForLeaderDrop(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult { +func (thresholdCheckInspection) inspectForLeaderDrop(ctx context.Context, sctx sessionctx.Context, filter inspectionFilter) []inspectionResult { condition := filter.timeRange.Condition() threshold := 50.0 sql := new(strings.Builder) diff --git a/executor/join.go b/executor/join.go index ce93bfb255e0f..f27d4642556f1 100644 --- a/executor/join.go +++ b/executor/join.go @@ -908,12 +908,12 @@ func (w *probeWorker) joinNAASJMatchProbeSideRow2Chunk(probeKey uint64, probeKey // For NA-AntiLeftOuterSemiJoin, we couldn't match null-bucket first, because once y set has a same key x and null // key, we should return the result as left side row appended with a scalar value 0 which is from same key matching failure. func (w *probeWorker) joinNAAJMatchProbeSideRow2Chunk(probeKey uint64, probeKeyNullBits *bitmap.ConcurrentBitmap, probeSideRow chunk.Row, hCtx *hashContext, joinResult *hashjoinWorkerResult) (bool, *hashjoinWorkerResult) { - NAAntiSemiJoin := w.hashJoinCtx.joinType == plannercore.AntiSemiJoin && w.hashJoinCtx.isNullAware - NAAntiLeftOuterSemiJoin := w.hashJoinCtx.joinType == plannercore.AntiLeftOuterSemiJoin && w.hashJoinCtx.isNullAware - if NAAntiSemiJoin { + naAntiSemiJoin := w.hashJoinCtx.joinType == plannercore.AntiSemiJoin && w.hashJoinCtx.isNullAware + naAntiLeftOuterSemiJoin := w.hashJoinCtx.joinType == plannercore.AntiLeftOuterSemiJoin && w.hashJoinCtx.isNullAware + if naAntiSemiJoin { return w.joinNAASJMatchProbeSideRow2Chunk(probeKey, probeKeyNullBits, probeSideRow, hCtx, joinResult) } - if NAAntiLeftOuterSemiJoin { + if naAntiLeftOuterSemiJoin { return w.joinNAALOSJMatchProbeSideRow2Chunk(probeKey, probeKeyNullBits, probeSideRow, hCtx, joinResult) } // shouldn't be here, not a valid NAAJ. @@ -1559,7 +1559,7 @@ func (e *joinRuntimeStats) String() string { } // Tp implements the RuntimeStats interface. 
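Review note: renames such as NAAntiSemiJoin → naAntiSemiJoin here, RemainingDuration → remainingDuration earlier, and csDb → csDB further down all come from revive's var-naming checks: local variables start lowercase, and well-known initialisms (DB, ID, URL, ...) keep one consistent case instead of being half-capitalized. Sketch:

package main

import "fmt"

func main() {
	// var-naming would flag `csDb` (mis-cased initialism) and
	// `NAAntiSemiJoin` (exported-style capitalization on a local).
	csDB := "utf8mb4"
	naAntiSemiJoin := true
	fmt.Println(csDB, naAntiSemiJoin)
}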
-func (e *joinRuntimeStats) Tp() int { +func (*joinRuntimeStats) Tp() int { return execdetails.TpJoinRuntimeStats } @@ -1596,7 +1596,7 @@ func (e *hashJoinRuntimeStats) setMaxFetchAndProbeTime(t int64) { } // Tp implements the RuntimeStats interface. -func (e *hashJoinRuntimeStats) Tp() int { +func (*hashJoinRuntimeStats) Tp() int { return execdetails.TpHashJoinRuntimeStats } diff --git a/executor/joiner.go b/executor/joiner.go index 842135802444f..907823c928882 100644 --- a/executor/joiner.go +++ b/executor/joiner.go @@ -238,7 +238,7 @@ func (j *baseJoiner) initDefaultInner(innerTypes []*types.FieldType, defaultInne j.defaultInner = mutableRow.ToRow() } -func (j *baseJoiner) makeJoinRowToChunk(chk *chunk.Chunk, lhs, rhs chunk.Row, lUsed, rUsed []int) { +func (*baseJoiner) makeJoinRowToChunk(chk *chunk.Chunk, lhs, rhs chunk.Row, lUsed, rUsed []int) { // Call AppendRow() first to increment the virtual rows. // Fix: https://github.com/pingcap/tidb/issues/5771 lWide := chk.AppendRowByColIdxs(lhs, lUsed) @@ -424,8 +424,7 @@ func (j *semiJoiner) tryToMatchOuters(outers chunk.Iterator, inner chunk.Row, ch return outerRowStatus, err } -func (j *semiJoiner) onMissMatch(_ bool, outer chunk.Row, chk *chunk.Chunk) { -} +func (*semiJoiner) onMissMatch(bool, chunk.Row, *chunk.Chunk) {} // Clone implements joiner interface. func (j *semiJoiner) Clone() joiner { @@ -453,7 +452,7 @@ type nullAwareAntiSemiJoiner struct { } // tryToMatchInners implements joiner interface. -func (naaj *nullAwareAntiSemiJoiner) tryToMatchInners(outer chunk.Row, inners chunk.Iterator, chk *chunk.Chunk, _ ...NAAJType) (matched bool, hasNull bool, err error) { +func (naaj *nullAwareAntiSemiJoiner) tryToMatchInners(outer chunk.Row, inners chunk.Iterator, _ *chunk.Chunk, _ ...NAAJType) (matched bool, hasNull bool, err error) { // Step1: inner rows come from NULL-bucket OR Same-Key bucket. (no rows mean not matched) if inners.Len() == 0 { return false, false, nil @@ -482,7 +481,7 @@ func (naaj *nullAwareAntiSemiJoiner) tryToMatchInners(outer chunk.Row, inners ch return false, false, err } -func (naaj *nullAwareAntiSemiJoiner) tryToMatchOuters(outers chunk.Iterator, inner chunk.Row, chk *chunk.Chunk, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) { +func (*nullAwareAntiSemiJoiner) tryToMatchOuters(_ chunk.Iterator, _ chunk.Row, _ *chunk.Chunk, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) { // todo: use the outer build. return outerRowStatus, err } @@ -500,7 +499,7 @@ type antiSemiJoiner struct { } // tryToMatchInners implements joiner interface. 
-func (j *antiSemiJoiner) tryToMatchInners(outer chunk.Row, inners chunk.Iterator, chk *chunk.Chunk, _ ...NAAJType) (matched bool, hasNull bool, err error) { +func (j *antiSemiJoiner) tryToMatchInners(outer chunk.Row, inners chunk.Iterator, _ *chunk.Chunk, _ ...NAAJType) (matched bool, hasNull bool, err error) { if inners.Len() == 0 { return false, false, nil } @@ -709,7 +708,7 @@ func (naal *nullAwareAntiLeftOuterSemiJoiner) onMissMatch(_ bool, outer chunk.Ro chk.AppendInt64(lWide, 1) } -func (naal *nullAwareAntiLeftOuterSemiJoiner) tryToMatchOuters(outers chunk.Iterator, inner chunk.Row, chk *chunk.Chunk, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) { +func (*nullAwareAntiLeftOuterSemiJoiner) tryToMatchOuters(chunk.Iterator, chunk.Row, *chunk.Chunk, []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) { // todo: return nil, err } @@ -1034,8 +1033,7 @@ func (j *innerJoiner) tryToMatchOuters(outers chunk.Iterator, inner chunk.Row, c return j.filterAndCheckOuterRowStatus(chkForJoin, chk, inner.Len(), outerRowStatus, lUsedForFilter, rUsedForFilter) } -func (j *innerJoiner) onMissMatch(_ bool, outer chunk.Row, chk *chunk.Chunk) { -} +func (*innerJoiner) onMissMatch(bool, chunk.Row, *chunk.Chunk) {} func (j *innerJoiner) Clone() joiner { return &innerJoiner{baseJoiner: j.baseJoiner.Clone()} diff --git a/executor/load_data.go b/executor/load_data.go index 4e08a7eae41de..2cc00260f9c3e 100644 --- a/executor/load_data.go +++ b/executor/load_data.go @@ -723,7 +723,7 @@ func (s *SimpleSeekerOnReadCloser) Close() error { type loadDataVarKeyType int // String defines a Stringer function for debugging and pretty printing. -func (k loadDataVarKeyType) String() string { +func (loadDataVarKeyType) String() string { return "load_data_var" } diff --git a/executor/load_stats.go b/executor/load_stats.go index c409aad12c1c6..5fa1d591bc7f9 100644 --- a/executor/load_stats.go +++ b/executor/load_stats.go @@ -45,7 +45,7 @@ type LoadStatsInfo struct { type loadStatsVarKeyType int // String defines a Stringer function for debugging and pretty printing. -func (k loadStatsVarKeyType) String() string { +func (loadStatsVarKeyType) String() string { return "load_stats_var" } @@ -68,12 +68,12 @@ func (e *LoadStatsExec) Next(_ context.Context, req *chunk.Chunk) error { } // Close implements the Executor Close interface. -func (e *LoadStatsExec) Close() error { +func (*LoadStatsExec) Close() error { return nil } // Open implements the Executor Open interface. -func (e *LoadStatsExec) Open(_ context.Context) error { +func (*LoadStatsExec) Open(context.Context) error { return nil } diff --git a/executor/lock_stats.go b/executor/lock_stats.go index 5d36b900887e7..7ec3d12e9be4a 100644 --- a/executor/lock_stats.go +++ b/executor/lock_stats.go @@ -37,7 +37,7 @@ type LockStatsExec struct { type lockStatsVarKeyType int // String defines a Stringer function for debugging and pretty printing. -func (k lockStatsVarKeyType) String() string { +func (lockStatsVarKeyType) String() string { return "lock_stats_var" } @@ -83,12 +83,12 @@ func (e *LockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { } // Close implements the Executor Close interface. -func (e *LockStatsExec) Close() error { +func (*LockStatsExec) Close() error { return nil } // Open implements the Executor Open interface. 
-func (e *LockStatsExec) Open(_ context.Context) error { +func (*LockStatsExec) Open(context.Context) error { return nil } @@ -102,7 +102,7 @@ type UnlockStatsExec struct { type unlockStatsVarKeyType int // String defines a Stringer function for debugging and pretty printing. -func (k unlockStatsVarKeyType) String() string { +func (unlockStatsVarKeyType) String() string { return "unlock_stats_var" } @@ -110,7 +110,7 @@ func (k unlockStatsVarKeyType) String() string { const UnlockStatsVarKey unlockStatsVarKeyType = 0 // Next implements the Executor Next interface. -func (e *UnlockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { +func (e *UnlockStatsExec) Next(context.Context, *chunk.Chunk) error { do := domain.GetDomain(e.Ctx()) is := do.InfoSchema() h := do.StatsHandle() @@ -148,11 +148,11 @@ func (e *UnlockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { } // Close implements the Executor Close interface. -func (e *UnlockStatsExec) Close() error { +func (*UnlockStatsExec) Close() error { return nil } // Open implements the Executor Open interface. -func (e *UnlockStatsExec) Open(_ context.Context) error { +func (*UnlockStatsExec) Open(context.Context) error { return nil } diff --git a/executor/mem_reader.go b/executor/mem_reader.go index 69c31e46cd35b..c0c7dd466ea51 100644 --- a/executor/mem_reader.go +++ b/executor/mem_reader.go @@ -762,7 +762,7 @@ func (m *memIndexLookUpReader) getMemRows(ctx context.Context) ([][]types.Datum, return memTblReader.getMemRows(ctx) } -func (m *memIndexLookUpReader) getMemRowsHandle() ([]kv.Handle, error) { +func (*memIndexLookUpReader) getMemRowsHandle() ([]kv.Handle, error) { return nil, errors.New("getMemRowsHandle has not been implemented for memIndexLookUpReader") } @@ -980,7 +980,7 @@ func (m *memIndexMergeReader) intersectionHandles(kvRanges [][]kv.KeyRange) (fin cnt := 1 hMap.Set(h, &cnt) } else { - *(cntPtr.(*int)) += 1 + *(cntPtr.(*int))++ } } } @@ -993,7 +993,7 @@ func (m *memIndexMergeReader) intersectionHandles(kvRanges [][]kv.KeyRange) (fin return finalHandles, nil } -func (m *memIndexMergeReader) getMemRowsHandle() ([]kv.Handle, error) { +func (*memIndexMergeReader) getMemRowsHandle() ([]kv.Handle, error) { return nil, errors.New("getMemRowsHandle has not been implemented for memIndexMergeReader") } diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go index 8d2634ba76696..60b34ac0129d3 100644 --- a/executor/memtable_reader.go +++ b/executor/memtable_reader.go @@ -78,7 +78,7 @@ type MemTableReaderExec struct { cacheRetrieved bool } -func (e *MemTableReaderExec) isInspectionCacheableTable(tblName string) bool { +func (*MemTableReaderExec) isInspectionCacheableTable(tblName string) bool { switch tblName { case strings.ToLower(infoschema.TableClusterConfig), strings.ToLower(infoschema.TableClusterInfo), @@ -579,7 +579,7 @@ func (e *clusterLogRetriever) close() error { return nil } -func (e *clusterLogRetriever) getRuntimeStats() execdetails.RuntimeStats { +func (*clusterLogRetriever) getRuntimeStats() execdetails.RuntimeStats { return nil } @@ -803,7 +803,7 @@ func (e *hotRegionsHistoryRetriver) retrieve(ctx context.Context, sctx sessionct return finalRows, nil } -func (e *hotRegionsHistoryRetriver) getHotRegionRowWithSchemaInfo( +func (*hotRegionsHistoryRetriver) getHotRegionRowWithSchemaInfo( hisHotRegion *HistoryHotRegion, tikvHelper *helper.Helper, tables []helper.TableInfoWithKeyRange, diff --git a/executor/metrics_reader.go b/executor/metrics_reader.go index d9e0bd39f1128..454e46989592e 100644 --- 
a/executor/metrics_reader.go +++ b/executor/metrics_reader.go @@ -142,8 +142,7 @@ func (e *MetricRetriever) getQueryRange(sctx sessionctx.Context) promQLQueryRang func (e *MetricRetriever) genRows(value pmodel.Value, quantile float64) [][]types.Datum { var rows [][]types.Datum - switch value.Type() { - case pmodel.ValMatrix: + if value.Type() == pmodel.ValMatrix { matrix := value.(pmodel.Matrix) for _, m := range matrix { for _, v := range m.Values { diff --git a/executor/opt_rule_blacklist.go b/executor/opt_rule_blacklist.go index 1525c79594141..b06c87cf616ee 100644 --- a/executor/opt_rule_blacklist.go +++ b/executor/opt_rule_blacklist.go @@ -32,7 +32,7 @@ type ReloadOptRuleBlacklistExec struct { } // Next implements the Executor Next interface. -func (e *ReloadOptRuleBlacklistExec) Next(ctx context.Context, _ *chunk.Chunk) error { +func (e *ReloadOptRuleBlacklistExec) Next(context.Context, *chunk.Chunk) error { internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) return LoadOptRuleBlacklist(internalCtx, e.Ctx()) } diff --git a/executor/pipelined_window.go b/executor/pipelined_window.go index 01164c878b7ee..4811e313c3b23 100644 --- a/executor/pipelined_window.go +++ b/executor/pipelined_window.go @@ -204,7 +204,7 @@ func (e *PipelinedWindowExec) getRowsInPartition(ctx context.Context) (err error return } -func (e *PipelinedWindowExec) fetchChild(ctx context.Context) (EOF bool, err error) { +func (e *PipelinedWindowExec) fetchChild(ctx context.Context) (eof bool, err error) { // TODO: reuse chunks childResult := tryNewCacheChunk(e.Children(0)) err = Next(ctx, e.Children(0), childResult) diff --git a/executor/plan_replayer.go b/executor/plan_replayer.go index 721628debc6d2..20cf10eeffe25 100644 --- a/executor/plan_replayer.go +++ b/executor/plan_replayer.go @@ -229,13 +229,13 @@ type PlanReplayerLoadInfo struct { type planReplayerDumpKeyType int -func (k planReplayerDumpKeyType) String() string { +func (planReplayerDumpKeyType) String() string { return "plan_replayer_dump_var" } type planReplayerLoadKeyType int -func (k planReplayerLoadKeyType) String() string { +func (planReplayerLoadKeyType) String() string { return "plan_replayer_load_var" } @@ -246,7 +246,7 @@ const PlanReplayerLoadVarKey planReplayerLoadKeyType = 0 const PlanReplayerDumpVarKey planReplayerDumpKeyType = 1 // Next implements the Executor Next interface. 
-func (e *PlanReplayerLoadExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *PlanReplayerLoadExec) Next(_ context.Context, req *chunk.Chunk) error { req.GrowAndReset(e.MaxChunkSize()) if len(e.info.Path) == 0 { return errors.New("plan replayer: file path is empty") @@ -267,7 +267,7 @@ func loadSetTiFlashReplica(ctx sessionctx.Context, z *zip.Reader) error { if err != nil { return errors.AddStack(err) } - //nolint: errcheck,all_revive + //nolint: errcheck,all_revive,revive defer v.Close() buf := new(bytes.Buffer) _, err = buf.ReadFrom(v) @@ -365,9 +365,9 @@ func loadVariables(ctx sessionctx.Context, z *zip.Reader) error { if err != nil { return errors.AddStack(err) } - //nolint: errcheck,all_revive + //nolint: errcheck,all_revive,revive defer v.Close() - _, err = toml.DecodeReader(v, &varMap) + _, err = toml.NewDecoder(v).Decode(&varMap) if err != nil { return errors.AddStack(err) } diff --git a/executor/point_get.go b/executor/point_get.go index 81e1278b4848a..b99a86aaabfef 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -394,7 +394,7 @@ func (e *PointGetExecutor) lockKeyIfExists(ctx context.Context, key []byte) ([]b func (e *PointGetExecutor) lockKeyBase(ctx context.Context, key []byte, - LockOnlyIfExists bool) ([]byte, error) { + lockOnlyIfExists bool) ([]byte, error) { if len(key) == 0 { return nil, nil } @@ -405,7 +405,7 @@ func (e *PointGetExecutor) lockKeyBase(ctx context.Context, if err != nil { return nil, err } - lockCtx.LockOnlyIfExists = LockOnlyIfExists + lockCtx.LockOnlyIfExists = lockOnlyIfExists lockCtx.InitReturnValues(1) err = doLockKeys(ctx, e.Ctx(), lockCtx, key) if err != nil { @@ -417,7 +417,7 @@ func (e *PointGetExecutor) lockKeyBase(ctx context.Context, if len(e.handleVal) > 0 { seVars.TxnCtx.SetPessimisticLockCache(e.idxKey, e.handleVal) } - if LockOnlyIfExists { + if lockOnlyIfExists { return e.getValueFromLockCtx(ctx, lockCtx, key) } } @@ -721,6 +721,6 @@ func (e *runtimeStatsWithSnapshot) Merge(other execdetails.RuntimeStats) { } // Tp implements the RuntimeStats interface. -func (e *runtimeStatsWithSnapshot) Tp() int { +func (*runtimeStatsWithSnapshot) Tp() int { return execdetails.TpRuntimeStatsWithSnapshot } diff --git a/executor/prepared.go b/executor/prepared.go index b5fd7ebed1fe6..f55166b328a37 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -73,7 +73,7 @@ func NewPrepareExec(ctx sessionctx.Context, sqlTxt string) *PrepareExec { } // Next implements the Executor Next interface. -func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *PrepareExec) Next(ctx context.Context, _ *chunk.Chunk) error { vars := e.Ctx().GetSessionVars() if e.ID != 0 { // Must be the case when we retry a prepare. @@ -164,7 +164,7 @@ type ExecuteExec struct { } // Next implements the Executor Next interface. -func (e *ExecuteExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (*ExecuteExec) Next(context.Context, *chunk.Chunk) error { return nil } @@ -191,7 +191,7 @@ type DeallocateExec struct { } // Next implements the Executor Next interface. 
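Review note: the loadVariables hunk above is a dependency API migration rather than a lint fix: BurntSushi/toml deprecated toml.DecodeReader in favor of the Decoder API, so the call becomes toml.NewDecoder(v).Decode(&varMap). A minimal sketch of the new entry point (hypothetical key, not the TiDB variable-dump format):

package main

import (
	"fmt"
	"strings"

	"github.com/BurntSushi/toml"
)

func main() {
	varMap := make(map[string]string)
	r := strings.NewReader(`max_execution_time = "0"`)
	// Replacement for the deprecated toml.DecodeReader(r, &varMap).
	if _, err := toml.NewDecoder(r).Decode(&varMap); err != nil {
		panic(err)
	}
	fmt.Println(varMap["max_execution_time"])
}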
-func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *DeallocateExec) Next(context.Context, *chunk.Chunk) error { vars := e.Ctx().GetSessionVars() id, ok := vars.PreparedStmtNameToID[e.Name] if !ok { diff --git a/executor/reload_expr_pushdown_blacklist.go b/executor/reload_expr_pushdown_blacklist.go index b6f1ef810756a..a991f32144ab8 100644 --- a/executor/reload_expr_pushdown_blacklist.go +++ b/executor/reload_expr_pushdown_blacklist.go @@ -34,7 +34,7 @@ type ReloadExprPushdownBlacklistExec struct { } // Next implements the Executor Next interface. -func (e *ReloadExprPushdownBlacklistExec) Next(ctx context.Context, _ *chunk.Chunk) error { +func (e *ReloadExprPushdownBlacklistExec) Next(context.Context, *chunk.Chunk) error { return LoadExprPushdownBlacklist(e.Ctx()) } @@ -53,7 +53,7 @@ func LoadExprPushdownBlacklist(sctx sessionctx.Context) (err error) { if alias, ok := funcName2Alias[name]; ok { name = alias } - var value uint32 = 0 + var value uint32 if val, ok := newBlocklist[name]; ok { value = val } diff --git a/executor/replace.go b/executor/replace.go index e62ae05b018c0..ef7ee918ee14f 100644 --- a/executor/replace.go +++ b/executor/replace.go @@ -117,7 +117,7 @@ func (e *ReplaceExec) replaceRow(ctx context.Context, r toBeCheckedRow) error { // 2. bool: true when found the duplicated key. This only means that duplicated key was found, // and the row was removed. // 3. error: the error. -func (e *ReplaceExec) removeIndexRow(ctx context.Context, txn kv.Transaction, r toBeCheckedRow) (bool, bool, error) { +func (e *ReplaceExec) removeIndexRow(ctx context.Context, txn kv.Transaction, r toBeCheckedRow) (rowUnchanged, foundDupKey bool, err error) { for _, uk := range r.uniqueKeys { _, handle, err := tables.FetchDuplicatedHandle(ctx, uk.newKey, true, txn, e.Table.Meta().ID, uk.commonHandle) if err != nil { @@ -151,7 +151,7 @@ func (e *ReplaceExec) exec(ctx context.Context, newRows [][]types.Datum) error { defer trace.StartRegion(ctx, "ReplaceExec").End() // Get keys need to be checked. - toBeCheckedRows, err := getKeysNeedCheck(ctx, e.Ctx(), e.Table, newRows) + toBeCheckedRows, err := getKeysNeedCheck(e.Ctx(), e.Table, newRows) if err != nil { return err } diff --git a/executor/revoke.go b/executor/revoke.go index 1e74cd620202d..3f13811ccafb1 100644 --- a/executor/revoke.go +++ b/executor/revoke.go @@ -60,7 +60,7 @@ type RevokeExec struct { } // Next implements the Executor Next interface. -func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *RevokeExec) Next(ctx context.Context, _ *chunk.Chunk) error { if e.done { return nil } diff --git a/executor/sample.go b/executor/sample.go index c6d897d49865c..a8a35561c94f1 100644 --- a/executor/sample.go +++ b/executor/sample.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/channel" "github.com/pingcap/tidb/util/chunk" decoder "github.com/pingcap/tidb/util/rowDecoder" "github.com/pingcap/tidb/util/tracing" @@ -47,14 +48,14 @@ type TableSampleExecutor struct { } // Open initializes necessary variables for using this executor. -func (e *TableSampleExecutor) Open(ctx context.Context) error { +func (*TableSampleExecutor) Open(ctx context.Context) error { defer tracing.StartRegion(ctx, "TableSampleExecutor.Open").End() return nil } // Next fills data into the chunk passed by its caller. // The task was actually done by sampler. 
-func (e *TableSampleExecutor) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *TableSampleExecutor) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() if e.sampler.finished() { return nil @@ -64,7 +65,7 @@ func (e *TableSampleExecutor) Next(ctx context.Context, req *chunk.Chunk) error } // Close implements the Executor Close interface. -func (e *TableSampleExecutor) Close() error { +func (*TableSampleExecutor) Close() error { return nil } @@ -379,8 +380,7 @@ func (s *sampleSyncer) sync() error { defer func() { for _, f := range s.fetchers { // Cleanup channels to terminate fetcher goroutines. - for _, ok := <-f.kvChan; ok; { - } + channel.Clear(f.kvChan) } }() for i := 0; i < s.totalCount; i++ { diff --git a/executor/select_into.go b/executor/select_into.go index 89ccb33d8cf16..4caff59ea50a9 100644 --- a/executor/select_into.go +++ b/executor/select_into.go @@ -71,7 +71,7 @@ func (s *SelectIntoExec) Open(ctx context.Context) error { } // Next implements the Executor Next interface. -func (s *SelectIntoExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (s *SelectIntoExec) Next(ctx context.Context, _ *chunk.Chunk) error { for { if err := Next(ctx, s.Children(0), s.chk); err != nil { return err @@ -86,7 +86,7 @@ func (s *SelectIntoExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } -func (s *SelectIntoExec) considerEncloseOpt(et types.EvalType) bool { +func (*SelectIntoExec) considerEncloseOpt(et types.EvalType) bool { return et == types.ETString || et == types.ETDuration || et == types.ETTimestamp || et == types.ETDatetime || et == types.ETJson diff --git a/executor/set.go b/executor/set.go index f52130fc9ac7e..72188f102c0b2 100644 --- a/executor/set.go +++ b/executor/set.go @@ -257,19 +257,19 @@ func (e *SetExecutor) setCharset(cs, co string, isSetName bool) error { return errors.Trace(err) } } - csDb, err := sessionVars.GlobalVarsAccessor.GetGlobalSysVar(variable.CharsetDatabase) + csDB, err := sessionVars.GlobalVarsAccessor.GetGlobalSysVar(variable.CharsetDatabase) if err != nil { return err } - coDb, err := sessionVars.GlobalVarsAccessor.GetGlobalSysVar(variable.CollationDatabase) + coDB, err := sessionVars.GlobalVarsAccessor.GetGlobalSysVar(variable.CollationDatabase) if err != nil { return err } - err = sessionVars.SetSystemVar(variable.CharacterSetConnection, csDb) + err = sessionVars.SetSystemVar(variable.CharacterSetConnection, csDB) if err != nil { return errors.Trace(err) } - return errors.Trace(sessionVars.SetSystemVar(variable.CollationConnection, coDb)) + return errors.Trace(sessionVars.SetSystemVar(variable.CollationConnection, coDB)) } func (e *SetExecutor) getVarValue(ctx context.Context, v *expression.VarAssignment, sysVar *variable.SysVar) (value string, err error) { diff --git a/executor/set_config.go b/executor/set_config.go index 0fb8c5d255ce1..8f9f73af0c47a 100644 --- a/executor/set_config.go +++ b/executor/set_config.go @@ -46,7 +46,7 @@ type SetConfigExec struct { } // Open implements the Executor Open interface. -func (s *SetConfigExec) Open(ctx context.Context) error { +func (s *SetConfigExec) Open(context.Context) error { if s.p.Type != "" { s.p.Type = strings.ToLower(s.p.Type) if s.p.Type != "tikv" && s.p.Type != "tidb" && s.p.Type != "pd" && s.p.Type != "tiflash" { @@ -84,7 +84,7 @@ var TestSetConfigServerInfoKey stringutil.StringerStr = "TestSetConfigServerInfo var TestSetConfigHTTPHandlerKey stringutil.StringerStr = "TestSetConfigHTTPHandlerKey" // Next implements the Executor Next interface. 
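Review note: the sampleSyncer hunk above fixes more than style. The removed loop, `for _, ok := <-f.kvChan; ok; { }`, performs its receive only once, in the for statement's init clause; after that the condition keeps re-testing the same stale ok, so the loop either exits immediately or spins forever without draining anything. channel.Clear, a plain for-range over the channel, receives on every iteration and returns when the channel is closed, which is evidently the intended behavior:

package main

import "fmt"

func main() {
	ch := make(chan int, 2)
	ch <- 1
	ch <- 2
	close(ch)

	// Correct drain: one receive per iteration, terminating on close.
	for range ch {
	}
	fmt.Println("remaining:", len(ch)) // remaining: 0
}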
-func (s *SetConfigExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (s *SetConfigExec) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() getServerFunc := infoschema.GetClusterServerInfo if v := s.Ctx().Value(TestSetConfigServerInfoKey); v != nil { diff --git a/executor/show.go b/executor/show.go index 5f22d8271c7cb..2c5cb941cd42b 100644 --- a/executor/show.go +++ b/executor/show.go @@ -70,6 +70,7 @@ import ( "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/dbterror/exeerrors" "github.com/pingcap/tidb/util/etcd" + "github.com/pingcap/tidb/util/filter" "github.com/pingcap/tidb/util/format" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/hint" @@ -180,7 +181,7 @@ func (e *ShowExec) fetchAll(ctx context.Context) error { case ast.ShowColumns: return e.fetchShowColumns(ctx) case ast.ShowConfig: - return e.fetchShowClusterConfigs(ctx) + return e.fetchShowClusterConfigs() case ast.ShowCreateTable: return e.fetchShowCreateTable() case ast.ShowCreateSequence: @@ -299,8 +300,7 @@ type visibleChecker struct { } func (v *visibleChecker) Enter(in ast.Node) (out ast.Node, skipChildren bool) { - switch x := in.(type) { - case *ast.TableName: + if x, ok := in.(*ast.TableName); ok { schema := x.Schema.L if schema == "" { schema = v.defaultDB @@ -317,7 +317,7 @@ func (v *visibleChecker) Enter(in ast.Node) (out ast.Node, skipChildren bool) { return in, false } -func (v *visibleChecker) Leave(in ast.Node) (out ast.Node, ok bool) { +func (*visibleChecker) Leave(in ast.Node) (out ast.Node, ok bool) { return in, true } @@ -380,7 +380,7 @@ func (e *ShowExec) fetchShowBind() error { if !checker.ok { continue } - e.appendRow([]interface{}{ + e.appendRow([]any{ bindData.OriginalSQL, hint.BindSQL, bindData.Db, @@ -421,7 +421,7 @@ func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { memUsage := handle.GetMemUsage() memCapacity := handle.GetMemCapacity() - e.appendRow([]interface{}{ + e.appendRow([]any{ numBindings, rows[0].GetInt64(0), memory.FormatBytes(memUsage), @@ -445,14 +445,14 @@ func (e *ShowExec) fetchShowEngines(ctx context.Context) error { // moveInfoSchemaToFront moves information_schema to the first, and the others are sorted in the origin ascending order. func moveInfoSchemaToFront(dbs []string) { - if len(dbs) > 0 && strings.EqualFold(dbs[0], "INFORMATION_SCHEMA") { + if len(dbs) > 0 && strings.EqualFold(dbs[0], filter.InformationSchemaName) { return } - i := sort.SearchStrings(dbs, "INFORMATION_SCHEMA") - if i < len(dbs) && strings.EqualFold(dbs[i], "INFORMATION_SCHEMA") { + i := sort.SearchStrings(dbs, filter.InformationSchemaName) + if i < len(dbs) && strings.EqualFold(dbs[i], filter.InformationSchemaName) { copy(dbs[1:i+1], dbs[0:i]) - dbs[0] = "INFORMATION_SCHEMA" + dbs[0] = filter.InformationSchemaName } } @@ -513,7 +513,7 @@ func (e *ShowExec) fetchShowProcessList() error { return nil } -func (e *ShowExec) fetchShowOpenTables() error { +func (*ShowExec) fetchShowOpenTables() error { // TiDB has no concept like mysql's "table cache" and "open table" // For simplicity, we just return an empty result with the same structure as MySQL's SHOW OPEN TABLES return nil @@ -1059,7 +1059,7 @@ func constructResultOfShowCreateTable(ctx sessionctx.Context, dbName *model.CISt default: defaultValStr := fmt.Sprintf("%v", defaultValue) // If column is timestamp, and default value is not current_timestamp, should convert the default value to the current session time zone. 
- if col.GetType() == mysql.TypeTimestamp && defaultValStr != types.ZeroDatetimeStr { + if defaultValStr != types.ZeroDatetimeStr && col.GetType() == mysql.TypeTimestamp { timeValue, err := table.GetColDefaultValue(ctx, col) if err != nil { return errors.Trace(err) @@ -1407,7 +1407,7 @@ var TestShowClusterConfigKey stringutil.StringerStr = "TestShowClusterConfigKey" // TestShowClusterConfigFunc is used to test 'show config ...'. type TestShowClusterConfigFunc func() ([][]types.Datum, error) -func (e *ShowExec) fetchShowClusterConfigs(ctx context.Context) error { +func (e *ShowExec) fetchShowClusterConfigs() error { emptySet := set.NewStringSet() var confItems [][]types.Datum var err error @@ -1832,11 +1832,11 @@ func (e *ShowExec) fetchShowPrivileges() error { return nil } -func (e *ShowExec) fetchShowTriggers() error { +func (*ShowExec) fetchShowTriggers() error { return nil } -func (e *ShowExec) fetchShowProcedureStatus() error { +func (*ShowExec) fetchShowProcedureStatus() error { return nil } diff --git a/executor/show_placement.go b/executor/show_placement.go index f577c3bb2b6be..1bd9203f94c06 100644 --- a/executor/show_placement.go +++ b/executor/show_placement.go @@ -95,7 +95,7 @@ func (b *showPlacementLabelsResultBuilder) BuildRows() ([][]interface{}, error) return rows, nil } -func (b *showPlacementLabelsResultBuilder) sortMapKeys(m map[string]interface{}) []string { +func (*showPlacementLabelsResultBuilder) sortMapKeys(m map[string]interface{}) []string { sorted := make([]string, 0, len(m)) for key := range m { sorted = append(sorted, key) @@ -269,7 +269,7 @@ func (e *ShowExec) fetchAllDBPlacements(ctx context.Context, scheduleState map[i slices.SortFunc(dbs, func(i, j *model.DBInfo) bool { return i.Name.O < j.Name.O }) for _, dbInfo := range dbs { - if e.Ctx().GetSessionVars().User != nil && checker != nil && !checker.DBIsVisible(activeRoles, dbInfo.Name.O) { + if checker != nil && e.Ctx().GetSessionVars().User != nil && !checker.DBIsVisible(activeRoles, dbInfo.Name.O) { continue } diff --git a/executor/show_stats.go b/executor/show_stats.go index babd61b3360e1..ea9dcfb7b7144 100644 --- a/executor/show_stats.go +++ b/executor/show_stats.go @@ -257,7 +257,7 @@ func (e *ShowExec) histogramToRow(dbName, tblName, partitionName, colName string }) } -func (e *ShowExec) versionToTime(version uint64) types.Time { +func (*ShowExec) versionToTime(version uint64) types.Time { t := oracle.GetTimeFromTS(version) return types.NewTime(types.FromGoTime(t), mysql.TypeDatetime, 0) } diff --git a/executor/shuffle.go b/executor/shuffle.go index 4d2c02cf73f60..777b83642d927 100644 --- a/executor/shuffle.go +++ b/executor/shuffle.go @@ -299,6 +299,7 @@ func (e *ShuffleExec) fetchDataAndSplit(ctx context.Context, dataSourceIndex int case <-e.finishCh: return case results[workerIdx] = <-w.receivers[dataSourceIndex].inputHolderCh: + //nolint: revive break } } @@ -346,7 +347,7 @@ func (e *shuffleReceiver) Close() error { // Next implements the Executor Next interface. // It is called by `Tail` executor within "shuffle", to fetch data from `DataSource` by `inputCh`. 
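Review note: the two condition reorderings above (the ZeroDatetimeStr string comparison moved before col.GetType(), and checker != nil moved before the session-vars lookup) preserve semantics but presumably let && short-circuit on the cheaper or more decisive operand first. Go guarantees left-to-right evaluation with short-circuiting, as this toy example shows (cheap and expensive are hypothetical stand-ins):

package main

import "fmt"

func cheap() bool     { return false }
func expensive() bool { fmt.Println("expensive ran"); return true }

func main() {
	// expensive() is never evaluated: cheap() already decides the &&.
	if cheap() && expensive() {
		fmt.Println("both true")
	}
	fmt.Println("done")
}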
-func (e *shuffleReceiver) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *shuffleReceiver) Next(_ context.Context, req *chunk.Chunk) error { req.Reset() if e.executed { return nil @@ -459,7 +460,7 @@ func buildPartitionRangeSplitter(ctx sessionctx.Context, concurrency int, byItem // This method is supposed to be used for shuffle with sorted `dataSource` // the caller of this method should guarantee that `input` is grouped, // which means that rows with the same byItems should be continuous, the order does not matter. -func (s *partitionRangeSplitter) split(ctx sessionctx.Context, input *chunk.Chunk, workerIndices []int) ([]int, error) { +func (s *partitionRangeSplitter) split(_ sessionctx.Context, input *chunk.Chunk, workerIndices []int) ([]int, error) { _, err := s.groupChecker.splitIntoGroups(input) if err != nil { return workerIndices, err diff --git a/executor/simple.go b/executor/simple.go index 6d1f6955f9b1d..81c49f700325f 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -127,7 +127,7 @@ func clearSysSession(ctx context.Context, sctx sessionctx.Context) { } // Next implements the Executor Next interface. -func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { +func (e *SimpleExec) Next(ctx context.Context, _ *chunk.Chunk) (err error) { if e.done { return nil } @@ -152,7 +152,7 @@ func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { case *ast.BeginStmt: err = e.executeBegin(ctx, x) case *ast.CommitStmt: - e.executeCommit(x) + e.executeCommit() case *ast.SavepointStmt: err = e.executeSavepoint(x) case *ast.ReleaseSavepointStmt: @@ -185,7 +185,7 @@ func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { case *ast.SetDefaultRoleStmt: err = e.executeSetDefaultRole(ctx, x) case *ast.ShutdownStmt: - err = e.executeShutdown(x) + err = e.executeShutdown() case *ast.AdminStmt: err = e.executeAdmin(x) case *ast.SetResourceGroupStmt: @@ -469,7 +469,7 @@ func (e *SimpleExec) setRoleRegular(s *ast.SetRoleStmt) error { return nil } -func (e *SimpleExec) setRoleAll(s *ast.SetRoleStmt) error { +func (e *SimpleExec) setRoleAll() error { // Deal with SQL like `SET ROLE ALL;` checker := privilege.GetPrivilegeManager(e.Ctx()) user, host := e.Ctx().GetSessionVars().User.AuthUsername, e.Ctx().GetSessionVars().User.AuthHostname @@ -521,7 +521,7 @@ func (e *SimpleExec) setRoleAllExcept(s *ast.SetRoleStmt) error { return nil } -func (e *SimpleExec) setRoleDefault(s *ast.SetRoleStmt) error { +func (e *SimpleExec) setRoleDefault() error { // Deal with SQL like `SET ROLE DEFAULT;` checker := privilege.GetPrivilegeManager(e.Ctx()) user, host := e.Ctx().GetSessionVars().User.AuthUsername, e.Ctx().GetSessionVars().User.AuthHostname @@ -534,7 +534,7 @@ func (e *SimpleExec) setRoleDefault(s *ast.SetRoleStmt) error { return nil } -func (e *SimpleExec) setRoleNone(s *ast.SetRoleStmt) error { +func (e *SimpleExec) setRoleNone() error { // Deal with SQL like `SET ROLE NONE;` checker := privilege.GetPrivilegeManager(e.Ctx()) roles := make([]*auth.RoleIdentity, 0) @@ -551,13 +551,13 @@ func (e *SimpleExec) executeSetRole(s *ast.SetRoleStmt) error { case ast.SetRoleRegular: return e.setRoleRegular(s) case ast.SetRoleAll: - return e.setRoleAll(s) + return e.setRoleAll() case ast.SetRoleAllExcept: return e.setRoleAllExcept(s) case ast.SetRoleNone: - return e.setRoleNone(s) + return e.setRoleNone() case ast.SetRoleDefault: - return e.setRoleDefault(s) + return e.setRoleDefault() } return nil } @@ -763,7 +763,7 @@ func (e 
*SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } -func (e *SimpleExec) executeCommit(s *ast.CommitStmt) { +func (e *SimpleExec) executeCommit() { e.Ctx().GetSessionVars().SetInTxn(false) } @@ -1052,7 +1052,7 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm if err != nil { return err } - PasswordLocking := createUserFailedLoginJSON(plOptions) + passwordLocking := createUserFailedLoginJSON(plOptions) if s.IsCreateRole { plOptions.lockAccount = "Y" plOptions.passwordExpired = "Y" @@ -1085,15 +1085,14 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm } // If FAILED_LOGIN_ATTEMPTS and PASSWORD_LOCK_TIME are both specified to 0, a string of 0 length is generated. // When inserting the attempts into json, an error occurs. This requires special handling. - if PasswordLocking != "" { - userAttributes = append(userAttributes, PasswordLocking) + if passwordLocking != "" { + userAttributes = append(userAttributes, passwordLocking) } userAttributesStr := fmt.Sprintf("{%s}", strings.Join(userAttributes, ",")) tokenIssuer := "" for _, authTokenOption := range s.AuthTokenOrTLSOptions { - switch authTokenOption.Type { - case ast.TokenIssuer: + if authTokenOption.Type == ast.TokenIssuer { tokenIssuer = authTokenOption.Value } } @@ -1751,15 +1750,15 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) type AuthTokenOptionHandler int const ( - // NoNeedAuthTokenOptions means the final auth plugin is NOT tidb_auth_plugin - NoNeedAuthTokenOptions AuthTokenOptionHandler = iota + // noNeedAuthTokenOptions means the final auth plugin is NOT tidb_auth_plugin + noNeedAuthTokenOptions AuthTokenOptionHandler = iota // OptionalAuthTokenOptions means the final auth_plugin is tidb_auth_plugin, // and whether to declare AuthTokenOptions or not is ok. 
OptionalAuthTokenOptions // RequireAuthTokenOptions means the final auth_plugin is tidb_auth_plugin and need AuthTokenOptions here RequireAuthTokenOptions ) - authTokenOptionHandler := NoNeedAuthTokenOptions + authTokenOptionHandler := noNeedAuthTokenOptions currentAuthPlugin, err := privilege.GetPrivilegeManager(e.Ctx()).GetAuthPlugin(spec.User.Username, spec.User.Hostname) if err != nil { return err @@ -1780,7 +1779,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } switch spec.AuthOpt.AuthPlugin { case mysql.AuthNativePassword, mysql.AuthCachingSha2Password, mysql.AuthTiDBSM3Password, mysql.AuthSocket, mysql.AuthLDAPSimple, mysql.AuthLDAPSASL, "": - authTokenOptionHandler = NoNeedAuthTokenOptions + authTokenOptionHandler = noNeedAuthTokenOptions case mysql.AuthTiDBAuthToken: if authTokenOptionHandler != OptionalAuthTokenOptions { authTokenOptionHandler = RequireAuthTokenOptions @@ -1907,7 +1906,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } switch authTokenOptionHandler { - case NoNeedAuthTokenOptions: + case noNeedAuthTokenOptions: if len(authTokenOptions) > 0 { err := errors.New("TOKEN_ISSUER is not needed for the auth plugin") e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) @@ -2711,7 +2710,7 @@ func (e *SimpleExec) autoNewTxn() bool { return false } -func (e *SimpleExec) executeShutdown(s *ast.ShutdownStmt) error { +func (e *SimpleExec) executeShutdown() error { sessVars := e.Ctx().GetSessionVars() logutil.BgLogger().Info("execute shutdown statement", zap.Uint64("conn", sessVars.ConnectionID)) p, err := os.FindProcess(os.Getpid()) diff --git a/executor/slow_query.go b/executor/slow_query.go index f7878839ae24b..1e70bee8fd3db 100644 --- a/executor/slow_query.go +++ b/executor/slow_query.go @@ -87,7 +87,7 @@ func (e *slowQueryRetriever) retrieve(ctx context.Context, sctx sessionctx.Conte ctx, e.cancel = context.WithCancel(ctx) e.initializeAsyncParsing(ctx, sctx) } - return e.dataForSlowLog(ctx, sctx) + return e.dataForSlowLog(ctx) } func (e *slowQueryRetriever) initialize(ctx context.Context, sctx sessionctx.Context) error { @@ -106,7 +106,7 @@ func (e *slowQueryRetriever) initialize(ctx context.Context, sctx sessionctx.Con } continue } - factory, err := getColumnValueFactoryByName(sctx, col.Name.O, idx) + factory, err := getColumnValueFactoryByName(col.Name.O, idx) if err != nil { return err } @@ -213,7 +213,7 @@ func (e *slowQueryRetriever) parseDataForSlowLog(ctx context.Context, sctx sessi e.parseSlowLog(ctx, sctx, reader, ParseSlowLogBatchSize) } -func (e *slowQueryRetriever) dataForSlowLog(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { +func (e *slowQueryRetriever) dataForSlowLog(ctx context.Context) ([][]types.Datum, error) { var ( task slowLogTask ok bool @@ -484,7 +484,7 @@ func (e *slowQueryRetriever) parseSlowLog(ctx context.Context, sctx sessionctx.C } } -func (e *slowQueryRetriever) sendParsedSlowLogCh(t slowLogTask, re parsedSlowLog) { +func (*slowQueryRetriever) sendParsedSlowLogCh(t slowLogTask, re parsedSlowLog) { select { case t.resultCh <- re: default: @@ -681,7 +681,7 @@ func parseUserOrHostValue(value string) string { return strings.TrimSpace(tmp[0]) } -func getColumnValueFactoryByName(sctx sessionctx.Context, colName string, columnIdx int) (slowQueryColumnValueFactory, error) { +func getColumnValueFactoryByName(colName string, columnIdx int) (slowQueryColumnValueFactory, error) { switch colName { case variable.SlowLogTimeStr: return func(row []types.Datum, 
diff --git a/executor/sort.go b/executor/sort.go
index 97ace1c18e124..4210a086bcf30 100644
--- a/executor/sort.go
+++ b/executor/sort.go
@@ -294,7 +294,7 @@ func (h *multiWayMerge) Len() int {
 	return len(h.elements)
 }

-func (h *multiWayMerge) Push(x interface{}) {
+func (*multiWayMerge) Push(interface{}) {
 	// Should never be called.
 }
@@ -355,7 +355,7 @@ func (h *topNChunkHeap) Len() int {
 	return len(h.rowPtrs)
 }

-func (h *topNChunkHeap) Push(x interface{}) {
+func (*topNChunkHeap) Push(interface{}) {
 	// Should never be called.
 }
diff --git a/executor/split.go b/executor/split.go
index 10dfefea82310..005090aa638b6 100644
--- a/executor/split.go
+++ b/executor/split.go
@@ -66,7 +66,7 @@ type splitRegionResult struct {
 }

 // Open implements the Executor Open interface.
-func (e *SplitIndexRegionExec) Open(ctx context.Context) (err error) {
+func (e *SplitIndexRegionExec) Open(context.Context) (err error) {
 	e.splitIdxKeys, err = e.getSplitIdxKeys()
 	return err
 }
@@ -338,7 +338,7 @@ type SplitTableRegionExec struct {
 }

 // Open implements the Executor Open interface.
-func (e *SplitTableRegionExec) Open(ctx context.Context) (err error) {
+func (e *SplitTableRegionExec) Open(context.Context) (err error) {
 	e.splitKeys, err = e.getSplitTableKeys()
 	return err
 }
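Reviewer note: where a signature is pinned by an interface, the parameter cannot simply be deleted, so these hunks blank it instead: `container/heap` dictates `Push(x interface{})` for `multiWayMerge` and `topNChunkHeap`, the Executor interface dictates `Open(ctx context.Context)`, and receivers that are never read get the same unnamed `(*T)` treatment (revive's `unused-receiver` rule). A runnable sketch of that idiom; the `intHeap` type is ours, not from the patch:

```go
package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a toy min-heap; only heap.Init and heap.Pop drive it below.
type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push is required by heap.Interface but never called in this usage,
// so both the receiver and the parameter are left unnamed.
func (*intHeap) Push(interface{}) {
	// Should never be called.
}

func (h *intHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

func main() {
	h := intHeap{3, 1, 2}
	heap.Init(&h)
	fmt.Println(heap.Pop(&h)) // 1
}
```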
diff --git a/executor/stmtsummary.go b/executor/stmtsummary.go
index de7cbef6e1aa6..272d863d38de4 100644
--- a/executor/stmtsummary.go
+++ b/executor/stmtsummary.go
@@ -35,7 +35,6 @@ const (
 )

 func buildStmtSummaryRetriever(
-	ctx sessionctx.Context,
 	table *model.TableInfo,
 	columns []*model.ColumnInfo,
 	extractor *plannercore.StatementsSummaryExtractor,
@@ -73,7 +72,7 @@ type dummyRetriever struct {
 	dummyCloser
 }

-func (e *dummyRetriever) retrieve(_ context.Context, _ sessionctx.Context) ([][]types.Datum, error) {
+func (*dummyRetriever) retrieve(_ context.Context, _ sessionctx.Context) ([][]types.Datum, error) {
 	return nil, nil
 }
@@ -87,7 +86,7 @@ type stmtSummaryRetriever struct {
 	rowsReader *rowsReader
 }

-func (e *stmtSummaryRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
+func (e *stmtSummaryRetriever) retrieve(_ context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
 	if err := e.ensureRowsReader(sctx); err != nil {
 		return nil, err
 	}
@@ -101,7 +100,7 @@ func (e *stmtSummaryRetriever) close() error {
 	return nil
 }

-func (e *stmtSummaryRetriever) getRuntimeStats() execdetails.RuntimeStats {
+func (*stmtSummaryRetriever) getRuntimeStats() execdetails.RuntimeStats {
 	return nil
 }
@@ -195,7 +194,7 @@ func (r *stmtSummaryRetrieverV2) close() error {
 	return nil
 }

-func (r *stmtSummaryRetrieverV2) getRuntimeStats() execdetails.RuntimeStats {
+func (*stmtSummaryRetrieverV2) getRuntimeStats() execdetails.RuntimeStats {
 	return nil
 }
diff --git a/executor/update.go b/executor/update.go
index 7929a97a79d2e..f76aeb770c875 100644
--- a/executor/update.go
+++ b/executor/update.go
@@ -169,7 +169,7 @@ func (e *UpdateExec) merge(row, newData []types.Datum, mergeGenerated bool) erro
 	return nil
 }

-func (e *UpdateExec) exec(ctx context.Context, schema *expression.Schema, row, newData []types.Datum) error {
+func (e *UpdateExec) exec(ctx context.Context, _ *expression.Schema, row, newData []types.Datum) error {
 	defer trace.StartRegion(ctx, "UpdateExec").End()
 	bAssignFlag := make([]bool, len(e.assignFlag))
 	for i, flag := range e.assignFlag {
@@ -326,7 +326,7 @@ func (e *UpdateExec) updateRows(ctx context.Context) (int, error) {
 	return totalNumRows, nil
 }

-func (e *UpdateExec) handleErr(colName model.CIStr, rowIdx int, err error) error {
+func (*UpdateExec) handleErr(colName model.CIStr, rowIdx int, err error) error {
 	if err == nil {
 		return nil
 	}
@@ -535,7 +535,7 @@ func (e *updateRuntimeStats) Merge(other execdetails.RuntimeStats) {
 }

 // Tp implements the RuntimeStats interface.
-func (e *updateRuntimeStats) Tp() int {
+func (*updateRuntimeStats) Tp() int {
 	return execdetails.TpUpdateRuntimeStats
 }
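Reviewer note: the retriever and `UpdateExec` hunks use the third variant of the same fix. When only some arguments of a fixed signature go unused, they are replaced with `_` (`retrieve(_ context.Context, sctx sessionctx.Context)`, `exec(ctx, _ *expression.Schema, ...)`) so the method still satisfies its interface and callers. A self-contained illustration under that assumption; this `retriever` interface is a stand-in, not the executor's real one:

```go
package main

import (
	"context"
	"fmt"
)

// retriever fixes one signature that every implementation must match.
type retriever interface {
	retrieve(ctx context.Context, limit int) ([]string, error)
}

type dummyRetriever struct{}

// Both parameters are dictated by the interface but unused here, so they
// are blanked with _ rather than removed; the receiver is unnamed too.
func (dummyRetriever) retrieve(_ context.Context, _ int) ([]string, error) {
	return nil, nil
}

func main() {
	var r retriever = dummyRetriever{}
	rows, err := r.retrieve(context.Background(), 10)
	fmt.Println(rows, err) // [] <nil>
}
```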
diff --git a/executor/window.go b/executor/window.go
index e585e2689c526..3c155b3218ede 100644
--- a/executor/window.go
+++ b/executor/window.go
@@ -151,7 +151,7 @@ func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) {
 	return nil
 }

-func (e *WindowExec) fetchChild(ctx context.Context) (EOF bool, err error) {
+func (e *WindowExec) fetchChild(ctx context.Context) (eof bool, err error) {
 	childResult := tryNewCacheChunk(e.Children(0))
 	err = Next(ctx, e.Children(0), childResult)
 	if err != nil {
@@ -288,7 +288,7 @@ func (p *rowFrameWindowProcessor) getEndOffset(numRows uint64) uint64 {
 	return 0
 }

-func (p *rowFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
+func (*rowFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
 	return rows, nil
 }
@@ -523,7 +523,7 @@ func (p *rangeFrameWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, r
 	return rows, nil
 }

-func (p *rangeFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
+func (*rangeFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
 	return rows, nil
 }
diff --git a/executor/write.go b/executor/write.go
index 29bc2adf401bd..9526c7d2e62d0 100644
--- a/executor/write.go
+++ b/executor/write.go
@@ -54,7 +54,7 @@ var (
 func updateRecord(
 	ctx context.Context, sctx sessionctx.Context, h kv.Handle, oldData, newData []types.Datum, modified []bool, t table.Table,
-	onDup bool, memTracker *memory.Tracker, fkChecks []*FKCheckExec, fkCascades []*FKCascadeExec,
+	onDup bool, _ *memory.Tracker, fkChecks []*FKCheckExec, fkCascades []*FKCascadeExec,
 ) (bool, error) {
 	r, ctx := tracing.StartRegionEx(ctx, "executor.updateRecord")
 	defer r.End()
@@ -322,7 +322,7 @@ func rebaseAutoRandomValue(
 // resetErrDataTooLong reset ErrDataTooLong error msg.
 // types.ErrDataTooLong is produced in types.ProduceStrWithSpecifiedTp, there is no column info in there,
 // so we reset the error msg here, and wrap old err with errors.Wrap.
-func resetErrDataTooLong(colName string, rowIdx int, err error) error {
+func resetErrDataTooLong(colName string, rowIdx int, _ error) error {
 	newErr := types.ErrDataTooLong.GenWithStack("Data too long for column '%v' at row %v", colName, rowIdx)
 	return newErr
 }
diff --git a/server/conn.go b/server/conn.go
index 87d7372233240..45ecd4a18e7a6 100644
--- a/server/conn.go
+++ b/server/conn.go
@@ -1640,7 +1640,7 @@ func (cc *clientConn) handleIndexAdvise(ctx context.Context, indexAdviseInfo *ex
 		return errors.New("Index Advise: infile is empty")
 	}

-	if err := indexAdviseInfo.GetIndexAdvice(ctx, data); err != nil {
+	if err := indexAdviseInfo.GetIndexAdvice(data); err != nil {
 		return err
 	}
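Reviewer note: the remaining edits are equally mechanical. `fetchChild`'s named result `EOF` becomes `eof` (an all-caps local name reads as an exported initialism, which revive's `var-naming` rule rejects), and the earlier `maxNum -= 1` became `maxNum--` per `increment-decrement`. Both fixes in one toy function; the name `drain` is illustrative, not from this patch:

```go
package main

import "fmt"

// drain mirrors two fixes from this patch: the named result is spelled
// eof rather than EOF, and the decrement uses n-- rather than n -= 1.
func drain(n int) (eof bool) {
	for n > 0 {
		n-- // was: n -= 1
	}
	return n == 0
}

func main() {
	fmt.Println(drain(3)) // true
}
```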