From 57b49c786207864909eb4fd2c5929c19db231b44 Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Tue, 17 Oct 2023 15:22:59 +0800 Subject: [PATCH] *: replace mathutil.Max/Min with built-in max/min (#47700) ref pingcap/tidb#45933 --- br/pkg/lightning/backend/external/BUILD.bazel | 1 - .../lightning/backend/external/byte_reader.go | 3 +-- br/pkg/lightning/backend/kv/BUILD.bazel | 1 - br/pkg/lightning/backend/kv/session.go | 3 +-- br/pkg/lightning/backend/local/BUILD.bazel | 1 - br/pkg/lightning/backend/local/checksum.go | 3 +-- br/pkg/lightning/backend/local/local.go | 3 +-- br/pkg/lightning/backend/local/localhelper.go | 3 +-- br/pkg/lightning/backend/local/region_job.go | 3 +-- br/pkg/lightning/checkpoints/BUILD.bazel | 1 - br/pkg/lightning/checkpoints/checkpoints.go | 3 +-- br/pkg/lightning/config/BUILD.bazel | 1 - br/pkg/lightning/config/config.go | 3 +-- br/pkg/lightning/importer/BUILD.bazel | 1 - br/pkg/lightning/importer/import.go | 3 +-- br/pkg/lightning/importer/precheck_impl.go | 9 ++++----- br/pkg/lightning/importer/table_import.go | 7 +++---- br/pkg/lightning/mydump/BUILD.bazel | 1 - br/pkg/lightning/mydump/csv_parser.go | 3 +-- br/pkg/lightning/mydump/region.go | 3 +-- br/pkg/restore/client.go | 9 ++++----- br/pkg/restore/data.go | 11 +++++------ br/pkg/streamhelper/BUILD.bazel | 1 - br/pkg/streamhelper/client.go | 5 ++--- br/pkg/task/backup.go | 3 +-- br/pkg/task/backup_ebs.go | 3 +-- br/pkg/task/stream.go | 7 +++---- br/pkg/utils/BUILD.bazel | 1 - br/pkg/utils/retry.go | 3 +-- pkg/executor/BUILD.bazel | 1 - pkg/executor/adapter.go | 3 +-- pkg/executor/aggfuncs/BUILD.bazel | 1 - pkg/executor/aggfuncs/func_count_distinct.go | 3 +-- pkg/executor/aggregate/BUILD.bazel | 1 - pkg/executor/aggregate/agg_util.go | 3 +-- pkg/executor/builder.go | 5 ++--- pkg/executor/distsql.go | 5 ++--- pkg/executor/executor.go | 15 +++++++-------- pkg/executor/executor_required_rows_test.go | 5 ++--- pkg/executor/explain.go | 2 +- pkg/executor/index_merge_reader.go | 19 +++++++++---------- pkg/executor/infoschema_reader.go | 7 +++---- .../internal/calibrateresource/BUILD.bazel | 1 - .../calibrateresource/calibrate_resource.go | 5 ++--- pkg/executor/pipelined_window.go | 7 +++---- pkg/executor/show.go | 3 +-- pkg/executor/simple.go | 17 ++++++++--------- pkg/executor/sort.go | 3 +-- pkg/executor/split.go | 3 +-- .../table_readers_required_rows_test.go | 3 +-- pkg/executor/window.go | 3 +-- pkg/planner/core/exhaust_physical_plans.go | 3 +-- pkg/planner/core/logical_plan_builder.go | 9 ++++----- .../core/memtable_predicate_extractor.go | 13 ++++++------- pkg/planner/core/optimizer.go | 3 +-- pkg/planner/core/plan.go | 5 ++--- pkg/planner/core/plan_stats.go | 3 +-- pkg/planner/core/planbuilder.go | 3 +-- pkg/planner/core/rule_topn_push_down.go | 3 +-- pkg/planner/core/stats.go | 5 ++--- pkg/planner/core/task.go | 3 +-- pkg/statistics/BUILD.bazel | 1 - pkg/statistics/estimate.go | 6 ++---- .../handle/cache/internal/lfu/BUILD.bazel | 1 - .../handle/cache/internal/lfu/lfu_cache.go | 3 +-- pkg/statistics/handle/extstats/BUILD.bazel | 1 - .../handle/extstats/extended_stats.go | 3 +-- pkg/statistics/handle/storage/BUILD.bazel | 1 - pkg/statistics/handle/storage/read.go | 3 +-- pkg/statistics/scalar.go | 5 ++--- 70 files changed, 109 insertions(+), 179 deletions(-) diff --git a/br/pkg/lightning/backend/external/BUILD.bazel b/br/pkg/lightning/backend/external/BUILD.bazel index 999b8460fd9e3..34e04ec24137d 100644 --- a/br/pkg/lightning/backend/external/BUILD.bazel +++ b/br/pkg/lightning/backend/external/BUILD.bazel 
@@ -32,7 +32,6 @@ go_library( "//pkg/sessionctx/variable", "//pkg/util/hack", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/size", "@com_github_cockroachdb_pebble//:pebble", "@com_github_docker_go_units//:go-units", diff --git a/br/pkg/lightning/backend/external/byte_reader.go b/br/pkg/lightning/backend/external/byte_reader.go index d762699e2f658..86ffe9536f542 100644 --- a/br/pkg/lightning/backend/external/byte_reader.go +++ b/br/pkg/lightning/backend/external/byte_reader.go @@ -21,7 +21,6 @@ import ( "github.com/pingcap/tidb/br/pkg/membuf" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -247,7 +246,7 @@ func (r *byteReader) cloneSlices() { } func (r *byteReader) next(n int) []byte { - end := mathutil.Min(r.curBufOffset+n, len(r.curBuf)) + end := min(r.curBufOffset+n, len(r.curBuf)) ret := r.curBuf[r.curBufOffset:end] r.curBufOffset += len(ret) return ret diff --git a/br/pkg/lightning/backend/kv/BUILD.bazel b/br/pkg/lightning/backend/kv/BUILD.bazel index f104d0aee76ef..207297a01ddc1 100644 --- a/br/pkg/lightning/backend/kv/BUILD.bazel +++ b/br/pkg/lightning/backend/kv/BUILD.bazel @@ -34,7 +34,6 @@ go_library( "//pkg/tablecodec", "//pkg/types", "//pkg/util/chunk", - "//pkg/util/mathutil", "//pkg/util/topsql/stmtstats", "@com_github_docker_go_units//:go-units", "@com_github_pingcap_errors//:errors", diff --git a/br/pkg/lightning/backend/kv/session.go b/br/pkg/lightning/backend/kv/session.go index ac04a462e8618..eb6148a097b8e 100644 --- a/br/pkg/lightning/backend/kv/session.go +++ b/br/pkg/lightning/backend/kv/session.go @@ -33,7 +33,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/topsql/stmtstats" "go.uber.org/zap" ) @@ -110,7 +109,7 @@ func (mb *MemBuf) Recycle(buf *BytesBuf) { // AllocateBuf allocates a byte buffer. 
func (mb *MemBuf) AllocateBuf(size int) { mb.Lock() - size = mathutil.Max(units.MiB, int(utils.NextPowerOfTwo(int64(size)))*2) + size = max(units.MiB, int(utils.NextPowerOfTwo(int64(size)))*2) var ( existingBuf *BytesBuf existingBufIdx int diff --git a/br/pkg/lightning/backend/local/BUILD.bazel b/br/pkg/lightning/backend/local/BUILD.bazel index 34bfa91e5722f..7ff8ac076ccbd 100644 --- a/br/pkg/lightning/backend/local/BUILD.bazel +++ b/br/pkg/lightning/backend/local/BUILD.bazel @@ -59,7 +59,6 @@ go_library( "//pkg/util/compress", "//pkg/util/engine", "//pkg/util/hack", - "//pkg/util/mathutil", "//pkg/util/ranger", "@com_github_cockroachdb_pebble//:pebble", "@com_github_cockroachdb_pebble//sstable", diff --git a/br/pkg/lightning/backend/local/checksum.go b/br/pkg/lightning/backend/local/checksum.go index ef0f82d010a4d..e6c44d873e2d9 100644 --- a/br/pkg/lightning/backend/local/checksum.go +++ b/br/pkg/lightning/backend/local/checksum.go @@ -33,7 +33,6 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/sessionctx/variable" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tipb/go-tipb" tikvstore "github.com/tikv/client-go/v2/kv" "github.com/tikv/client-go/v2/oracle" @@ -332,7 +331,7 @@ func (e *TiKVChecksumManager) checksumDB(ctx context.Context, tableInfo *checkpo break } if distSQLScanConcurrency > MinDistSQLScanConcurrency { - distSQLScanConcurrency = mathutil.Max(distSQLScanConcurrency/2, MinDistSQLScanConcurrency) + distSQLScanConcurrency = max(distSQLScanConcurrency/2, MinDistSQLScanConcurrency) } } diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index 88819fa7d5f8a..73de300bb1372 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -60,7 +60,6 @@ import ( "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/engine" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/tikv/client-go/v2/oracle" tikvclient "github.com/tikv/client-go/v2/tikv" pd "github.com/tikv/pd/client" @@ -458,7 +457,7 @@ func NewBackendConfig(cfg *config.Config, maxOpenFiles int, keyspaceName, resour } func (c *BackendConfig) adjust() { - c.MaxOpenFiles = mathutil.Max(c.MaxOpenFiles, openFilesLowerThreshold) + c.MaxOpenFiles = max(c.MaxOpenFiles, openFilesLowerThreshold) } // Backend is a local backend. 
diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go index 9f4ca3bd3b59e..328f17445a3a9 100644 --- a/br/pkg/lightning/backend/local/localhelper.go +++ b/br/pkg/lightning/backend/local/localhelper.go @@ -41,7 +41,6 @@ import ( "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/util/codec" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/multierr" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -245,7 +244,7 @@ func (local *Backend) SplitAndScatterRegionByRanges( } var syncLock sync.Mutex - size := mathutil.Min(len(splitKeyMap), local.RegionSplitConcurrency) + size := min(len(splitKeyMap), local.RegionSplitConcurrency) ch := make(chan *splitInfo, size) eg, splitCtx := errgroup.WithContext(ctx) diff --git a/br/pkg/lightning/backend/local/region_job.go b/br/pkg/lightning/backend/local/region_job.go index fa67ce39e5a16..3d50fa32d5a20 100644 --- a/br/pkg/lightning/backend/local/region_job.go +++ b/br/pkg/lightning/backend/local/region_job.go @@ -37,7 +37,6 @@ import ( "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/util/codec" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/tikv/client-go/v2/util" "go.uber.org/zap" "google.golang.org/grpc" @@ -544,7 +543,7 @@ func (local *Backend) doIngest(ctx context.Context, j *regionJob) (*sst.IngestRe var resp *sst.IngestResponse for start := 0; start < len(j.writeResult.sstMeta); start += batch { - end := mathutil.Min(start+batch, len(j.writeResult.sstMeta)) + end := min(start+batch, len(j.writeResult.sstMeta)) ingestMetas := j.writeResult.sstMeta[start:end] log.FromContext(ctx).Debug("ingest meta", zap.Reflect("meta", ingestMetas)) diff --git a/br/pkg/lightning/checkpoints/BUILD.bazel b/br/pkg/lightning/checkpoints/BUILD.bazel index ad0e6e373d0f9..ed3c69ff04935 100644 --- a/br/pkg/lightning/checkpoints/BUILD.bazel +++ b/br/pkg/lightning/checkpoints/BUILD.bazel @@ -22,7 +22,6 @@ go_library( "//pkg/parser/model", "//pkg/types", "//pkg/util/chunk", - "//pkg/util/mathutil", "//pkg/util/sqlexec", "@com_github_joho_sqltocsv//:sqltocsv", "@com_github_pingcap_errors//:errors", diff --git a/br/pkg/lightning/checkpoints/checkpoints.go b/br/pkg/lightning/checkpoints/checkpoints.go index 4d0328a88ddc3..fd5f318f041c0 100644 --- a/br/pkg/lightning/checkpoints/checkpoints.go +++ b/br/pkg/lightning/checkpoints/checkpoints.go @@ -39,7 +39,6 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/version/build" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -544,7 +543,7 @@ type RebaseCheckpointMerger struct { // MergeInto implements TableCheckpointMerger.MergeInto. 
func (merger *RebaseCheckpointMerger) MergeInto(cpd *TableCheckpointDiff) { cpd.hasRebase = true - cpd.allocBase = mathutil.Max(cpd.allocBase, merger.AllocBase) + cpd.allocBase = max(cpd.allocBase, merger.AllocBase) } // DestroyedTableCheckpoint is the checkpoint for a table that has been diff --git a/br/pkg/lightning/config/BUILD.bazel b/br/pkg/lightning/config/BUILD.bazel index dbb7d17d6e317..9c54f5609fc93 100644 --- a/br/pkg/lightning/config/BUILD.bazel +++ b/br/pkg/lightning/config/BUILD.bazel @@ -18,7 +18,6 @@ go_library( "//pkg/config", "//pkg/parser/mysql", "//pkg/util", - "//pkg/util/mathutil", "//pkg/util/table-filter", "//pkg/util/table-router", "@com_github_burntsushi_toml//:toml", diff --git a/br/pkg/lightning/config/config.go b/br/pkg/lightning/config/config.go index 6d71f35795b92..db60aea6d7871 100644 --- a/br/pkg/lightning/config/config.go +++ b/br/pkg/lightning/config/config.go @@ -39,7 +39,6 @@ import ( tidbcfg "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/mathutil" filter "github.com/pingcap/tidb/pkg/util/table-filter" router "github.com/pingcap/tidb/pkg/util/table-router" "go.uber.org/atomic" @@ -1370,7 +1369,7 @@ func (c *Conflict) adjust(i *TikvImporter, l *Lightning) error { if c.MaxRecordRows < 0 { maxErr := l.MaxError // Compatible with the old behavior that records all syntax,charset,type errors. - maxAccepted := mathutil.Max(maxErr.Syntax.Load(), maxErr.Charset.Load(), maxErr.Type.Load()) + maxAccepted := max(maxErr.Syntax.Load(), maxErr.Charset.Load(), maxErr.Type.Load()) if maxAccepted < defaultMaxRecordRows { maxAccepted = defaultMaxRecordRows } diff --git a/br/pkg/lightning/importer/BUILD.bazel b/br/pkg/lightning/importer/BUILD.bazel index e8d2987c5ff0f..d9562bf523b44 100644 --- a/br/pkg/lightning/importer/BUILD.bazel +++ b/br/pkg/lightning/importer/BUILD.bazel @@ -72,7 +72,6 @@ go_library( "//pkg/util/dbterror", "//pkg/util/engine", "//pkg/util/extsort", - "//pkg/util/mathutil", "//pkg/util/mock", "//pkg/util/regexpr-router", "//pkg/util/set", diff --git a/br/pkg/lightning/importer/import.go b/br/pkg/lightning/importer/import.go index ea5a25458529c..a87bf13812ee5 100644 --- a/br/pkg/lightning/importer/import.go +++ b/br/pkg/lightning/importer/import.go @@ -60,7 +60,6 @@ import ( "github.com/pingcap/tidb/pkg/store/driver" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/collate" - "github.com/pingcap/tidb/pkg/util/mathutil" regexprrouter "github.com/pingcap/tidb/pkg/util/regexpr-router" "github.com/pingcap/tidb/pkg/util/set" "github.com/prometheus/client_golang/prometheus" @@ -844,7 +843,7 @@ func (rc *Controller) restoreSchema(ctx context.Context) error { // we can handle the duplicated created with createIfNotExist statement // and we will check the schema in TiDB is valid with the datafile in DataCheck later. 
logTask := log.FromContext(ctx).Begin(zap.InfoLevel, "restore all schema") - concurrency := mathutil.Min(rc.cfg.App.RegionConcurrency, 8) + concurrency := min(rc.cfg.App.RegionConcurrency, 8) childCtx, cancel := context.WithCancel(ctx) p := parser.New() p.SetSQLMode(rc.cfg.TiDB.SQLMode) diff --git a/br/pkg/lightning/importer/precheck_impl.go b/br/pkg/lightning/importer/precheck_impl.go index 380f7b7997797..8ca5ba560d39c 100644 --- a/br/pkg/lightning/importer/precheck_impl.go +++ b/br/pkg/lightning/importer/precheck_impl.go @@ -45,7 +45,6 @@ import ( "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/engine" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/set" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" @@ -280,8 +279,8 @@ func (ci *emptyRegionCheckItem) Check(ctx context.Context) (*precheck.CheckResul } tableCount += len(info.Tables) } - errorThrehold := mathutil.Max(errorEmptyRegionCntPerStore, tableCount*3) - warnThrehold := mathutil.Max(warnEmptyRegionCntPerStore, tableCount) + errorThrehold := max(errorEmptyRegionCntPerStore, tableCount*3) + warnThrehold := max(warnEmptyRegionCntPerStore, tableCount) var ( errStores []string warnStores []string @@ -380,7 +379,7 @@ func (ci *regionDistributionCheckItem) Check(ctx context.Context) (*precheck.Che } tableCount += len(info.Tables) } - threhold := mathutil.Max(checkRegionCntRatioThreshold, tableCount) + threhold := max(checkRegionCntRatioThreshold, tableCount) if maxStore.Status.RegionCount <= threhold { return theResult, nil } @@ -1350,7 +1349,7 @@ func (ci *tableEmptyCheckItem) Check(ctx context.Context) (*precheck.CheckResult var lock sync.Mutex tableNames := make([]string, 0) - concurrency := mathutil.Min(tableCount, ci.cfg.App.RegionConcurrency) + concurrency := min(tableCount, ci.cfg.App.RegionConcurrency) type tableNameComponents struct { DBName string TableName string diff --git a/br/pkg/lightning/importer/table_import.go b/br/pkg/lightning/importer/table_import.go index 953d5cd8f0abb..d6ffc1aba76bb 100644 --- a/br/pkg/lightning/importer/table_import.go +++ b/br/pkg/lightning/importer/table_import.go @@ -52,7 +52,6 @@ import ( "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/extsort" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/multierr" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -182,12 +181,12 @@ func (tr *TableImporter) importTable( // rebase the allocator so it exceeds the number of rows. 
if tr.tableInfo.Core.ContainsAutoRandomBits() { - cp.AllocBase = mathutil.Max(cp.AllocBase, tr.tableInfo.Core.AutoRandID) + cp.AllocBase = max(cp.AllocBase, tr.tableInfo.Core.AutoRandID) if err := tr.alloc.Get(autoid.AutoRandomType).Rebase(context.Background(), cp.AllocBase, false); err != nil { return false, err } } else { - cp.AllocBase = mathutil.Max(cp.AllocBase, tr.tableInfo.Core.AutoIncID) + cp.AllocBase = max(cp.AllocBase, tr.tableInfo.Core.AutoIncID) if err := tr.alloc.Get(autoid.RowIDAllocType).Rebase(context.Background(), cp.AllocBase, false); err != nil { return false, err } @@ -1226,7 +1225,7 @@ func (tr *TableImporter) importKV( regionSplitSize = int64(config.SplitRegionSize) if err := rc.taskMgr.CheckTasksExclusively(ctx, func(tasks []taskMeta) ([]taskMeta, error) { if len(tasks) > 0 { - regionSplitSize = int64(config.SplitRegionSize) * int64(mathutil.Min(len(tasks), config.MaxSplitRegionSizeRatio)) + regionSplitSize = int64(config.SplitRegionSize) * int64(min(len(tasks), config.MaxSplitRegionSizeRatio)) } return nil, nil }); err != nil { diff --git a/br/pkg/lightning/mydump/BUILD.bazel b/br/pkg/lightning/mydump/BUILD.bazel index 92f1971fb8ab0..10d112fab0562 100644 --- a/br/pkg/lightning/mydump/BUILD.bazel +++ b/br/pkg/lightning/mydump/BUILD.bazel @@ -27,7 +27,6 @@ go_library( "//pkg/parser/mysql", "//pkg/types", "//pkg/util/filter", - "//pkg/util/mathutil", "//pkg/util/regexpr-router", "//pkg/util/slice", "//pkg/util/table-filter", diff --git a/br/pkg/lightning/mydump/csv_parser.go b/br/pkg/lightning/mydump/csv_parser.go index 5d202689a0884..5ddc816bccb1e 100644 --- a/br/pkg/lightning/mydump/csv_parser.go +++ b/br/pkg/lightning/mydump/csv_parser.go @@ -29,7 +29,6 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/worker" tidbconfig "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" ) var ( @@ -267,7 +266,7 @@ func (parser *CSVParser) peekBytes(cnt int) ([]byte, error) { if len(parser.buf) == 0 { return nil, io.EOF } - cnt = mathutil.Min(cnt, len(parser.buf)) + cnt = min(cnt, len(parser.buf)) return parser.buf[:cnt], nil } diff --git a/br/pkg/lightning/mydump/region.go b/br/pkg/lightning/mydump/region.go index 0e24530f2364e..2ec8aa2db0753 100644 --- a/br/pkg/lightning/mydump/region.go +++ b/br/pkg/lightning/mydump/region.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/worker" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" "golang.org/x/sync/errgroup" ) @@ -218,7 +217,7 @@ func MakeTableRegions( start := time.Now() - concurrency := mathutil.Max(cfg.Concurrency, 2) + concurrency := max(cfg.Concurrency, 2) var fileRegionsMap sync.Map eg, egCtx := errgroup.WithContext(ctx) diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index a8e8d6a5135ea..5b1a1503bf829 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -59,7 +59,6 @@ import ( "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/sqlexec" filter "github.com/pingcap/tidb/pkg/util/table-filter" "github.com/tikv/client-go/v2/oracle" @@ -1031,7 +1030,7 @@ func (rc *Client) createTablesInWorkerPool(ctx context.Context, dom *domain.Doma numOfTables := len(tables) for lastSent := 0; lastSent < numOfTables; lastSent += int(rc.batchDdlSize) { - end := 
mathutil.Min(lastSent+int(rc.batchDdlSize), len(tables)) + end := min(lastSent+int(rc.batchDdlSize), len(tables)) log.Info("create tables", zap.Int("table start", lastSent), zap.Int("table end", end)) tableSlice := tables[lastSent:end] @@ -3003,8 +3002,8 @@ func (rc *Client) RestoreMetaKVFilesWithBatchMethod( batchSize = f.Length } else { if f.MinTs <= rangeMax && batchSize+f.Length <= MetaKVBatchSize { - rangeMin = mathutil.Min(rangeMin, f.MinTs) - rangeMax = mathutil.Max(rangeMax, f.MaxTs) + rangeMin = min(rangeMin, f.MinTs) + rangeMax = max(rangeMax, f.MaxTs) batchSize += f.Length } else { // Either f.MinTS > rangeMax or f.MinTs is the filterTs we need. @@ -3606,7 +3605,7 @@ func (rc *Client) ResetTiFlashReplicas(ctx context.Context, g glue.Glue, storage for _, s := range allSchema { for _, t := range s.Tables { if t.TiFlashReplica != nil { - expectTiFlashStoreCount = mathutil.Max(expectTiFlashStoreCount, t.TiFlashReplica.Count) + expectTiFlashStoreCount = max(expectTiFlashStoreCount, t.TiFlashReplica.Count) recorder.AddTable(t.ID, *t.TiFlashReplica) needTiFlash = true } diff --git a/br/pkg/restore/data.go b/br/pkg/restore/data.go index b8c8459df9a6b..48f015d80a85e 100644 --- a/br/pkg/restore/data.go +++ b/br/pkg/restore/data.go @@ -17,7 +17,6 @@ import ( "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/utils/storewatch" "github.com/pingcap/tidb/pkg/ddl" - "github.com/pingcap/tidb/pkg/util/mathutil" tikvstore "github.com/tikv/client-go/v2/kv" "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/txnkv/rangetask" @@ -226,7 +225,7 @@ func getStoreAddress(allStores []*metapb.Store, storeId uint64) string { func (recovery *Recovery) ReadRegionMeta(ctx context.Context) error { eg, ectx := errgroup.WithContext(ctx) totalStores := len(recovery.allStores) - workers := utils.NewWorkerPool(uint(mathutil.Min(totalStores, common.MaxStoreConcurrency)), "Collect Region Meta") // TODO: int overflow? + workers := utils.NewWorkerPool(uint(min(totalStores, common.MaxStoreConcurrency)), "Collect Region Meta") // TODO: int overflow? 
// TODO: optimize the ErroGroup when TiKV is panic metaChan := make(chan StoreMeta, 1024) @@ -339,7 +338,7 @@ func (recovery *Recovery) RecoverRegionOfStore(ctx context.Context, storeID uint func (recovery *Recovery) RecoverRegions(ctx context.Context) (err error) { eg, ectx := errgroup.WithContext(ctx) totalRecoveredStores := len(recovery.RecoveryPlan) - workers := utils.NewWorkerPool(uint(mathutil.Min(totalRecoveredStores, common.MaxStoreConcurrency)), "Recover Regions") + workers := utils.NewWorkerPool(uint(min(totalRecoveredStores, common.MaxStoreConcurrency)), "Recover Regions") for storeId, plan := range recovery.RecoveryPlan { if err := ectx.Err(); err != nil { @@ -403,7 +402,7 @@ func (recovery *Recovery) SpawnTiKVShutDownWatchers(ctx context.Context) { func (recovery *Recovery) WaitApply(ctx context.Context) (err error) { eg, ectx := errgroup.WithContext(ctx) totalStores := len(recovery.allStores) - workers := utils.NewWorkerPool(uint(mathutil.Min(totalStores, common.MaxStoreConcurrency)), "wait apply") + workers := utils.NewWorkerPool(uint(min(totalStores, common.MaxStoreConcurrency)), "wait apply") for _, store := range recovery.allStores { if err := ectx.Err(); err != nil { @@ -513,9 +512,9 @@ func (recovery *Recovery) MakeRecoveryPlan() error { regions[m.RegionId] = make([]*RecoverRegion, 0, len(recovery.allStores)) } regions[m.RegionId] = append(regions[m.RegionId], &RecoverRegion{m, storeId}) - maxId = mathutil.Max(maxId, mathutil.Max(m.RegionId, m.PeerId)) + maxId = max(maxId, max(m.RegionId, m.PeerId)) } - recovery.MaxAllocID = mathutil.Max(recovery.MaxAllocID, maxId) + recovery.MaxAllocID = max(recovery.MaxAllocID, maxId) } regionInfos := SortRecoverRegions(regions) diff --git a/br/pkg/streamhelper/BUILD.bazel b/br/pkg/streamhelper/BUILD.bazel index e1eb7adfb95ba..7e57de9888316 100644 --- a/br/pkg/streamhelper/BUILD.bazel +++ b/br/pkg/streamhelper/BUILD.bazel @@ -29,7 +29,6 @@ go_library( "//pkg/owner", "//pkg/util/codec", "//pkg/util/engine", - "//pkg/util/mathutil", "@com_github_gogo_protobuf//proto", "@com_github_golang_protobuf//proto", "@com_github_google_uuid//:uuid", diff --git a/br/pkg/streamhelper/client.go b/br/pkg/streamhelper/client.go index 7fdd49f79e5f2..ee4fb2fe02d2d 100644 --- a/br/pkg/streamhelper/client.go +++ b/br/pkg/streamhelper/client.go @@ -15,7 +15,6 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/redact" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/util/mathutil" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" ) @@ -368,7 +367,7 @@ func (t *Task) GetStorageCheckpoint(ctx context.Context) (uint64, error) { redact.Key(kv.Value)) } ts := binary.BigEndian.Uint64(kv.Value) - storageCheckpoint = mathutil.Max(storageCheckpoint, ts) + storageCheckpoint = max(storageCheckpoint, ts) } return storageCheckpoint, nil @@ -399,7 +398,7 @@ func (t *Task) GetGlobalCheckPointTS(ctx context.Context) (uint64, error) { return 0, errors.Trace(err) } - return mathutil.Max(checkpoint, ts), nil + return max(checkpoint, ts), nil } func (t *Task) UploadGlobalCheckpoint(ctx context.Context, ts uint64) error { diff --git a/br/pkg/task/backup.go b/br/pkg/task/backup.go index dc2a94c5e115a..497791b315d42 100644 --- a/br/pkg/task/backup.go +++ b/br/pkg/task/backup.go @@ -38,7 +38,6 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics/handle" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/spf13/pflag" 
"github.com/tikv/client-go/v2/oracle" kvutil "github.com/tikv/client-go/v2/util" @@ -722,7 +721,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig } } updateCh = g.StartProgress(ctx, "Checksum", checksumProgress, !cfg.LogProgress) - schemasConcurrency := uint(mathutil.Min(backup.DefaultSchemaConcurrency, schemas.Len())) + schemasConcurrency := uint(min(backup.DefaultSchemaConcurrency, schemas.Len())) err = schemas.BackupSchemas( ctx, metawriter, client.GetCheckpointRunner(), mgr.GetStorage(), statsHandle, backupTS, schemasConcurrency, cfg.ChecksumConcurrency, skipChecksum, updateCh) diff --git a/br/pkg/task/backup_ebs.go b/br/pkg/task/backup_ebs.go index e9f59f6d35a8e..982d732b714be 100644 --- a/br/pkg/task/backup_ebs.go +++ b/br/pkg/task/backup_ebs.go @@ -33,7 +33,6 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/version" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/spf13/pflag" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -346,7 +345,7 @@ func isRegionsHasHole(allRegions []*metapb.Region) bool { } func waitUntilAllScheduleStopped(ctx context.Context, cfg Config, allStores []*metapb.Store, mgr *conn.Mgr) ([]*metapb.Region, error) { - concurrency := mathutil.Min(len(allStores), common.MaxStoreConcurrency) + concurrency := min(len(allStores), common.MaxStoreConcurrency) workerPool := utils.NewWorkerPool(uint(concurrency), "collect schedule info") eg, ectx := errgroup.WithContext(ctx) diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go index 3c3133edbd1b3..6fa3e243bc3ec 100644 --- a/br/pkg/task/stream.go +++ b/br/pkg/task/stream.go @@ -52,7 +52,6 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/spf13/pflag" "github.com/tikv/client-go/v2/config" @@ -1575,14 +1574,14 @@ func getLogRangeWithStorage( if err != nil { return backupLogInfo{}, errors.Trace(err) } - logMinTS := mathutil.Max(logStartTS, truncateTS) + logMinTS := max(logStartTS, truncateTS) // get max global resolved ts from metas. 
logMaxTS, err := getGlobalCheckpointFromStorage(ctx, s) if err != nil { return backupLogInfo{}, errors.Trace(err) } - logMaxTS = mathutil.Max(logMinTS, logMaxTS) + logMaxTS = max(logMinTS, logMaxTS) return backupLogInfo{ logMaxTS: logMaxTS, @@ -1604,7 +1603,7 @@ func getGlobalCheckpointFromStorage(ctx context.Context, s storage.ExternalStora return errors.Trace(err) } ts := binary.LittleEndian.Uint64(buff) - globalCheckPointTS = mathutil.Max(ts, globalCheckPointTS) + globalCheckPointTS = max(ts, globalCheckPointTS) return nil }) return globalCheckPointTS, errors.Trace(err) diff --git a/br/pkg/utils/BUILD.bazel b/br/pkg/utils/BUILD.bazel index ce3e3d89a9f78..395ce00da36d9 100644 --- a/br/pkg/utils/BUILD.bazel +++ b/br/pkg/utils/BUILD.bazel @@ -42,7 +42,6 @@ go_library( "//pkg/util/engine", "//pkg/util/sqlexec", "@com_github_cheggaaa_pb_v3//:pb", - "@com_github_cznic_mathutil//:mathutil", "@com_github_docker_go_units//:go-units", "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", diff --git a/br/pkg/utils/retry.go b/br/pkg/utils/retry.go index 130a1402ec149..bdf277aa876de 100644 --- a/br/pkg/utils/retry.go +++ b/br/pkg/utils/retry.go @@ -8,7 +8,6 @@ import ( "sync" "time" - "github.com/cznic/mathutil" "github.com/pingcap/errors" tmysql "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/parser/terror" @@ -171,7 +170,7 @@ func (r *RetryWithBackoffer) BackOff() error { // That intent will be fulfilled when calling `BackOff`. func (r *RetryWithBackoffer) RequestBackOff(ms int) { r.mu.Lock() - r.nextBackoff = mathutil.Max(r.nextBackoff, ms) + r.nextBackoff = max(r.nextBackoff, ms) r.mu.Unlock() } diff --git a/pkg/executor/BUILD.bazel b/pkg/executor/BUILD.bazel index 3a560ca8ceab1..323ea2b75467f 100644 --- a/pkg/executor/BUILD.bazel +++ b/pkg/executor/BUILD.bazel @@ -438,7 +438,6 @@ go_test( "//pkg/util/globalconn", "//pkg/util/hack", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/mock", "//pkg/util/paging", diff --git a/pkg/executor/adapter.go b/pkg/executor/adapter.go index 100cb2cffaf57..94e6914518008 100644 --- a/pkg/executor/adapter.go +++ b/pkg/executor/adapter.go @@ -59,7 +59,6 @@ import ( "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/hint" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/plancodec" "github.com/pingcap/tidb/pkg/util/replayer" @@ -851,7 +850,7 @@ func (c *chunkRowRecordSet) Fields() []*ast.ResultField { func (c *chunkRowRecordSet) Next(_ context.Context, chk *chunk.Chunk) error { chk.Reset() if !chk.IsFull() && c.idx < len(c.rows) { - numToAppend := mathutil.Min(len(c.rows)-c.idx, chk.RequiredRows()-chk.NumRows()) + numToAppend := min(len(c.rows)-c.idx, chk.RequiredRows()-chk.NumRows()) chk.AppendRows(c.rows[c.idx : c.idx+numToAppend]) c.idx += numToAppend } diff --git a/pkg/executor/aggfuncs/BUILD.bazel b/pkg/executor/aggfuncs/BUILD.bazel index 89eaa4eced8b9..527a02b2fcf5f 100644 --- a/pkg/executor/aggfuncs/BUILD.bazel +++ b/pkg/executor/aggfuncs/BUILD.bazel @@ -46,7 +46,6 @@ go_library( "//pkg/util/collate", "//pkg/util/hack", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/selection", "//pkg/util/set", "//pkg/util/stringutil", diff --git a/pkg/executor/aggfuncs/func_count_distinct.go b/pkg/executor/aggfuncs/func_count_distinct.go index d33064b07d9a0..f3b549bdb6250 100644 --- a/pkg/executor/aggfuncs/func_count_distinct.go +++ 
b/pkg/executor/aggfuncs/func_count_distinct.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/hack" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/set" "github.com/pingcap/tidb/pkg/util/stringutil" ) @@ -590,7 +589,7 @@ func (p *partialResult4ApproxCountDistinct) readAndMerge(rb []byte) error { } if p.bufSize() < uint32(rhsSize) { - newSizeDegree := mathutil.Max(uniquesHashSetInitialSizeDegree, uint8(math.Log2(float64(rhsSize-1)))+2) + newSizeDegree := max(uniquesHashSetInitialSizeDegree, uint8(math.Log2(float64(rhsSize-1)))+2) p.resize(newSizeDegree) } diff --git a/pkg/executor/aggregate/BUILD.bazel b/pkg/executor/aggregate/BUILD.bazel index 8eb96d7e058cc..a60265d2b2a1e 100644 --- a/pkg/executor/aggregate/BUILD.bazel +++ b/pkg/executor/aggregate/BUILD.bazel @@ -31,7 +31,6 @@ go_library( "//pkg/util/execdetails", "//pkg/util/hack", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/set", "@com_github_pingcap_errors//:errors", diff --git a/pkg/executor/aggregate/agg_util.go b/pkg/executor/aggregate/agg_util.go index 95bd88b779022..fadc4cdb61c6d 100644 --- a/pkg/executor/aggregate/agg_util.go +++ b/pkg/executor/aggregate/agg_util.go @@ -34,7 +34,6 @@ import ( "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -77,7 +76,7 @@ func getGroupKeyMemUsage(groupKey [][]byte) int64 { // GetGroupKey evaluates the group items and args of aggregate functions. func GetGroupKey(ctx sessionctx.Context, input *chunk.Chunk, groupKey [][]byte, groupByItems []expression.Expression) ([][]byte, error) { numRows := input.NumRows() - avlGroupKeyLen := mathutil.Min(len(groupKey), numRows) + avlGroupKeyLen := min(len(groupKey), numRows) for i := 0; i < avlGroupKeyLen; i++ { groupKey[i] = groupKey[i][:0] } diff --git a/pkg/executor/builder.go b/pkg/executor/builder.go index b20006c5f5267..8a697a9c5577d 100644 --- a/pkg/executor/builder.go +++ b/pkg/executor/builder.go @@ -72,7 +72,6 @@ import ( "github.com/pingcap/tidb/pkg/util/cteutil" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/execdetails" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/ranger" "github.com/pingcap/tidb/pkg/util/rowcodec" @@ -790,7 +789,7 @@ func (b *executorBuilder) buildLimit(v *plannercore.PhysicalLimit) exec.Executor if b.err != nil { return nil } - n := int(mathutil.Min(v.Count, uint64(b.ctx.GetSessionVars().MaxChunkSize))) + n := int(min(v.Count, uint64(b.ctx.GetSessionVars().MaxChunkSize))) base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec) base.SetInitCap(n) e := &LimitExec{ @@ -1224,7 +1223,7 @@ func (b *executorBuilder) setTelemetryInfo(v *plannercore.DDL) { if b.Ti.PartitionTelemetry == nil { b.Ti.PartitionTelemetry = &PartitionTelemetryInfo{} } - b.Ti.PartitionTelemetry.TablePartitionMaxPartitionsNum = mathutil.Max(p.Num, uint64(len(p.Definitions))) + b.Ti.PartitionTelemetry.TablePartitionMaxPartitionsNum = max(p.Num, uint64(len(p.Definitions))) b.Ti.PartitionTelemetry.UseTablePartition = true switch p.Tp { diff --git a/pkg/executor/distsql.go b/pkg/executor/distsql.go index 90c2566880d9f..9f481556b128f 100644 --- a/pkg/executor/distsql.go +++ b/pkg/executor/distsql.go @@ -54,7 +54,6 @@ import ( 
"github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/logutil/consistency" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/ranger" "github.com/pingcap/tipb/go-tipb" @@ -744,7 +743,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan< } results = append(results, result) } - worker.batchSize = mathutil.Min(initBatchSize, worker.maxBatchSize) + worker.batchSize = min(initBatchSize, worker.maxBatchSize) if len(results) > 1 && len(e.byItems) != 0 { // e.Schema() not the output schema for indexReader, and we put byItems related column at first in `buildIndexReq`, so use nil here. ssr := distsql.NewSortedSelectResults(results, nil, e.byItems, e.memTracker) @@ -873,7 +872,7 @@ func (e *IndexLookUpExecutor) Next(ctx context.Context, req *chunk.Chunk) error return nil } if resultTask.cursor < len(resultTask.rows) { - numToAppend := mathutil.Min(len(resultTask.rows)-resultTask.cursor, req.RequiredRows()-req.NumRows()) + numToAppend := min(len(resultTask.rows)-resultTask.cursor, req.RequiredRows()-req.NumRows()) req.AppendRows(resultTask.rows[resultTask.cursor : resultTask.cursor+numToAppend]) resultTask.cursor += numToAppend if req.IsFull() { diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index d94a7b924fed4..fca75d706f40d 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -72,7 +72,6 @@ import ( "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/logutil/consistency" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/resourcegrouptag" "github.com/pingcap/tidb/pkg/util/sqlexec" @@ -246,7 +245,7 @@ func (e *CommandDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error { if e.cursor >= len(e.jobIDs) { return nil } - numCurBatch := mathutil.Min(req.Capacity(), len(e.jobIDs)-e.cursor) + numCurBatch := min(req.Capacity(), len(e.jobIDs)-e.cursor) for i := e.cursor; i < e.cursor+numCurBatch; i++ { req.AppendString(0, strconv.FormatInt(e.jobIDs[i], 10)) if e.errs != nil && e.errs[i] != nil { @@ -621,7 +620,7 @@ func (e *ShowDDLJobQueriesExec) Next(_ context.Context, req *chunk.Chunk) error if len(e.jobIDs) >= len(e.jobs) { return nil } - numCurBatch := mathutil.Min(req.Capacity(), len(e.jobs)-e.cursor) + numCurBatch := min(req.Capacity(), len(e.jobs)-e.cursor) for _, id := range e.jobIDs { for i := e.cursor; i < e.cursor+numCurBatch; i++ { if id == e.jobs[i].ID { @@ -713,7 +712,7 @@ func (e *ShowDDLJobQueriesWithRangeExec) Next(_ context.Context, req *chunk.Chun if int(e.offset) > len(e.jobs) { return nil } - numCurBatch := mathutil.Min(req.Capacity(), len(e.jobs)-e.cursor) + numCurBatch := min(req.Capacity(), len(e.jobs)-e.cursor) for i := e.cursor; i < e.cursor+numCurBatch; i++ { // i is make true to be >= int(e.offset) if i >= int(e.offset+e.limit) { @@ -763,7 +762,7 @@ func (e *ShowDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error { // Append running ddl jobs. 
if e.cursor < len(e.runningJobs) { - numCurBatch := mathutil.Min(req.Capacity(), len(e.runningJobs)-e.cursor) + numCurBatch := min(req.Capacity(), len(e.runningJobs)-e.cursor) for i := e.cursor; i < e.cursor+numCurBatch; i++ { e.appendJobToChunk(req, e.runningJobs[i], nil) } @@ -776,7 +775,7 @@ func (e *ShowDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error { if count < req.Capacity() { num := req.Capacity() - count remainNum := e.jobNumber - (e.cursor - len(e.runningJobs)) - num = mathutil.Min(num, remainNum) + num = min(num, remainNum) e.cacheJobs, err = e.historyJobIter.GetLastJobs(num, e.cacheJobs) if err != nil { return err @@ -942,7 +941,7 @@ func (e *CheckTableExec) Next(ctx context.Context, _ *chunk.Chunk) error { } taskCh := make(chan *IndexLookUpExecutor, len(e.srcs)) failure := atomicutil.NewBool(false) - concurrency := mathutil.Min(3, len(e.srcs)) + concurrency := min(3, len(e.srcs)) var wg util.WaitGroupWrapper for _, src := range e.srcs { taskCh <- src @@ -1422,7 +1421,7 @@ func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk { limitRequired = chk.RequiredRows() } - return chk.SetRequiredRows(mathutil.Min(limitTotal, limitRequired), e.MaxChunkSize()) + return chk.SetRequiredRows(min(limitTotal, limitRequired), e.MaxChunkSize()) } func init() { diff --git a/pkg/executor/executor_required_rows_test.go b/pkg/executor/executor_required_rows_test.go index fd836f9cfa007..a22f15e2494c6 100644 --- a/pkg/executor/executor_required_rows_test.go +++ b/pkg/executor/executor_required_rows_test.go @@ -35,7 +35,6 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/disk" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" @@ -90,7 +89,7 @@ func (r *requiredRowsDataSource) Next(ctx context.Context, req *chunk.Chunk) err if r.count > r.totalRows { return nil } - required := mathutil.Min(req.RequiredRows(), r.totalRows-r.count) + required := min(req.RequiredRows(), r.totalRows-r.count) for i := 0; i < required; i++ { req.AppendRow(r.genOneRow()) } @@ -195,7 +194,7 @@ func TestLimitRequiredRows(t *testing.T) { } func buildLimitExec(ctx sessionctx.Context, src exec.Executor, offset, count int) exec.Executor { - n := mathutil.Min(count, ctx.GetSessionVars().MaxChunkSize) + n := min(count, ctx.GetSessionVars().MaxChunkSize) base := exec.NewBaseExecutor(ctx, src.Schema(), 0, src) base.SetInitCap(n) limitExec := &LimitExec{ diff --git a/pkg/executor/explain.go b/pkg/executor/explain.go index 0946874bd0773..610368a8852cd 100644 --- a/pkg/executor/explain.go +++ b/pkg/executor/explain.go @@ -85,7 +85,7 @@ func (e *ExplainExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } - numCurRows := mathutil.Min(req.Capacity(), len(e.rows)-e.cursor) + numCurRows := min(req.Capacity(), len(e.rows)-e.cursor) for i := e.cursor; i < e.cursor+numCurRows; i++ { for j := range e.rows[i] { req.AppendString(j, e.rows[i][j]) diff --git a/pkg/executor/index_merge_reader.go b/pkg/executor/index_merge_reader.go index aaac0ae06f062..4d3a7c2f3065c 100644 --- a/pkg/executor/index_merge_reader.go +++ b/pkg/executor/index_merge_reader.go @@ -48,7 +48,6 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" 
"github.com/pingcap/tidb/pkg/util/ranger" "github.com/pingcap/tipb/go-tipb" @@ -429,7 +428,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context, results = append(results, result) failpoint.Inject("testIndexMergePartialIndexWorkerCoprLeak", nil) } - worker.batchSize = mathutil.Min(e.MaxChunkSize(), worker.maxBatchSize) + worker.batchSize = min(e.MaxChunkSize(), worker.maxBatchSize) if len(results) > 1 && len(e.byItems) != 0 { // e.Schema() not the output schema for partialIndexReader, and we put byItems related column at first in `buildIndexReq`, so use nil here. ssr := distsql.NewSortedSelectResults(results, nil, e.byItems, e.memTracker) @@ -679,7 +678,7 @@ func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk. if w.pushedLimit.Offset+w.pushedLimit.Count <= w.scannedKeys { return handles, retChk, nil } - requiredRows = mathutil.Min(int(w.pushedLimit.Offset+w.pushedLimit.Count-w.scannedKeys), requiredRows) + requiredRows = min(int(w.pushedLimit.Offset+w.pushedLimit.Count-w.scannedKeys), requiredRows) } chk.SetRequiredRows(requiredRows, w.maxChunkSize) start := time.Now() @@ -825,7 +824,7 @@ func (e *IndexMergeReaderExecutor) Next(ctx context.Context, req *chunk.Chunk) e return nil } if resultTask.cursor < len(resultTask.rows) { - numToAppend := mathutil.Min(len(resultTask.rows)-resultTask.cursor, e.MaxChunkSize()-req.NumRows()) + numToAppend := min(len(resultTask.rows)-resultTask.cursor, e.MaxChunkSize()-req.NumRows()) req.AppendRows(resultTask.rows[resultTask.cursor : resultTask.cursor+numToAppend]) resultTask.cursor += numToAppend if req.NumRows() >= e.MaxChunkSize() { @@ -997,7 +996,7 @@ func (w *indexMergeProcessWorker) NewHandleHeap(taskMap map[int][]*indexMergeTab requiredCnt := uint64(0) if w.indexMerge.pushedLimit != nil { - requiredCnt = mathutil.Max(requiredCnt, w.indexMerge.pushedLimit.Count+w.indexMerge.pushedLimit.Offset) + requiredCnt = max(requiredCnt, w.indexMerge.pushedLimit.Count+w.indexMerge.pushedLimit.Offset) } return &handleHeap{ requiredCnt: requiredCnt, @@ -1092,7 +1091,7 @@ func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderBy(ctx context.Context, needCount := taskHeap.Len() if w.indexMerge.pushedLimit != nil { - needCount = mathutil.Max(0, taskHeap.Len()-int(w.indexMerge.pushedLimit.Offset)) + needCount = max(0, taskHeap.Len()-int(w.indexMerge.pushedLimit.Offset)) } if needCount == 0 { return @@ -1106,7 +1105,7 @@ func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderBy(ctx context.Context, batchSize := w.indexMerge.Ctx().GetSessionVars().IndexLookupSize tasks := make([]*indexMergeTableTask, 0, len(fhs)/batchSize+1) for len(fhs) > 0 { - l := mathutil.Min(len(fhs), batchSize) + l := min(len(fhs), batchSize) // Save the index order. 
indexOrder := kv.NewHandleMap() for i, h := range fhs[:l] { @@ -1158,7 +1157,7 @@ func pushedLimitCountingDown(pushedLimit *plannercore.PushedDownLimit, handles [ if fhsLen > pushedLimit.Count { handles = handles[:pushedLimit.Count] } - pushedLimit.Count -= mathutil.Min(pushedLimit.Count, fhsLen) + pushedLimit.Count -= min(pushedLimit.Count, fhsLen) return false, handles } @@ -1514,7 +1513,7 @@ func (w *indexMergeProcessWorker) fetchLoopIntersection(ctx context.Context, fet if w.indexMerge.partitionTableMode { partCnt = len(w.indexMerge.prunedPartitions) } - workerCnt := mathutil.Min(partCnt, maxWorkerCnt) + workerCnt := min(partCnt, maxWorkerCnt) failpoint.Inject("testIndexMergeIntersectionConcurrency", func(val failpoint.Value) { con := val.(int) if con != workerCnt { @@ -1749,7 +1748,7 @@ func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk. if w.pushedLimit.Offset+w.pushedLimit.Count <= w.scannedKeys { return handles, retChk, nil } - requiredRows = mathutil.Min(int(w.pushedLimit.Offset+w.pushedLimit.Count-w.scannedKeys), requiredRows) + requiredRows = min(int(w.pushedLimit.Offset+w.pushedLimit.Count-w.scannedKeys), requiredRows) } chk.SetRequiredRows(requiredRows, w.maxChunkSize) start := time.Now() diff --git a/pkg/executor/infoschema_reader.go b/pkg/executor/infoschema_reader.go index 449d41edb460f..d16f1b6620492 100644 --- a/pkg/executor/infoschema_reader.go +++ b/pkg/executor/infoschema_reader.go @@ -69,7 +69,6 @@ import ( "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/keydecoder" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/pingcap/tidb/pkg/util/resourcegrouptag" @@ -1280,7 +1279,7 @@ func (e *DDLJobsReaderExec) Next(_ context.Context, req *chunk.Chunk) error { // Append running DDL jobs. 
if e.cursor < len(e.runningJobs) { - num := mathutil.Min(req.Capacity(), len(e.runningJobs)-e.cursor) + num := min(req.Capacity(), len(e.runningJobs)-e.cursor) for i := e.cursor; i < e.cursor+num; i++ { e.appendJobToChunk(req, e.runningJobs[i], checker) req.AppendString(12, e.runningJobs[i].Query) @@ -2657,9 +2656,9 @@ func (r *dataLockWaitsTableRetriever) retrieve(ctx context.Context, sctx session // and resolving (optimistic lock "waiting") info // first we'll return the lockWaits, and then resolving, so we need to // do some index calculation here - lockWaitsStart := mathutil.Min(start, len(r.lockWaits)) + lockWaitsStart := min(start, len(r.lockWaits)) resolvingStart := start - lockWaitsStart - lockWaitsEnd := mathutil.Min(end, len(r.lockWaits)) + lockWaitsEnd := min(end, len(r.lockWaits)) resolvingEnd := end - lockWaitsEnd for rowIdx, lockWait := range r.lockWaits[lockWaitsStart:lockWaitsEnd] { row := make([]types.Datum, 0, len(r.columns)) diff --git a/pkg/executor/internal/calibrateresource/BUILD.bazel b/pkg/executor/internal/calibrateresource/BUILD.bazel index 8fde4e3a197de..40912760507ef 100644 --- a/pkg/executor/internal/calibrateresource/BUILD.bazel +++ b/pkg/executor/internal/calibrateresource/BUILD.bazel @@ -17,7 +17,6 @@ go_library( "//pkg/sessionctx/variable", "//pkg/sessiontxn/staleread", "//pkg/util/chunk", - "//pkg/util/mathutil", "//pkg/util/sqlexec", "@com_github_docker_go_units//:go-units", "@com_github_pingcap_errors//:errors", diff --git a/pkg/executor/internal/calibrateresource/calibrate_resource.go b/pkg/executor/internal/calibrateresource/calibrate_resource.go index e8b15e191b216..27083fc747b2a 100644 --- a/pkg/executor/internal/calibrateresource/calibrate_resource.go +++ b/pkg/executor/internal/calibrateresource/calibrate_resource.go @@ -35,7 +35,6 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/sessiontxn/staleread" "github.com/pingcap/tidb/pkg/util/chunk" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/tikv/client-go/v2/oracle" resourceControlClient "github.com/tikv/pd/client/resource_group/controller" @@ -334,11 +333,11 @@ func (e *Executor) getTiDBQuota(ctx context.Context, exec sqlexec.RestrictedSQLE // If one of the two cpu usage is greater than the `valuableUsageThreshold`, we can accept it. // And if both are greater than the `lowUsageThreshold`, we can also accept it. 
if tikvQuota > valuableUsageThreshold || tidbQuota > valuableUsageThreshold { - quotas = append(quotas, rus.getValue()/mathutil.Max(tikvQuota, tidbQuota)) + quotas = append(quotas, rus.getValue()/max(tikvQuota, tidbQuota)) } else if tikvQuota < lowUsageThreshold || tidbQuota < lowUsageThreshold { lowCount++ } else { - quotas = append(quotas, rus.getValue()/mathutil.Max(tikvQuota, tidbQuota)) + quotas = append(quotas, rus.getValue()/max(tikvQuota, tidbQuota)) } rus.next() tidbCPUs.next() diff --git a/pkg/executor/pipelined_window.go b/pkg/executor/pipelined_window.go index 519762892b6b3..e63a4a59596d4 100644 --- a/pkg/executor/pipelined_window.go +++ b/pkg/executor/pipelined_window.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util/chunk" - "github.com/pingcap/tidb/pkg/util/mathutil" ) type dataInfo struct { @@ -260,7 +259,7 @@ func (e *PipelinedWindowExec) getStart(ctx sessionctx.Context) (uint64, error) { } if e.isRangeFrame { var start uint64 - for start = mathutil.Max(e.lastStartRow, e.stagedStartRow); start < e.rowCnt; start++ { + for start = max(e.lastStartRow, e.stagedStartRow); start < e.rowCnt; start++ { var res int64 var err error for i := range e.orderByCols { @@ -300,7 +299,7 @@ func (e *PipelinedWindowExec) getEnd(ctx sessionctx.Context) (uint64, error) { } if e.isRangeFrame { var end uint64 - for end = mathutil.Max(e.lastEndRow, e.stagedEndRow); end < e.rowCnt; end++ { + for end = max(e.lastEndRow, e.stagedEndRow); end < e.rowCnt; end++ { var res int64 var err error for i := range e.orderByCols { @@ -414,7 +413,7 @@ func (e *PipelinedWindowExec) produce(ctx sessionctx.Context, chk *chunk.Chunk, produced++ remained-- } - extend := mathutil.Min(e.curRowIdx, e.lastEndRow, e.lastStartRow) + extend := min(e.curRowIdx, e.lastEndRow, e.lastStartRow) if extend > e.rowStart { numDrop := extend - e.rowStart e.dropped += numDrop diff --git a/pkg/executor/show.go b/pkg/executor/show.go index 4202603a45c03..9875cfa117ced 100644 --- a/pkg/executor/show.go +++ b/pkg/executor/show.go @@ -75,7 +75,6 @@ import ( "github.com/pingcap/tidb/pkg/util/format" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/hint" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/sem" "github.com/pingcap/tidb/pkg/util/set" @@ -148,7 +147,7 @@ func (e *ShowExec) Next(ctx context.Context, req *chunk.Chunk) error { if e.cursor >= e.result.NumRows() { return nil } - numCurBatch := mathutil.Min(req.Capacity(), e.result.NumRows()-e.cursor) + numCurBatch := min(req.Capacity(), e.result.NumRows()-e.cursor) req.Append(e.result, e.cursor, e.cursor+numCurBatch) e.cursor += numCurBatch return nil diff --git a/pkg/executor/simple.go b/pkg/executor/simple.go index 19e52a9f3ae16..748abf88b17b4 100644 --- a/pkg/executor/simple.go +++ b/pkg/executor/simple.go @@ -60,7 +60,6 @@ import ( "github.com/pingcap/tidb/pkg/util/globalconn" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" pwdValidator "github.com/pingcap/tidb/pkg/util/password-validation" "github.com/pingcap/tidb/pkg/util/sem" "github.com/pingcap/tidb/pkg/util/sqlexec" @@ -873,22 +872,22 @@ func (info *passwordOrLockOptionsInfo) loadOptions(plOption []*ast.PasswordOrLoc case ast.Unlock: info.lockAccount = "N" case ast.FailedLoginAttempts: - info.failedLoginAttempts = mathutil.Min(option.Count, math.MaxInt16) 
+		info.failedLoginAttempts = min(option.Count, math.MaxInt16)
 		info.failedLoginAttemptsChange = true
 	case ast.PasswordLockTime:
-		info.passwordLockTime = mathutil.Min(option.Count, math.MaxInt16)
+		info.passwordLockTime = min(option.Count, math.MaxInt16)
 		info.passwordLockTimeChange = true
 	case ast.PasswordLockTimeUnbounded:
 		info.passwordLockTime = -1
 		info.passwordLockTimeChange = true
 	case ast.PasswordHistory:
-		info.passwordHistory = mathutil.Min(option.Count, math.MaxUint16)
+		info.passwordHistory = min(option.Count, math.MaxUint16)
 		info.passwordHistoryChange = true
 	case ast.PasswordHistoryDefault:
 		info.passwordHistory = notSpecified
 		info.passwordHistoryChange = true
 	case ast.PasswordReuseInterval:
-		info.passwordReuseInterval = mathutil.Min(option.Count, math.MaxUint16)
+		info.passwordReuseInterval = min(option.Count, math.MaxUint16)
 		info.passwordReuseIntervalChange = true
 	case ast.PasswordReuseDefault:
 		info.passwordReuseInterval = notSpecified
@@ -963,8 +962,8 @@ func readPasswordLockingInfo(ctx context.Context, sqlExecutor sqlexec.SQLExecuto
 		if err != nil {
 			return nil, err
 		}
-		alterUserInfo.failedLoginAttempts = mathutil.Max(alterUserInfo.failedLoginAttempts, 0)
-		alterUserInfo.failedLoginAttempts = mathutil.Min(alterUserInfo.failedLoginAttempts, math.MaxInt16)
+		alterUserInfo.failedLoginAttempts = max(alterUserInfo.failedLoginAttempts, 0)
+		alterUserInfo.failedLoginAttempts = min(alterUserInfo.failedLoginAttempts, math.MaxInt16)
 	} else {
 		alterUserInfo.failedLoginAttemptsNotFound = true
 	}
@@ -977,8 +976,8 @@ func readPasswordLockingInfo(ctx context.Context, sqlExecutor sqlexec.SQLExecuto
 		if err != nil {
 			return nil, err
 		}
-		alterUserInfo.passwordLockTime = mathutil.Max(alterUserInfo.passwordLockTime, -1)
-		alterUserInfo.passwordLockTime = mathutil.Min(alterUserInfo.passwordLockTime, math.MaxInt16)
+		alterUserInfo.passwordLockTime = max(alterUserInfo.passwordLockTime, -1)
+		alterUserInfo.passwordLockTime = min(alterUserInfo.passwordLockTime, math.MaxInt16)
 	} else {
 		alterUserInfo.passwordLockTimeChangeNotFound = true
 	}
diff --git a/pkg/executor/sort.go b/pkg/executor/sort.go
index 793e6f2aa2545..d51d96b152a59 100644
--- a/pkg/executor/sort.go
+++ b/pkg/executor/sort.go
@@ -28,7 +28,6 @@ import (
 	"github.com/pingcap/tidb/pkg/sessionctx/variable"
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/disk"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/memory"
 )
 
@@ -446,7 +445,7 @@ func (e *TopNExec) Next(ctx context.Context, req *chunk.Chunk) error {
 		return nil
 	}
 	if !req.IsFull() {
-		numToAppend := mathutil.Min(len(e.rowPtrs)-e.Idx, req.RequiredRows()-req.NumRows())
+		numToAppend := min(len(e.rowPtrs)-e.Idx, req.RequiredRows()-req.NumRows())
 		rows := make([]chunk.Row, numToAppend)
 		for index := 0; index < numToAppend; index++ {
 			rows[index] = e.rowChunks.GetRow(e.rowPtrs[e.Idx])
diff --git a/pkg/executor/split.go b/pkg/executor/split.go
index afa1a93ee37ec..e2830894589a7 100644
--- a/pkg/executor/split.go
+++ b/pkg/executor/split.go
@@ -37,7 +37,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util/codec"
 	"github.com/pingcap/tidb/pkg/util/dbterror/exeerrors"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/tikv/client-go/v2/tikv"
 	"go.uber.org/zap"
 )
@@ -272,7 +271,7 @@ func getValuesList(lower, upper []byte, num int, valuesList [][]byte) [][]byte {
 
 // longestCommonPrefixLen gets the longest common prefix byte length.
 func longestCommonPrefixLen(s1, s2 []byte) int {
-	l := mathutil.Min(len(s1), len(s2))
+	l := min(len(s1), len(s2))
 	i := 0
 	for ; i < l; i++ {
 		if s1[i] != s2[i] {
diff --git a/pkg/executor/table_readers_required_rows_test.go b/pkg/executor/table_readers_required_rows_test.go
index fe313668c2ecf..a6f6b254ec85d 100644
--- a/pkg/executor/table_readers_required_rows_test.go
+++ b/pkg/executor/table_readers_required_rows_test.go
@@ -32,7 +32,6 @@ import (
 	"github.com/pingcap/tidb/pkg/table/tables"
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util/chunk"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tipb/go-tipb"
 	"github.com/stretchr/testify/require"
 )
@@ -64,7 +63,7 @@ func (r *requiredRowsSelectResult) Next(ctx context.Context, chk *chunk.Chunk) e
 	if r.count > r.totalRows {
 		return nil
 	}
-	required := mathutil.Min(chk.RequiredRows(), r.totalRows-r.count)
+	required := min(chk.RequiredRows(), r.totalRows-r.count)
 	for i := 0; i < required; i++ {
 		chk.AppendRow(r.genOneRow())
 	}
diff --git a/pkg/executor/window.go b/pkg/executor/window.go
index 93ba5f919cbfe..ec089fa977f18 100644
--- a/pkg/executor/window.go
+++ b/pkg/executor/window.go
@@ -26,7 +26,6 @@ import (
 	"github.com/pingcap/tidb/pkg/planner/core"
 	"github.com/pingcap/tidb/pkg/sessionctx"
 	"github.com/pingcap/tidb/pkg/util/chunk"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 )
 
 // WindowExec is the executor for window functions.
@@ -129,7 +128,7 @@ func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) {
 		return nil
 	}
 	for i := 0; i < len(e.resultChunks); i++ {
-		remained := mathutil.Min(e.remainingRowsInChunk[i], remainingRowsInGroup)
+		remained := min(e.remainingRowsInChunk[i], remainingRowsInGroup)
 		e.remainingRowsInChunk[i] -= remained
 		remainingRowsInGroup -= remained
 
diff --git a/pkg/planner/core/exhaust_physical_plans.go b/pkg/planner/core/exhaust_physical_plans.go
index 718fd03864549..0df49021d55bb 100644
--- a/pkg/planner/core/exhaust_physical_plans.go
+++ b/pkg/planner/core/exhaust_physical_plans.go
@@ -40,7 +40,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/collate"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/plancodec"
 	"github.com/pingcap/tidb/pkg/util/ranger"
 	"github.com/pingcap/tidb/pkg/util/set"
@@ -1222,7 +1221,7 @@ func getColsNDVLowerBoundFromHistColl(colUIDs []int64, histColl *statistics.Hist
 		if colStats == nil || !colStats.IsStatsInitialized() {
 			continue
 		}
-		maxNDV = mathutil.Max(maxNDV, colStats.NDV)
+		maxNDV = max(maxNDV, colStats.NDV)
 	}
 	return maxNDV
 }
diff --git a/pkg/planner/core/logical_plan_builder.go b/pkg/planner/core/logical_plan_builder.go
index 38f0d604d2a29..1f78b88be2ed3 100644
--- a/pkg/planner/core/logical_plan_builder.go
+++ b/pkg/planner/core/logical_plan_builder.go
@@ -62,7 +62,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util/hack"
 	"github.com/pingcap/tidb/pkg/util/hint"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/plancodec"
 	"github.com/pingcap/tidb/pkg/util/set"
 	"github.com/pingcap/tidb/pkg/util/size"
@@ -1948,12 +1947,12 @@ func unionJoinFieldType(a, b *types.FieldType) *types.FieldType {
 		// ref2: https://github.com/pingcap/tidb/issues/24953
 		resultTp.AddFlag((a.GetFlag() & mysql.UnsignedFlag) & (b.GetFlag() & mysql.UnsignedFlag))
 	}
-	resultTp.SetDecimalUnderLimit(mathutil.Max(a.GetDecimal(), b.GetDecimal()))
+	resultTp.SetDecimalUnderLimit(max(a.GetDecimal(), b.GetDecimal()))
 	// `flen - decimal` is the fraction before '.'
 	if a.GetFlen() == -1 || b.GetFlen() == -1 {
 		resultTp.SetFlenUnderLimit(-1)
 	} else {
-		resultTp.SetFlenUnderLimit(mathutil.Max(a.GetFlen()-a.GetDecimal(), b.GetFlen()-b.GetDecimal()) + resultTp.GetDecimal())
+		resultTp.SetFlenUnderLimit(max(a.GetFlen()-a.GetDecimal(), b.GetFlen()-b.GetDecimal()) + resultTp.GetDecimal())
 	}
 	types.TryToFixFlenOfDatetime(resultTp)
 	if resultTp.EvalType() != types.ETInt && (a.EvalType() == types.ETInt || b.EvalType() == types.ETInt) && resultTp.GetFlen() < mysql.MaxIntWidth {
@@ -1977,7 +1976,7 @@ func (*PlanBuilder) setUnionFlen(resultTp *types.FieldType, cols []expression.Ex
 				childTpCharLen = charsetInfo.Maxlen
 			}
 		}
-		resultTp.SetFlen(mathutil.Max(resultTp.GetFlen(), childTpCharLen*childTp.GetFlen()))
+		resultTp.SetFlen(max(resultTp.GetFlen(), childTpCharLen*childTp.GetFlen()))
 	}
 }
 
@@ -6896,7 +6895,7 @@ func restoreByItemText(item *ast.ByItem) string {
 }
 
 func compareItems(lItems []*ast.ByItem, rItems []*ast.ByItem) bool {
-	minLen := mathutil.Min(len(lItems), len(rItems))
+	minLen := min(len(lItems), len(rItems))
 	for i := 0; i < minLen; i++ {
 		res := strings.Compare(restoreByItemText(lItems[i]), restoreByItemText(rItems[i]))
 		if res != 0 {
diff --git a/pkg/planner/core/memtable_predicate_extractor.go b/pkg/planner/core/memtable_predicate_extractor.go
index cd8d412ddd3a2..fb33a510e7bdc 100644
--- a/pkg/planner/core/memtable_predicate_extractor.go
+++ b/pkg/planner/core/memtable_predicate_extractor.go
@@ -36,7 +36,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/codec"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/rowcodec"
 	"github.com/pingcap/tidb/pkg/util/set"
 	"github.com/pingcap/tidb/pkg/util/stringutil"
@@ -466,28 +465,28 @@ func (helper extractHelper) extractTimeRange(
 
 		switch fnName {
 		case ast.EQ:
-			startTime = mathutil.Max(startTime, timestamp)
+			startTime = max(startTime, timestamp)
 			if endTime == 0 {
 				endTime = timestamp
 			} else {
-				endTime = mathutil.Min(endTime, timestamp)
+				endTime = min(endTime, timestamp)
 			}
 		case ast.GT:
 			// FixMe: add 1ms is not absolutely correct here, just because the log search precision is millisecond.
-			startTime = mathutil.Max(startTime, timestamp+int64(time.Millisecond))
+			startTime = max(startTime, timestamp+int64(time.Millisecond))
 		case ast.GE:
-			startTime = mathutil.Max(startTime, timestamp)
+			startTime = max(startTime, timestamp)
 		case ast.LT:
 			if endTime == 0 {
 				endTime = timestamp - int64(time.Millisecond)
 			} else {
-				endTime = mathutil.Min(endTime, timestamp-int64(time.Millisecond))
+				endTime = min(endTime, timestamp-int64(time.Millisecond))
 			}
 		case ast.LE:
 			if endTime == 0 {
 				endTime = timestamp
 			} else {
-				endTime = mathutil.Min(endTime, timestamp)
+				endTime = min(endTime, timestamp)
 			}
 		default:
 			remained = append(remained, expr)
diff --git a/pkg/planner/core/optimizer.go b/pkg/planner/core/optimizer.go
index bd552f83706a7..74786afb6b166 100644
--- a/pkg/planner/core/optimizer.go
+++ b/pkg/planner/core/optimizer.go
@@ -46,7 +46,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util"
 	utilhint "github.com/pingcap/tidb/pkg/util/hint"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/set"
 	"github.com/pingcap/tidb/pkg/util/tracing"
 	"github.com/pingcap/tipb/go-tipb"
@@ -825,7 +824,7 @@ func calculateTiFlashStreamCountUsingMinLogicalCores(ctx context.Context, sctx s
 		if row[4].GetString() == "cpu-logical-cores" {
 			logicalCpus, err := strconv.Atoi(row[5].GetString())
 			if err == nil && logicalCpus > 0 {
-				minLogicalCores = mathutil.Min(minLogicalCores, uint64(logicalCpus))
+				minLogicalCores = min(minLogicalCores, uint64(logicalCpus))
 			}
 		}
 	}
diff --git a/pkg/planner/core/plan.go b/pkg/planner/core/plan.go
index b16364cbc555a..2a3ade525dece 100644
--- a/pkg/planner/core/plan.go
+++ b/pkg/planner/core/plan.go
@@ -29,7 +29,6 @@ import (
 	"github.com/pingcap/tidb/pkg/sessionctx"
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util/execdetails"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/size"
 	"github.com/pingcap/tidb/pkg/util/tracing"
 	"github.com/pingcap/tipb/go-tipb"
@@ -147,7 +146,7 @@ func optimizeByShuffle4Window(pp *PhysicalWindow, ctx sessionctx.Context) *Physi
 	if ndv <= 1 {
 		return nil
 	}
-	concurrency = mathutil.Min(concurrency, int(ndv))
+	concurrency = min(concurrency, int(ndv))
 
 	byItems := make([]expression.Expression, 0, len(pp.PartitionBy))
 	for _, item := range pp.PartitionBy {
@@ -188,7 +187,7 @@ func optimizeByShuffle4StreamAgg(pp *PhysicalStreamAgg, ctx sessionctx.Context)
 	if ndv <= 1 {
 		return nil
 	}
-	concurrency = mathutil.Min(concurrency, int(ndv))
+	concurrency = min(concurrency, int(ndv))
 
 	reqProp := &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}
 	shuffle := PhysicalShuffle{
diff --git a/pkg/planner/core/plan_stats.go b/pkg/planner/core/plan_stats.go
index bf363d0a6bb97..e5cf7a47956d0 100644
--- a/pkg/planner/core/plan_stats.go
+++ b/pkg/planner/core/plan_stats.go
@@ -27,7 +27,6 @@ import (
 	"github.com/pingcap/tidb/pkg/statistics"
 	"github.com/pingcap/tidb/pkg/table"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"go.uber.org/zap"
 )
 
@@ -110,7 +109,7 @@ func RequestLoadStats(ctx sessionctx.Context, neededHistItems []model.TableItemI
 	if sessMaxExecutionTime <= 0 {
 		sessMaxExecutionTime = maxDuration
 	}
-	waitTime := mathutil.Min(syncWait, hintMaxExecutionTime, sessMaxExecutionTime)
+	waitTime := min(syncWait, hintMaxExecutionTime, sessMaxExecutionTime)
 	var timeout = time.Duration(waitTime)
 	err := domain.GetDomain(ctx).StatsHandle().SendLoadRequests(stmtCtx, neededHistItems, timeout)
 	if err != nil {
diff --git a/pkg/planner/core/planbuilder.go b/pkg/planner/core/planbuilder.go
index d4ec806e2a6a2..4e109bd6a5eb1 100644
--- a/pkg/planner/core/planbuilder.go
+++ b/pkg/planner/core/planbuilder.go
@@ -61,7 +61,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util/execdetails"
 	"github.com/pingcap/tidb/pkg/util/hint"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	utilparser "github.com/pingcap/tidb/pkg/util/parser"
 	"github.com/pingcap/tidb/pkg/util/ranger"
 	"github.com/pingcap/tidb/pkg/util/sem"
@@ -637,7 +636,7 @@ func (hch *handleColHelper) pushMap(m map[int64][]HandleCols) {
 }
 
 func (hch *handleColHelper) mergeAndPush(m1, m2 map[int64][]HandleCols) {
-	newMap := make(map[int64][]HandleCols, mathutil.Max(len(m1), len(m2)))
+	newMap := make(map[int64][]HandleCols, max(len(m1), len(m2)))
 	for k, v := range m1 {
 		newMap[k] = make([]HandleCols, len(v))
 		copy(newMap[k], v)
diff --git a/pkg/planner/core/rule_topn_push_down.go b/pkg/planner/core/rule_topn_push_down.go
index 0bf451035b9f2..7f6716cc17948 100644
--- a/pkg/planner/core/rule_topn_push_down.go
+++ b/pkg/planner/core/rule_topn_push_down.go
@@ -21,7 +21,6 @@ import (
 
 	"github.com/pingcap/tidb/pkg/expression"
 	"github.com/pingcap/tidb/pkg/planner/util"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 )
 
 // pushDownTopNOptimizer pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase.
@@ -61,7 +60,7 @@ func (lt *LogicalTopN) setChild(p LogicalPlan, opt *logicalOptimizeOp) LogicalPl
 			dual.RowCount = 0
 			return dual
 		}
-		dual.RowCount = int(mathutil.Min(numDualRows-lt.Offset, lt.Count))
+		dual.RowCount = int(min(numDualRows-lt.Offset, lt.Count))
 		return dual
 	}
 
diff --git a/pkg/planner/core/stats.go b/pkg/planner/core/stats.go
index 371e5b94aafcc..75338b971b303 100644
--- a/pkg/planner/core/stats.go
+++ b/pkg/planner/core/stats.go
@@ -39,7 +39,6 @@ import (
 	"github.com/pingcap/tidb/pkg/table"
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/ranger"
 	"go.uber.org/zap"
 )
@@ -485,10 +484,10 @@ func getMinSelectivityFromPaths(paths []*util.AccessPath, totalRowCount float64)
 		// For table path and index merge path, AccessPath.CountAfterIndex is not set and meaningless,
 		// but we still consider their AccessPath.CountAfterAccess.
 		if path.IsTablePath() || path.PartialIndexPaths != nil {
-			minSelectivity = mathutil.Min(minSelectivity, path.CountAfterAccess/totalRowCount)
+			minSelectivity = min(minSelectivity, path.CountAfterAccess/totalRowCount)
 			continue
 		}
-		minSelectivity = mathutil.Min(minSelectivity, path.CountAfterIndex/totalRowCount)
+		minSelectivity = min(minSelectivity, path.CountAfterIndex/totalRowCount)
 	}
 	return minSelectivity
 }
diff --git a/pkg/planner/core/task.go b/pkg/planner/core/task.go
index 72537f6be8783..07c9a92cdac13 100644
--- a/pkg/planner/core/task.go
+++ b/pkg/planner/core/task.go
@@ -35,7 +35,6 @@ import (
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/collate"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/paging"
 	"github.com/pingcap/tidb/pkg/util/plancodec"
 	"github.com/pingcap/tidb/pkg/util/size"
@@ -377,7 +376,7 @@ func negotiateCommonType(lType, rType *types.FieldType) (*types.FieldType, bool,
 			cDec = lType.GetDecimal()
 		}
 		lLen, rLen := lType.GetFlen()+lExtend, rType.GetFlen()+rExtend
-		cLen := mathutil.Max(lLen, rLen)
+		cLen := max(lLen, rLen)
 		commonType.SetDecimalUnderLimit(cDec)
 		commonType.SetFlenUnderLimit(cLen)
 	} else if needConvert(lType, commonType) || needConvert(rType, commonType) {
diff --git a/pkg/statistics/BUILD.bazel b/pkg/statistics/BUILD.bazel
index d9c949de9369d..e4e5e1e9a7442 100644
--- a/pkg/statistics/BUILD.bazel
+++ b/pkg/statistics/BUILD.bazel
@@ -42,7 +42,6 @@ go_library(
         "//pkg/util/fastrand",
         "//pkg/util/hack",
         "//pkg/util/logutil",
-        "//pkg/util/mathutil",
         "//pkg/util/memory",
         "//pkg/util/ranger",
         "//pkg/util/sqlexec",
diff --git a/pkg/statistics/estimate.go b/pkg/statistics/estimate.go
index bb1bc2fe5542e..52a7da9ac73e3 100644
--- a/pkg/statistics/estimate.go
+++ b/pkg/statistics/estimate.go
@@ -16,8 +16,6 @@ package statistics
 
 import (
 	"math"
-
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 )
 
 // calculateEstimateNDV calculates the estimate ndv of a sampled data from a multisize with size total.
@@ -45,7 +43,7 @@ func calculateEstimateNDV(h *topNHelper, rowCount uint64) (ndv uint64, scaleRati
 		d := float64(sampleNDV)
 		ndv = uint64(math.Sqrt(rowCountN/n)*f1 + d - f1 + 0.5)
 	}
-	ndv = mathutil.Max(ndv, sampleNDV)
-	ndv = mathutil.Min(ndv, rowCount)
+	ndv = max(ndv, sampleNDV)
+	ndv = min(ndv, rowCount)
 	return ndv, scaleRatio
 }
diff --git a/pkg/statistics/handle/cache/internal/lfu/BUILD.bazel b/pkg/statistics/handle/cache/internal/lfu/BUILD.bazel
index ae179f12be3e9..1e732998623ab 100644
--- a/pkg/statistics/handle/cache/internal/lfu/BUILD.bazel
+++ b/pkg/statistics/handle/cache/internal/lfu/BUILD.bazel
@@ -15,7 +15,6 @@ go_library(
         "//pkg/statistics/handle/cache/internal/metrics",
         "//pkg/util/intest",
         "//pkg/util/logutil",
-        "//pkg/util/mathutil",
        "//pkg/util/memory",
        "@com_github_dgraph_io_ristretto//:ristretto",
        "@org_golang_x_exp//maps",
diff --git a/pkg/statistics/handle/cache/internal/lfu/lfu_cache.go b/pkg/statistics/handle/cache/internal/lfu/lfu_cache.go
index 0382b592b0061..848aaecb863cb 100644
--- a/pkg/statistics/handle/cache/internal/lfu/lfu_cache.go
+++ b/pkg/statistics/handle/cache/internal/lfu/lfu_cache.go
@@ -24,7 +24,6 @@ import (
 	"github.com/pingcap/tidb/pkg/statistics/handle/cache/internal/metrics"
 	"github.com/pingcap/tidb/pkg/util/intest"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/memory"
 	"go.uber.org/zap"
 	"golang.org/x/exp/rand"
@@ -69,7 +68,7 @@ func NewLFU(totalMemCost int64) (*LFU, error) {
 	bufferItems := int64(64)
 
 	cache, err := ristretto.NewCache(&ristretto.Config{
-		NumCounters: mathutil.Max(mathutil.Min(cost/128, 1_000_000), 10), // assume the cost per table stats is 128
+		NumCounters: max(min(cost/128, 1_000_000), 10), // assume the cost per table stats is 128
 		MaxCost:     cost,
 		BufferItems: bufferItems,
 		OnEvict:     result.onEvict,
diff --git a/pkg/statistics/handle/extstats/BUILD.bazel b/pkg/statistics/handle/extstats/BUILD.bazel
index a3442b1168c27..f9970e5f47d80 100644
--- a/pkg/statistics/handle/extstats/BUILD.bazel
+++ b/pkg/statistics/handle/extstats/BUILD.bazel
@@ -12,7 +12,6 @@ go_library(
         "//pkg/statistics",
         "//pkg/statistics/handle/util",
         "//pkg/util/logutil",
-        "//pkg/util/mathutil",
        "@com_github_pingcap_errors//:errors",
        "@org_uber_go_zap//:zap",
    ],
diff --git a/pkg/statistics/handle/extstats/extended_stats.go b/pkg/statistics/handle/extstats/extended_stats.go
index 4de8bd9f3f522..80502239ff14f 100644
--- a/pkg/statistics/handle/extstats/extended_stats.go
+++ b/pkg/statistics/handle/extstats/extended_stats.go
@@ -24,7 +24,6 @@ import (
 	"github.com/pingcap/tidb/pkg/statistics"
 	"github.com/pingcap/tidb/pkg/statistics/handle/util"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"go.uber.org/zap"
 )
 
@@ -88,7 +87,7 @@ func fillExtStatsCorrVals(sctx sessionctx.Context, item *statistics.ExtendedStat
 	samplesX := collectors[colOffsets[0]].Samples
 	// We would modify Ordinal of samplesY, so we make a deep copy.
 	samplesY := statistics.CopySampleItems(collectors[colOffsets[1]].Samples)
-	sampleNum := mathutil.Min(len(samplesX), len(samplesY))
+	sampleNum := min(len(samplesX), len(samplesY))
 	if sampleNum == 1 {
 		item.ScalarVals = 1
 		return item
diff --git a/pkg/statistics/handle/storage/BUILD.bazel b/pkg/statistics/handle/storage/BUILD.bazel
index 0ed9cf56db5ff..fa0258181a51a 100644
--- a/pkg/statistics/handle/storage/BUILD.bazel
+++ b/pkg/statistics/handle/storage/BUILD.bazel
@@ -30,7 +30,6 @@ go_library(
         "//pkg/util/chunk",
         "//pkg/util/compress",
         "//pkg/util/logutil",
-        "//pkg/util/mathutil",
        "//pkg/util/memory",
        "//pkg/util/sqlexec",
        "@com_github_klauspost_compress//gzip",
diff --git a/pkg/statistics/handle/storage/read.go b/pkg/statistics/handle/storage/read.go
index 6c1e4318dc21d..f59dd8e34b82c 100644
--- a/pkg/statistics/handle/storage/read.go
+++ b/pkg/statistics/handle/storage/read.go
@@ -34,7 +34,6 @@ import (
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/logutil"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 	"github.com/pingcap/tidb/pkg/util/memory"
 	"github.com/pingcap/tidb/pkg/util/sqlexec"
 	"go.uber.org/zap"
@@ -189,7 +188,7 @@ func ExtendedStatsFromStorage(sctx sessionctx.Context, table *statistics.Table,
 		return table, nil
 	}
 	for _, row := range rows {
-		lastVersion = mathutil.Max(lastVersion, row.GetUint64(5))
+		lastVersion = max(lastVersion, row.GetUint64(5))
 		name := row.GetString(0)
 		status := uint8(row.GetInt64(1))
 		if status == statistics.ExtendedStatsDeleted || status == statistics.ExtendedStatsInited {
diff --git a/pkg/statistics/scalar.go b/pkg/statistics/scalar.go
index efdabfe97c76d..92f3e52db8f41 100644
--- a/pkg/statistics/scalar.go
+++ b/pkg/statistics/scalar.go
@@ -22,7 +22,6 @@ import (
 	"github.com/pingcap/tidb/pkg/parser/mysql"
 	"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
 	"github.com/pingcap/tidb/pkg/types"
-	"github.com/pingcap/tidb/pkg/util/mathutil"
 )
 
 // calcFraction is used to calculate the fraction of the interval [lower, upper] that lies within the [lower, value]
@@ -253,7 +252,7 @@ func EnumRangeValues(low, high types.Datum, lowExclude, highExclude bool) []type
 		return values
 	case types.KindMysqlDuration:
 		lowDur, highDur := low.GetMysqlDuration(), high.GetMysqlDuration()
-		fsp := mathutil.Max(lowDur.Fsp, highDur.Fsp)
+		fsp := max(lowDur.Fsp, highDur.Fsp)
 		stepSize := int64(math.Pow10(types.MaxFsp-fsp)) * int64(time.Microsecond)
 		lowDur.Duration = lowDur.Duration.Round(time.Duration(stepSize))
 		remaining := int64(highDur.Duration-lowDur.Duration)/stepSize + 1 - int64(exclude)
@@ -274,7 +273,7 @@ func EnumRangeValues(low, high types.Datum, lowExclude, highExclude bool) []type
 		if lowTime.Type() != highTime.Type() {
 			return nil
 		}
-		fsp := mathutil.Max(lowTime.Fsp(), highTime.Fsp())
+		fsp := max(lowTime.Fsp(), highTime.Fsp())
 		var stepSize int64
 		sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC)
 		if lowTime.Type() == mysql.TypeDate {