From e18ca24b0661fccab0d66a983fc4b677088a188e Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Fri, 3 Jan 2025 12:55:30 +0800
Subject: [PATCH] *: replace any with specific type in log (#58660)

ref pingcap/tidb#31716
---
 pkg/executor/cte.go                           | 10 +++----
 pkg/executor/index_merge_reader.go            |  4 +--
 .../internal/mpp/executor_with_retry.go       |  2 +-
 pkg/executor/traffic.go                       |  2 +-
 .../pool/workerpool/workpool_test.go          |  2 +-
 pkg/store/copr/batch_coprocessor.go           | 20 ++++++-------
 pkg/store/copr/batch_coprocessor_test.go      |  2 +-
 pkg/util/memoryusagealarm/memoryusagealarm.go | 14 ++++-----
 pkg/util/tiflashcompute/topo_fetcher.go       | 30 +++++++++----------
 tests/globalkilltest/util.go                  |  2 +-
 10 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/pkg/executor/cte.go b/pkg/executor/cte.go
index 003fc0505fadf..d9ac97b943b38 100644
--- a/pkg/executor/cte.go
+++ b/pkg/executor/cte.go
@@ -126,7 +126,7 @@ func (e *CTEExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
 
 func setFirstErr(firstErr error, newErr error, msg string) error {
     if newErr != nil {
-        logutil.BgLogger().Error("cte got error", zap.Any("err", newErr), zap.Any("extra msg", msg))
+        logutil.BgLogger().Error("cte got error", zap.Any("err", newErr), zap.String("extra msg", msg))
         if firstErr == nil {
             firstErr = newErr
         }
@@ -768,8 +768,8 @@ func (p *cteProducer) checkAndUpdateCorColHashCode() bool {
 
 func (p *cteProducer) logTbls(ctx context.Context, err error, iterNum uint64, lvl zapcore.Level) {
     logutil.Logger(ctx).Log(lvl, "cte iteration info",
-        zap.Any("iterInTbl mem usage", p.iterInTbl.GetMemBytes()), zap.Any("iterInTbl disk usage", p.iterInTbl.GetDiskBytes()),
-        zap.Any("iterOutTbl mem usage", p.iterOutTbl.GetMemBytes()), zap.Any("iterOutTbl disk usage", p.iterOutTbl.GetDiskBytes()),
-        zap.Any("resTbl mem usage", p.resTbl.GetMemBytes()), zap.Any("resTbl disk usage", p.resTbl.GetDiskBytes()),
-        zap.Any("resTbl rows", p.resTbl.NumRows()), zap.Any("iteration num", iterNum), zap.Error(err))
+        zap.Int64("iterInTbl mem usage", p.iterInTbl.GetMemBytes()), zap.Int64("iterInTbl disk usage", p.iterInTbl.GetDiskBytes()),
+        zap.Int64("iterOutTbl mem usage", p.iterOutTbl.GetMemBytes()), zap.Int64("iterOutTbl disk usage", p.iterOutTbl.GetDiskBytes()),
+        zap.Int64("resTbl mem usage", p.resTbl.GetMemBytes()), zap.Int64("resTbl disk usage", p.resTbl.GetDiskBytes()),
+        zap.Int("resTbl rows", p.resTbl.NumRows()), zap.Uint64("iteration num", iterNum), zap.Error(err))
 }
diff --git a/pkg/executor/index_merge_reader.go b/pkg/executor/index_merge_reader.go
index cd88db82049da..620ae1efdfa5e 100644
--- a/pkg/executor/index_merge_reader.go
+++ b/pkg/executor/index_merge_reader.go
@@ -893,7 +893,7 @@ func handleWorkerPanic(ctx context.Context, finished, limitDone <-chan struct{},
             defer close(ch)
         }
         if r == nil {
-            logutil.BgLogger().Debug("worker finish without panic", zap.Any("worker", worker))
+            logutil.BgLogger().Debug("worker finish without panic", zap.String("worker", worker))
             return
         }
 
@@ -1916,7 +1916,7 @@ func (w *indexMergeTableScanWorker) pickAndExecTask(ctx context.Context, task **
 func (*indexMergeTableScanWorker) handleTableScanWorkerPanic(ctx context.Context, finished <-chan struct{}, task **indexMergeTableTask, worker string) func(r any) {
     return func(r any) {
         if r == nil {
-            logutil.BgLogger().Debug("worker finish without panic", zap.Any("worker", worker))
+            logutil.BgLogger().Debug("worker finish without panic", zap.String("worker", worker))
             return
         }
 
diff --git a/pkg/executor/internal/mpp/executor_with_retry.go b/pkg/executor/internal/mpp/executor_with_retry.go
index fd2bd527dc48c..0efefcb8cb7e9 100644
--- a/pkg/executor/internal/mpp/executor_with_retry.go
+++ b/pkg/executor/internal/mpp/executor_with_retry.go
@@ -206,7 +206,7 @@ func (r *ExecutorWithRetry) nextWithRecovery(ctx context.Context) error {
         }
 
         logutil.BgLogger().Info("recovery mpp error succeed, begin next retry",
-            zap.Any("mppErr", mppErr), zap.Any("recoveryCnt", r.mppErrRecovery.RecoveryCnt()))
+            zap.Any("mppErr", mppErr), zap.Uint32("recoveryCnt", r.mppErrRecovery.RecoveryCnt()))
 
         if _, err := r.setupMPPCoordinator(r.ctx, true); err != nil {
             logutil.BgLogger().Error("setup resp iter when recovery mpp err failed", zap.Any("err", err))
diff --git a/pkg/executor/traffic.go b/pkg/executor/traffic.go
index 889e3b99c0578..c16895c152f54 100644
--- a/pkg/executor/traffic.go
+++ b/pkg/executor/traffic.go
@@ -175,7 +175,7 @@ func request(ctx context.Context, exec exec.BaseExecutor, reader io.Reader, meth
         }
     }
     if err == nil {
-        logutil.Logger(ctx).Info("traffic request to tiproxy succeeds", zap.Any("addrs", addrs), zap.String("path", path))
+        logutil.Logger(ctx).Info("traffic request to tiproxy succeeds", zap.Strings("addrs", addrs), zap.String("path", path))
     }
     return resps, err
 }
diff --git a/pkg/resourcemanager/pool/workerpool/workpool_test.go b/pkg/resourcemanager/pool/workerpool/workpool_test.go
index 84569da0c386e..67f5967e6dcf1 100644
--- a/pkg/resourcemanager/pool/workerpool/workpool_test.go
+++ b/pkg/resourcemanager/pool/workerpool/workpool_test.go
@@ -47,7 +47,7 @@ func (w *MyWorker[T, R]) HandleTask(task int64Task, _ func(struct{})) {
 }
 
 func (w *MyWorker[T, R]) Close() {
-    logutil.BgLogger().Info("Close worker", zap.Any("id", w.id))
+    logutil.BgLogger().Info("Close worker", zap.Int("id", w.id))
 }
 
 func createMyWorker() Worker[int64Task, struct{}] {
diff --git a/pkg/store/copr/batch_coprocessor.go b/pkg/store/copr/batch_coprocessor.go
index 534feae30de08..655fd4a476e7b 100644
--- a/pkg/store/copr/batch_coprocessor.go
+++ b/pkg/store/copr/batch_coprocessor.go
@@ -555,7 +555,7 @@ func filterAliveStoresHelper(ctx context.Context, stores []string, ttl time.Dura
     }
     wg.Wait()
 
-    logutil.BgLogger().Info("detecting available mpp stores", zap.Any("total", len(stores)), zap.Any("alive", len(aliveIdx)))
+    logutil.BgLogger().Info("detecting available mpp stores", zap.Int("total", len(stores)), zap.Int("alive", len(aliveIdx)))
     return aliveIdx
 }
 
@@ -648,7 +648,7 @@ func buildBatchCopTasksConsistentHash(
     }
     storesBefFilter := len(storesStr)
     storesStr = filterAliveStoresStr(ctx, storesStr, ttl, kvStore)
-    logutil.BgLogger().Info("topo filter alive", zap.Any("topo", storesStr))
+    logutil.BgLogger().Info("topo filter alive", zap.Strings("topo", storesStr))
     if len(storesStr) == 0 {
         errMsg := "Cannot find proper topo to dispatch MPPTask: "
         if storesBefFilter == 0 {
@@ -707,16 +707,16 @@ func buildBatchCopTasksConsistentHash(
         }
     }
     logutil.BgLogger().Info("buildBatchCopTasksConsistentHash done",
-        zap.Any("len(tasks)", len(taskMap)),
-        zap.Any("len(tiflash_compute)", len(storesStr)),
-        zap.Any("dispatchPolicy", tiflashcompute.GetDispatchPolicy(dispatchPolicy)))
+        zap.Int("len(tasks)", len(taskMap)),
+        zap.Int("len(tiflash_compute)", len(storesStr)),
+        zap.String("dispatchPolicy", tiflashcompute.GetDispatchPolicy(dispatchPolicy)))
 
     if log.GetLevel() <= zap.DebugLevel {
         debugTaskMap := make(map[string]string, len(taskMap))
         for s, b := range taskMap {
             debugTaskMap[s] = fmt.Sprintf("addr: %s; regionInfos: %v", b.storeAddr, b.regionInfos)
         }
-        logutil.BgLogger().Debug("detailed info buildBatchCopTasksConsistentHash", zap.Any("taskMap", debugTaskMap), zap.Any("allStores", storesStr))
+        logutil.BgLogger().Debug("detailed info buildBatchCopTasksConsistentHash", zap.Any("taskMap", debugTaskMap), zap.Strings("allStores", storesStr))
     }
 
     if elapsed := time.Since(start); elapsed > time.Millisecond*500 {
@@ -1575,15 +1575,15 @@ func buildBatchCopTasksConsistentHashForPD(bo *backoff.Backoffer,
         }
     }
     logutil.BgLogger().Info("buildBatchCopTasksConsistentHashForPD done",
-        zap.Any("len(tasks)", len(taskMap)),
-        zap.Any("len(tiflash_compute)", len(stores)),
-        zap.Any("dispatchPolicy", tiflashcompute.GetDispatchPolicy(dispatchPolicy)))
+        zap.Int("len(tasks)", len(taskMap)),
+        zap.Int("len(tiflash_compute)", len(stores)),
+        zap.String("dispatchPolicy", tiflashcompute.GetDispatchPolicy(dispatchPolicy)))
     if log.GetLevel() <= zap.DebugLevel {
         debugTaskMap := make(map[string]string, len(taskMap))
         for s, b := range taskMap {
             debugTaskMap[s] = fmt.Sprintf("addr: %s; regionInfos: %v", b.storeAddr, b.regionInfos)
         }
-        logutil.BgLogger().Debug("detailed info buildBatchCopTasksConsistentHashForPD", zap.Any("taskMap", debugTaskMap), zap.Any("allStores", storesStr))
+        logutil.BgLogger().Debug("detailed info buildBatchCopTasksConsistentHashForPD", zap.Any("taskMap", debugTaskMap), zap.Strings("allStores", storesStr))
     }
     break
 }
diff --git a/pkg/store/copr/batch_coprocessor_test.go b/pkg/store/copr/batch_coprocessor_test.go
index dfc945e50f946..d528eaf512fba 100644
--- a/pkg/store/copr/batch_coprocessor_test.go
+++ b/pkg/store/copr/batch_coprocessor_test.go
@@ -277,7 +277,7 @@ func TestTopoFetcherBackoff(t *testing.T) {
         if err := fetchTopoBo.Backoff(tikv.BoTiFlashRPC(), expectErr); err != nil {
             break
         }
-        logutil.BgLogger().Info("TestTopoFetcherBackoff", zap.Any("retryNum", retryNum))
+        logutil.BgLogger().Info("TestTopoFetcherBackoff", zap.Int("retryNum", retryNum))
     }
     dura := time.Since(start)
     // fetchTopoMaxBackoff is milliseconds.
diff --git a/pkg/util/memoryusagealarm/memoryusagealarm.go b/pkg/util/memoryusagealarm/memoryusagealarm.go
index 3f26956a78ff9..71bc8d8de4734 100644
--- a/pkg/util/memoryusagealarm/memoryusagealarm.go
+++ b/pkg/util/memoryusagealarm/memoryusagealarm.go
@@ -203,15 +203,15 @@ func (record *memoryUsageAlarm) doRecord(memUsage uint64, instanceMemoryUsage ui
     fields := make([]zap.Field, 0, 6)
     fields = append(fields, zap.Bool("is tidb_server_memory_limit set", record.isServerMemoryLimitSet))
     if record.isServerMemoryLimitSet {
-        fields = append(fields, zap.Any("tidb_server_memory_limit", record.serverMemoryLimit))
-        fields = append(fields, zap.Any("tidb-server memory usage", memUsage))
+        fields = append(fields, zap.Uint64("tidb_server_memory_limit", record.serverMemoryLimit))
+        fields = append(fields, zap.Uint64("tidb-server memory usage", memUsage))
     } else {
-        fields = append(fields, zap.Any("system memory total", record.serverMemoryLimit))
-        fields = append(fields, zap.Any("system memory usage", memUsage))
-        fields = append(fields, zap.Any("tidb-server memory usage", instanceMemoryUsage))
+        fields = append(fields, zap.Uint64("system memory total", record.serverMemoryLimit))
+        fields = append(fields, zap.Uint64("system memory usage", memUsage))
+        fields = append(fields, zap.Uint64("tidb-server memory usage", instanceMemoryUsage))
     }
-    fields = append(fields, zap.Any("memory-usage-alarm-ratio", record.memoryUsageAlarmRatio))
-    fields = append(fields, zap.Any("record path", record.baseRecordDir))
+    fields = append(fields, zap.Float64("memory-usage-alarm-ratio", record.memoryUsageAlarmRatio))
+    fields = append(fields, zap.String("record path", record.baseRecordDir))
     logutil.BgLogger().Warn(fmt.Sprintf("tidb-server has the risk of OOM because of %s. Running SQLs and heap profile will be recorded in record path", alarmReason.String()), fields...)
     recordDir := filepath.Join(record.baseRecordDir, "record"+record.lastCheckTime.Format(time.RFC3339))
     if record.err = disk.CheckAndCreateDir(recordDir); record.err != nil {
diff --git a/pkg/util/tiflashcompute/topo_fetcher.go b/pkg/util/tiflashcompute/topo_fetcher.go
index 28bb0bad034df..51b23c3304c41 100644
--- a/pkg/util/tiflashcompute/topo_fetcher.go
+++ b/pkg/util/tiflashcompute/topo_fetcher.go
@@ -133,8 +133,8 @@ func getAutoScalerType(typ string) int {
 
 // InitGlobalTopoFetcher init globalTopoFetcher if is in disaggregated-tiflash mode. It's not thread-safe.
 func InitGlobalTopoFetcher(typ string, addr string, clusterID string, isFixedPool bool) (err error) {
-    logutil.BgLogger().Info("init globalTopoFetcher", zap.Any("type", typ), zap.Any("addr", addr),
-        zap.Any("clusterID", clusterID), zap.Any("isFixedPool", isFixedPool))
+    logutil.BgLogger().Info("init globalTopoFetcher", zap.String("type", typ), zap.String("addr", addr),
+        zap.String("clusterID", clusterID), zap.Bool("isFixedPool", isFixedPool))
     if clusterID == "" || addr == "" {
         return errors.Errorf("ClusterID(%s) or AutoScaler(%s) addr is empty", clusterID, addr)
     }
@@ -189,7 +189,7 @@ func (f *MockTopoFetcher) FetchAndGetTopo() ([]string, error) {
     }
 
     curTopo := f.getTopo()
-    logutil.BgLogger().Debug("FetchAndGetTopo", zap.Any("topo", curTopo))
+    logutil.BgLogger().Debug("FetchAndGetTopo", zap.Strings("topo", curTopo))
     return curTopo, nil
 }
@@ -216,7 +216,7 @@ func (f *MockTopoFetcher) assureTopo(nodeNum int) error {
         RawQuery: para.Encode(),
     }
     url := u.String()
-    logutil.BgLogger().Info("assureTopo", zap.Any("url", url))
+    logutil.BgLogger().Info("assureTopo", zap.String("url", url))
 
     newTopo, err := mockHTTPGetAndParseResp(url)
     if err != nil {
@@ -237,7 +237,7 @@ func (f *MockTopoFetcher) fetchTopo() error {
         Path: "/fetch_topo",
     }
     url := u.String()
-    logutil.BgLogger().Info("fetchTopo", zap.Any("url", url))
+    logutil.BgLogger().Info("fetchTopo", zap.String("url", url))
 
     newTopo, err := mockHTTPGetAndParseResp(url)
     if err != nil {
@@ -265,9 +265,9 @@ func httpGetAndParseResp(url string) ([]byte, error) {
     }
     bStr := string(b)
     if resp.StatusCode != http.StatusOK {
-        logutil.BgLogger().Error("http get mock AutoScaler failed", zap.Any("url", url),
-            zap.Any("status code", http.StatusText(resp.StatusCode)),
-            zap.Any("http body", bStr))
+        logutil.BgLogger().Error("http get mock AutoScaler failed", zap.String("url", url),
+            zap.String("status code", http.StatusText(resp.StatusCode)),
+            zap.String("http body", bStr))
         return nil, errTopoFetcher.GenWithStackByArgs(httpGetFailedErrMsg)
     }
     return b, nil
@@ -286,7 +286,7 @@ func mockHTTPGetAndParseResp(url string) ([]string, error) {
     if len(bStr) == 0 || len(newTopo) == 0 {
         return nil, errors.New("topo list is empty")
     }
-    logutil.BgLogger().Debug("httpGetAndParseResp succeed", zap.Any("new topo", newTopo))
+    logutil.BgLogger().Debug("httpGetAndParseResp succeed", zap.Strings("new topo", newTopo))
     return newTopo, nil
 }
@@ -335,7 +335,7 @@ func (f *AWSTopoFetcher) RecoveryAndGetTopo(recovery RecoveryType, oriCNCnt int)
 
 func (f *AWSTopoFetcher) fetchAndGetTopo(recovery RecoveryType, oriCNCnt int) (curTopo []string, err error) {
     defer func() {
-        logutil.BgLogger().Info("AWSTopoFetcher FetchAndGetTopo done", zap.Any("curTopo", curTopo))
+        logutil.BgLogger().Info("AWSTopoFetcher FetchAndGetTopo done", zap.Strings("curTopo", curTopo))
     }()
 
     if recovery != RecoveryTypeNull && recovery != RecoveryTypeMemLimit {
@@ -387,9 +387,9 @@ func (f *AWSTopoFetcher) tryUpdateTopo(newTopo *resumeAndGetTopologyResult) (upd
     cachedTopo, cachedTS := f.getTopo()
     newTS, err := strconv.ParseInt(newTopo.Timestamp, 10, 64)
     defer func() {
-        logutil.BgLogger().Info("try update topo", zap.Any("updated", updated), zap.Any("err", err),
-            zap.Any("cached TS", cachedTS), zap.Any("cached Topo", cachedTopo),
-            zap.Any("fetch TS", newTopo.Timestamp), zap.Any("converted TS", newTS), zap.Any("fetch topo", newTopo.Topology))
+        logutil.BgLogger().Info("try update topo", zap.Bool("updated", updated), zap.Any("err", err),
+            zap.Int64("cached TS", cachedTS), zap.Strings("cached Topo", cachedTopo),
+            zap.String("fetch TS", newTopo.Timestamp), zap.Int64("converted TS", newTS), zap.Strings("fetch topo", newTopo.Topology))
     }()
     if err != nil {
         return updated, errTopoFetcher.GenWithStackByArgs(parseTopoTSFailedErrMsg)
     }
@@ -418,7 +418,7 @@ func (f *AWSTopoFetcher) fetchFixedPoolTopo() error {
         Path: awsFixedPoolHTTPPath,
     }
     url := u.String()
-    logutil.BgLogger().Info("fetchFixedPoolTopo", zap.Any("url", url))
+    logutil.BgLogger().Info("fetchFixedPoolTopo", zap.String("url", url))
 
     newTopo, err := awsHTTPGetAndParseResp(url)
     if err != nil {
@@ -452,7 +452,7 @@ func (f *AWSTopoFetcher) fetchTopo(recovery RecoveryType, oriCNCnt int) error {
         RawQuery: para.Encode(),
     }
     url := u.String()
-    logutil.BgLogger().Info("fetchTopo", zap.Any("url", url))
+    logutil.BgLogger().Info("fetchTopo", zap.String("url", url))
 
     newTopo, err := awsHTTPGetAndParseResp(url)
     if err != nil {
diff --git a/tests/globalkilltest/util.go b/tests/globalkilltest/util.go
index a912ae0ad0959..48286e1961d35 100644
--- a/tests/globalkilltest/util.go
+++ b/tests/globalkilltest/util.go
@@ -92,7 +92,7 @@ func checkTiKVStatus() error {
         return nil, errors.Trace(err)
     }
 
-    log.Info("TiKV status", zap.Any("status", resp.StatusCode))
+    log.Info("TiKV status", zap.Int("status", resp.StatusCode))
     if resp.StatusCode != http.StatusOK {
         return nil, fmt.Errorf("TiKV status code %d", resp.StatusCode)
     }