diff --git a/.gitignore b/.gitignore index 4c2ce9d9eba..b6907cd2dad 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ default.pd .DS_Store tags /.retools/ +default* diff --git a/Gopkg.lock b/Gopkg.lock index e2379a0b2ac..0fbe78ff348 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -325,6 +325,14 @@ pruneopts = "NUT" revision = "eec48234540b323cdfc3e92c4341d667fa245178" +[[projects]] + branch = "master" + digest = "1:fa93713f641c5eba4afa18d139504c08d956b47cb5901bbd791c3bff69b4af7e" + name = "github.com/pingcap/log" + packages = ["."] + pruneopts = "NUT" + revision = "479153f07ebdcbb42a14c99d9c16cefbe531071d" + [[projects]] digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121" name = "github.com/pkg/errors" @@ -467,7 +475,7 @@ version = "v1.1.0" [[projects]] - digest = "1:25531f2a1f9e75b832a945c670bf84f8caf5bd0fe8d8ad5bab5f13e162680922" + digest = "1:4af5a022f8c5c97734c3c6fad58fdce078e2322ddc98d3102b225a729f03916e" name = "go.uber.org/zap" packages = [ ".", @@ -478,8 +486,8 @@ "zapcore", ] pruneopts = "NUT" - revision = "eeedf312bc6c57391d84767a4cd413f02a917974" - version = "v1.8.0" + revision = "27376062155ad36be76b0f12cf1572a221d3a48c" + version = "v1.10.0" [[projects]] branch = "master" @@ -622,7 +630,6 @@ "github.com/coreos/etcd/embed", "github.com/coreos/etcd/etcdserver", "github.com/coreos/etcd/mvcc/mvccpb", - "github.com/coreos/etcd/pkg/logutil", "github.com/coreos/etcd/pkg/transport", "github.com/coreos/etcd/pkg/types", "github.com/coreos/etcd/raft", @@ -643,6 +650,7 @@ "github.com/pingcap/kvproto/pkg/eraftpb", "github.com/pingcap/kvproto/pkg/metapb", "github.com/pingcap/kvproto/pkg/pdpb", + "github.com/pingcap/log", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/push", @@ -652,6 +660,7 @@ "github.com/unrolled/render", "github.com/urfave/negroni", "go.uber.org/zap", + "go.uber.org/zap/zapcore", "golang.org/x/net/context", "google.golang.org/grpc", 
"google.golang.org/grpc/codes", diff --git a/Gopkg.toml b/Gopkg.toml index 9c97b30373c..78979709138 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -43,3 +43,11 @@ [[override]] name = "github.com/BurntSushi/toml" revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" + +[[constraint]] + name = "github.com/pingcap/log" + branch = "master" + +[[constraint]] + name = "go.uber.org/zap" + version = "1.9.1" diff --git a/client/client.go b/client/client.go index 74071f22c50..4c35385eaf5 100644 --- a/client/client.go +++ b/client/client.go @@ -25,8 +25,9 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -139,7 +140,7 @@ type SecurityOption struct { // NewClient creates a PD client. func NewClient(pdAddrs []string, security SecurityOption) (Client, error) { - log.Infof("[pd] create pd client with endpoints %v", pdAddrs) + log.Info("[pd] create pd client with endpoints", zap.Strings("pd-address", pdAddrs)) ctx, cancel := context.WithCancel(context.Background()) c := &client{ urls: addrsToUrls(pdAddrs), @@ -158,7 +159,7 @@ func NewClient(pdAddrs []string, security SecurityOption) (Client, error) { if err := c.updateLeader(); err != nil { return nil, err } - log.Infof("[pd] init cluster id %v", c.clusterID) + log.Info("[pd] init cluster id", zap.Uint64("cluster-id", c.clusterID)) c.wg.Add(3) go c.tsLoop() @@ -185,7 +186,7 @@ func (c *client) initClusterID() error { members, err := c.getMembers(timeoutCtx, u) timeoutCancel() if err != nil || members.GetHeader() == nil { - log.Errorf("[pd] failed to get cluster id: %v", err) + log.Error("[pd] failed to get cluster id", zap.Error(err)) continue } c.clusterID = members.GetHeader().GetClusterId() @@ -241,7 +242,7 @@ func (c *client) switchLeader(addrs 
[]string) error { return nil } - log.Infof("[pd] leader switches to: %v, previous: %v", addr, oldLeader) + log.Info("[pd] switch leader", zap.String("new-leader", addr), zap.String("old-leader", oldLeader)) if _, err := c.getOrCreateGRPCConn(addr); err != nil { return err } @@ -326,7 +327,7 @@ func (c *client) leaderLoop() { } if err := c.updateLeader(); err != nil { - log.Errorf("[pd] failed updateLeader: %v", err) + log.Error("[pd] failed updateLeader", zap.Error(err)) } } } @@ -384,7 +385,7 @@ func (c *client) tsLoop() { return default: } - log.Errorf("[pd] create tso stream error: %v", err) + log.Error("[pd] create tso stream error", zap.Error(err)) c.ScheduleCheckLeader() cancel() c.revokeTSORequest(errors.WithStack(err)) @@ -430,7 +431,7 @@ func (c *client) tsLoop() { return default: } - log.Errorf("[pd] getTS error: %v", err) + log.Error("[pd] getTS error", zap.Error(err)) c.ScheduleCheckLeader() cancel() stream, cancel = nil, nil @@ -512,7 +513,7 @@ func (c *client) Close() { defer c.connMu.Unlock() for _, cc := range c.connMu.clientConns { if err := cc.Close(); err != nil { - log.Errorf("[pd] failed close grpc clientConn: %v", err) + log.Error("[pd] failed close grpc clientConn", zap.Error(err)) } } } diff --git a/cmd/pd-server/main.go b/cmd/pd-server/main.go index 39c457415ce..8cf1306930c 100644 --- a/cmd/pd-server/main.go +++ b/cmd/pd-server/main.go @@ -16,18 +16,18 @@ package main import ( "context" "flag" - "fmt" "os" "os/signal" "syscall" - "github.com/grpc-ecosystem/go-grpc-prometheus" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/pkg/metricutil" "github.com/pingcap/pd/server" "github.com/pingcap/pd/server/api" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" // Register schedulers. 
_ "github.com/pingcap/pd/server/schedulers" @@ -41,7 +41,7 @@ func main() { if cfg.Version { server.PrintPDInfo() - os.Exit(0) + exit(0) } defer logutil.LogPanic() @@ -49,14 +49,24 @@ func main() { switch errors.Cause(err) { case nil: case flag.ErrHelp: - os.Exit(0) + exit(0) default: - log.Fatalf("parse cmd flags error: %s\n", fmt.Sprintf("%+v", err)) + log.Fatal("parse cmd flags error", zap.Error(err)) } + // New zap logger + err = cfg.SetupLogger() + if err == nil { + log.ReplaceGlobals(cfg.GetZapLogger(), cfg.GetZapLogProperties()) + } else { + log.Fatal("initialize logger error", zap.Error(err)) + } + // Flushing any buffered log entries + defer log.Sync() + // The old logger err = logutil.InitLogger(&cfg.Log) if err != nil { - log.Fatalf("initialize logger error: %s\n", fmt.Sprintf("%+v", err)) + log.Fatal("initialize logger error", zap.Error(err)) } server.LogPDInfo() @@ -72,15 +82,15 @@ func main() { err = server.PrepareJoinCluster(cfg) if err != nil { - log.Fatal("join error ", fmt.Sprintf("%+v", err)) + log.Fatal("join meet error", zap.Error(err)) } svr, err := server.CreateServer(cfg, api.NewHandler) if err != nil { - log.Fatalf("create server failed: %v", fmt.Sprintf("%+v", err)) + log.Fatal("create server failed", zap.Error(err)) } if err = server.InitHTTPClient(svr); err != nil { - log.Fatalf("initial http client for api handler failed: %v", fmt.Sprintf("%+v", err)) + log.Fatal("initial http client for api handler failed", zap.Error(err)) } sc := make(chan os.Signal, 1) @@ -98,17 +108,22 @@ func main() { }() if err := svr.Run(ctx); err != nil { - log.Fatalf("run server failed: %v", fmt.Sprintf("%+v", err)) + log.Fatal("run server failed", zap.Error(err)) } <-ctx.Done() - log.Infof("Got signal [%d] to exit.", sig) + log.Info("Got signal to exit", zap.String("signal", sig.String())) svr.Close() switch sig { case syscall.SIGTERM: - os.Exit(0) + exit(0) default: - os.Exit(1) + exit(1) } } + +func exit(code int) { + log.Sync() + os.Exit(code) +} diff --git 
a/pkg/etcdutil/etcdutil.go b/pkg/etcdutil/etcdutil.go index 2e9bdf1a1dd..2383ba4ca94 100644 --- a/pkg/etcdutil/etcdutil.go +++ b/pkg/etcdutil/etcdutil.go @@ -22,8 +22,9 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/pkg/types" + log "github.com/pingcap/log" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -59,7 +60,7 @@ func CheckClusterID(localClusterID types.ID, um types.URLsMap, tlsConfig *tls.Co trp.CloseIdleConnections() if gerr != nil { // Do not return error, because other members may be not ready. - log.Error(gerr) + log.Error("failed to get cluster from remote", zap.Error(gerr)) continue } diff --git a/pkg/logutil/log.go b/pkg/logutil/log.go index 5ae1bf9264c..23aa90e91b3 100644 --- a/pkg/logutil/log.go +++ b/pkg/logutil/log.go @@ -19,14 +19,16 @@ import ( "os" "path" "runtime" - "runtime/debug" "strings" "sync" "github.com/coreos/etcd/raft" "github.com/coreos/pkg/capnslog" + zaplog "github.com/pingcap/log" "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" "google.golang.org/grpc/grpclog" lumberjack "gopkg.in/natefinch/lumberjack.v2" ) @@ -144,6 +146,23 @@ func StringToLogLevel(level string) log.Level { return defaultLogLevel } +// StringToZapLogLevel translates log level string to log level. +func StringToZapLogLevel(level string) zapcore.Level { + switch strings.ToLower(level) { + case "fatal": + return zapcore.FatalLevel + case "error": + return zapcore.ErrorLevel + case "warn", "warning": + return zapcore.WarnLevel + case "debug": + return zapcore.DebugLevel + case "info": + return zapcore.InfoLevel + } + return zapcore.InfoLevel +} + // textFormatter is for compatibility with ngaut/log type textFormatter struct { DisableTimestamp bool @@ -196,7 +215,7 @@ func stringToLogFormatter(format string, disableTimestamp bool) log.Formatter { } // InitFileLog initializes file based logging options. 
-func InitFileLog(cfg *FileLogConfig) error { +func InitFileLog(cfg *zaplog.FileLogConfig) error { if st, err := os.Stat(cfg.Filename); err == nil { if st.IsDir() { return errors.New("can't use directory as log file name") @@ -238,7 +257,7 @@ func (lg *wrapLogrus) V(l int) bool { var once sync.Once // InitLogger initializes PD's logger. -func InitLogger(cfg *LogConfig) error { +func InitLogger(cfg *zaplog.Config) error { var err error once.Do(func() { @@ -271,6 +290,6 @@ func InitLogger(cfg *LogConfig) error { // Commonly used with a `defer`. func LogPanic() { if e := recover(); e != nil { - log.Fatalf("panic: %v, stack: %s", e, string(debug.Stack())) + zaplog.Fatal("panic", zap.Reflect("recover", e)) } } diff --git a/pkg/logutil/log_test.go b/pkg/logutil/log_test.go index 07b8815997e..654a6ada8c0 100644 --- a/pkg/logutil/log_test.go +++ b/pkg/logutil/log_test.go @@ -20,7 +20,9 @@ import ( "github.com/coreos/pkg/capnslog" . "github.com/pingcap/check" + zaplog "github.com/pingcap/log" log "github.com/sirupsen/logrus" + "go.uber.org/zap/zapcore" ) const ( @@ -51,9 +53,19 @@ func (s *testLogSuite) TestStringToLogLevel(c *C) { c.Assert(StringToLogLevel("whatever"), Equals, log.InfoLevel) } +func (s *testLogSuite) TestStringToZapLogLevel(c *C) { + c.Assert(StringToZapLogLevel("fatal"), Equals, zapcore.FatalLevel) + c.Assert(StringToZapLogLevel("ERROR"), Equals, zapcore.ErrorLevel) + c.Assert(StringToZapLogLevel("warn"), Equals, zapcore.WarnLevel) + c.Assert(StringToZapLogLevel("warning"), Equals, zapcore.WarnLevel) + c.Assert(StringToZapLogLevel("debug"), Equals, zapcore.DebugLevel) + c.Assert(StringToZapLogLevel("info"), Equals, zapcore.InfoLevel) + c.Assert(StringToZapLogLevel("whatever"), Equals, zapcore.InfoLevel) +} + // TestLogging assure log format and log redirection works. 
func (s *testLogSuite) TestLogging(c *C) { - conf := &LogConfig{Level: "warn", File: FileLogConfig{}} + conf := &zaplog.Config{Level: "warn", File: zaplog.FileLogConfig{}} c.Assert(InitLogger(conf), IsNil) log.SetOutput(s.buf) @@ -76,3 +88,8 @@ func (s *testLogSuite) TestLogging(c *C) { c.Assert(entry, Matches, logPattern) c.Assert(strings.Contains(entry, "log_test.go"), IsTrue) } + +func (s *testLogSuite) TestFileLog(c *C) { + c.Assert(InitFileLog(&zaplog.FileLogConfig{Filename: "/tmp"}), NotNil) + c.Assert(InitFileLog(&zaplog.FileLogConfig{Filename: "/tmp/test_file_log", MaxSize: 0}), IsNil) +} diff --git a/pkg/metricutil/metricutil.go b/pkg/metricutil/metricutil.go index 9650667c3e9..a84c4a039ef 100644 --- a/pkg/metricutil/metricutil.go +++ b/pkg/metricutil/metricutil.go @@ -17,10 +17,11 @@ import ( "time" "unicode" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/typeutil" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/push" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const zeroDuration = time.Duration(0) @@ -66,7 +67,7 @@ func prometheusPushClient(job, addr string, interval time.Duration) { prometheus.DefaultGatherer, ) if err != nil { - log.Errorf("could not push metrics to Prometheus Pushgateway: %v", err) + log.Error("could not push metrics to Prometheus Pushgateway", zap.Error(err)) } time.Sleep(interval) diff --git a/pkg/tempurl/tempurl.go b/pkg/tempurl/tempurl.go index 6adc4917adb..9ec9ea50eb5 100644 --- a/pkg/tempurl/tempurl.go +++ b/pkg/tempurl/tempurl.go @@ -19,7 +19,8 @@ import ( "sync" "time" - log "github.com/sirupsen/logrus" + log "github.com/pingcap/log" + "go.uber.org/zap" ) var ( @@ -42,12 +43,12 @@ func Alloc() string { func tryAllocTestURL() string { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { - log.Fatal(err) + log.Fatal("listen failed", zap.Error(err)) } addr := fmt.Sprintf("http://%s", l.Addr()) err = l.Close() if err != nil { - log.Fatal(err) + 
log.Fatal("close failed", zap.Error(err)) } testAddrMutex.Lock() diff --git a/server/api/log.go b/server/api/log.go index 714909f9f18..461e330cdd6 100644 --- a/server/api/log.go +++ b/server/api/log.go @@ -18,9 +18,9 @@ import ( "io/ioutil" "net/http" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/server" - log "github.com/sirupsen/logrus" "github.com/unrolled/render" ) @@ -51,7 +51,7 @@ func (h *logHandler) Handle(w http.ResponseWriter, r *http.Request) { } h.svr.SetLogLevel(level) - log.SetLevel(logutil.StringToLogLevel(level)) + log.SetLevel(logutil.StringToZapLogLevel(level)) h.rd.JSON(w, http.StatusOK, nil) } diff --git a/server/api/member.go b/server/api/member.go index 139caa75870..75eea86edb8 100644 --- a/server/api/member.go +++ b/server/api/member.go @@ -21,11 +21,12 @@ import ( "github.com/gorilla/mux" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/etcdutil" "github.com/pingcap/pd/server" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" "github.com/unrolled/render" + "go.uber.org/zap" ) type memberHandler struct { @@ -58,12 +59,12 @@ func (h *memberHandler) getMembers() (*pdpb.GetMembersResponse, error) { // Fill leader priorities. 
for _, m := range members.GetMembers() { if h.svr.GetEtcdLeader() == 0 { - log.Warnf("no etcd leader, skip get leader priority, member: %v", m.GetMemberId()) + log.Warn("no etcd leader, skip get leader priority", zap.Uint64("member", m.GetMemberId())) continue } leaderPriority, e := h.svr.GetMemberLeaderPriority(m.GetMemberId()) if e != nil { - log.Errorf("failed to load leader priority, member: %v, err: %v", m.GetMemberId(), e) + log.Error("failed to load leader priority", zap.Uint64("member", m.GetMemberId()), zap.Error(e)) continue } m.LeaderPriority = int32(leaderPriority) diff --git a/server/api/redirector.go b/server/api/redirector.go index d3fff1cd153..4f1100ca082 100644 --- a/server/api/redirector.go +++ b/server/api/redirector.go @@ -19,8 +19,9 @@ import ( "net/url" "strings", + log "github.com/pingcap/log" "github.com/pingcap/pd/server" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -48,7 +49,7 @@ func (h *redirector) ServeHTTP(w http.ResponseWriter, r *http.Request, next http // Prevent more than one redirection.
if name := r.Header.Get(redirectorHeader); len(name) != 0 { - log.Errorf("redirect from %v, but %v is not leader", name, h.s.Name()) + log.Error("redirect but server is not leader", zap.String("from", name), zap.String("server", h.s.Name())) http.Error(w, errRedirectToNotLeader, http.StatusInternalServerError) return } @@ -93,21 +94,21 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request) resp, err := p.client.Do(r) if err != nil { - log.Error(err) + log.Error("request failed", zap.Error(err)) continue } b, err := ioutil.ReadAll(resp.Body) resp.Body.Close() if err != nil { - log.Error(err) + log.Error("request failed", zap.Error(err)) continue } copyHeader(w.Header(), resp.Header) w.WriteHeader(resp.StatusCode) if _, err := w.Write(b); err != nil { - log.Error(err) + log.Error("write failed", zap.Error(err)) continue } diff --git a/server/api/region.go b/server/api/region.go index 4c4881ed99f..5399d8b3286 100644 --- a/server/api/region.go +++ b/server/api/region.go @@ -15,10 +15,8 @@ package api import ( "container/heap" - "fmt" "net/http" "strconv" - "strings" "github.com/gorilla/mux" "github.com/pingcap/kvproto/pkg/metapb" @@ -50,8 +48,8 @@ func newRegionInfo(r *core.RegionInfo) *regionInfo { } return ®ionInfo{ ID: r.GetID(), - StartKey: strings.Trim(fmt.Sprintf("%q", r.GetStartKey()), "\""), - EndKey: strings.Trim(fmt.Sprintf("%q", r.GetEndKey()), "\""), + StartKey: string(core.HexRegionKey(r.GetStartKey())), + EndKey: string(core.HexRegionKey(r.GetEndKey())), RegionEpoch: r.GetRegionEpoch(), Peers: r.GetPeers(), Leader: r.GetLeader(), diff --git a/server/api/server_test.go b/server/api/server_test.go index 68d9b6131d3..542342a8f02 100644 --- a/server/api/server_test.go +++ b/server/api/server_test.go @@ -18,12 +18,14 @@ import ( "net/http" "os" "strings" + "sync" "testing" "time" . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/testutil" "github.com/pingcap/pd/server" "google.golang.org/grpc" @@ -73,6 +75,8 @@ func mustNewServer(c *C) (*server.Server, cleanUpFunc) { return svrs[0], cleanup } +var zapLogOnce sync.Once + func mustNewCluster(c *C, num int) ([]*server.Config, []*server.Server, cleanUpFunc) { svrs := make([]*server.Server, 0, num) cfgs := server.NewTestMultiConfig(num) @@ -80,6 +84,11 @@ func mustNewCluster(c *C, num int) ([]*server.Config, []*server.Server, cleanUpF ch := make(chan *server.Server, num) for _, cfg := range cfgs { go func(cfg *server.Config) { + err := cfg.SetupLogger() + c.Assert(err, IsNil) + zapLogOnce.Do(func() { + log.ReplaceGlobals(cfg.GetZapLogger(), cfg.GetZapLogProperties()) + }) s, err := server.CreateServer(cfg, NewHandler) c.Assert(err, IsNil) err = s.Run(context.TODO()) diff --git a/server/api/util.go b/server/api/util.go index 386e07aaa38..ebf7bc62dab 100644 --- a/server/api/util.go +++ b/server/api/util.go @@ -20,10 +20,10 @@ import ( "io/ioutil" "net/http" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/apiutil" errcode "github.com/pingcap/pd/pkg/error_code" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" "github.com/unrolled/render" ) @@ -42,7 +42,7 @@ var dialClient = &http.Client{ // If the error is nil, this also responds with a 500 and logs at the error level. 
func errorResp(rd *render.Render, w http.ResponseWriter, err error) { if err == nil { - log.Errorf("nil given to errorResp") + log.Error("nil is given to errorResp") rd.JSON(w, http.StatusInternalServerError, "nil error") return } diff --git a/server/cache/ttl.go b/server/cache/ttl.go index e0dd4d325e1..afb2823f3a4 100644 --- a/server/cache/ttl.go +++ b/server/cache/ttl.go @@ -17,7 +17,8 @@ import ( "sync" "time" - log "github.com/sirupsen/logrus" + log "github.com/pingcap/log" + "go.uber.org/zap" ) type ttlCacheItem struct { @@ -124,7 +125,7 @@ func (c *TTL) doGC() { } c.Unlock() - log.Debugf("GC %d items", count) + log.Debug("TTL GC items", zap.Int("count", count)) } } diff --git a/server/cluster.go b/server/cluster.go index 5e7a239a66f..8c89a5a2bb2 100644 --- a/server/cluster.go +++ b/server/cluster.go @@ -21,12 +21,13 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" errcode "github.com/pingcap/pd/pkg/error_code" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) var backgroundJobInterval = time.Minute @@ -354,7 +355,9 @@ func (c *RaftCluster) putStore(store *metapb.Store) error { // Check location labels. 
for _, k := range c.cachedCluster.GetLocationLabels() { if v := s.GetLabelValue(k); len(v) == 0 { - log.Warnf("missing location label %q in store %v", k, s) + log.Warn("missing location label", + zap.Stringer("store", s.Store), + zap.String("label-key", k)) } } return cluster.putStore(s) @@ -384,7 +387,9 @@ func (c *RaftCluster) RemoveStore(storeID uint64) error { } store.State = metapb.StoreState_Offline - log.Warnf("[store %d] store %s has been Offline", store.GetId(), store.GetAddress()) + log.Warn("store has been offline", + zap.Uint64("store-id", store.GetId()), + zap.String("store-address", store.GetAddress())) return cluster.putStore(store) } @@ -412,11 +417,13 @@ func (c *RaftCluster) BuryStore(storeID uint64, force bool) error { // revive:di if !force { return errors.New("store is still up, please remove store gracefully") } - log.Warnf("forcedly bury store %v", store) + log.Warn("forcedly bury store", zap.Stringer("store", store.Store)) } store.State = metapb.StoreState_Tombstone - log.Warnf("[store %d] store %s has been Tombstone", store.GetId(), store.GetAddress()) + log.Warn("store has been Tombstone", + zap.Uint64("store-id", store.GetId()), + zap.String("store-address", store.GetAddress())) return cluster.putStore(store) } @@ -433,7 +440,9 @@ func (c *RaftCluster) SetStoreState(storeID uint64, state metapb.StoreState) err } store.State = state - log.Warnf("[store %d] set state to %v", storeID, state.String()) + log.Warn("store update state", + zap.Uint64("store-id", storeID), + zap.Stringer("new-state", state)) return cluster.putStore(store) } @@ -476,7 +485,9 @@ func (c *RaftCluster) checkStores() { // If the store is empty, it can be buried. 
if cluster.getStoreRegionCount(offlineStore.GetId()) == 0 { if err := c.BuryStore(offlineStore.GetId(), false); err != nil { - log.Errorf("bury store %v failed: %v", offlineStore, err) + log.Error("bury store failed", + zap.Stringer("store", offlineStore), + zap.Error(err)) } } else { offlineStores = append(offlineStores, offlineStore) @@ -489,7 +500,7 @@ func (c *RaftCluster) checkStores() { if upStoreCount < c.s.GetConfig().Replication.MaxReplicas { for _, offlineStore := range offlineStores { - log.Warnf("store %v may not turn into Tombstone, there are no extra up node has enough space to accommodate the extra replica", offlineStore) + log.Warn("store may not turn into Tombstone, there are no extra up node has enough space to accommodate the extra replica", zap.Stringer("store", offlineStore)) } } } @@ -500,13 +511,18 @@ func (c *RaftCluster) checkOperators() { // after region is merged, it will not heartbeat anymore // the operator of merged region will not timeout actively if c.cachedCluster.GetRegion(op.RegionID()) == nil { - log.Debugf("remove operator %v cause region %d is merged", op, op.RegionID()) + log.Debug("remove operator cause region is merged", + zap.Uint64("region-id", op.RegionID()), + zap.Stringer("operator", op)) co.removeOperator(op) continue } if op.IsTimeout() { - log.Infof("[region %v] operator timeout: %s", op.RegionID(), op) + log.Info("operator timeout", + zap.Uint64("region-id", op.RegionID()), + zap.Stringer("operator", op)) + operatorCounter.WithLabelValues(op.Desc(), "timeout").Inc() co.removeOperator(op) } @@ -531,7 +547,7 @@ func (c *RaftCluster) collectHealthStatus() { client := c.s.GetClient() members, err := GetMembers(client) if err != nil { - log.Info("get members error:", err) + log.Error("get members error", zap.Error(err)) } unhealth := c.s.CheckHealth(members) for _, member := range members { diff --git a/server/cluster_info.go b/server/cluster_info.go index 91abab9c572..df9afb55065 100644 --- a/server/cluster_info.go +++ 
b/server/cluster_info.go @@ -21,10 +21,11 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) type clusterInfo struct { @@ -68,13 +69,19 @@ func loadClusterInfo(id core.IDAllocator, kv *core.KV, opt *scheduleOption) (*cl if err := kv.LoadStores(c.core.Stores); err != nil { return nil, err } - log.Infof("load %v stores cost %v", c.core.Stores.GetStoreCount(), time.Since(start)) + log.Info("load stores", + zap.Int("count", c.core.Stores.GetStoreCount()), + zap.Duration("cost", time.Since(start)), + ) start = time.Now() if err := kv.LoadRegions(c.core.Regions); err != nil { return nil, err } - log.Infof("load %v regions cost %v", c.core.Regions.GetRegionCount(), time.Since(start)) + log.Info("load regions", + zap.Int("count", c.core.Regions.GetRegionCount()), + zap.Duration("cost", time.Since(start)), + ) return c, nil } @@ -101,9 +108,11 @@ func (c *clusterInfo) OnStoreVersionChange() { c.opt.SetClusterVersion(*minVersion) err := c.opt.persist(c.kv) if err != nil { - log.Infof("persist cluster version meet error: %s", err) + log.Error("persist cluster version meet error", zap.Error(err)) } - log.Infof("cluster version changed from %s to %s", clusterVersion, minVersion) + log.Info("cluster version changed", + zap.Stringer("old-cluster-version", clusterVersion), + zap.Stringer("new-cluster-version", minVersion)) CheckPDVersion(c.opt) } } @@ -123,7 +132,7 @@ func (c *clusterInfo) allocID() (uint64, error) { func (c *clusterInfo) AllocPeer(storeID uint64) (*metapb.Peer, error) { peerID, err := c.allocID() if err != nil { - log.Errorf("failed to alloc peer: %v", err) + log.Error("failed to alloc peer", zap.Error(err)) return nil, err } peer := &metapb.Peer{ @@ -471,7 +480,10 @@ func (c *clusterInfo) 
handleRegionHeartbeat(region *core.RegionInfo) error { // Mark isNew if the region in cache does not have leader. var saveKV, saveCache, isNew bool if origin == nil { - log.Debugf("[region %d] Insert new region {%v}", region.GetID(), region) + log.Debug("insert new region", + zap.Uint64("region-id", region.GetID()), + zap.Stringer("meta-region", core.RegionToHexMeta(region.GetMeta())), + ) saveKV, saveCache, isNew = true, true, true } else { r := region.GetRegionEpoch() @@ -481,18 +493,32 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error { return ErrRegionIsStale(region.GetMeta(), origin.GetMeta()) } if r.GetVersion() > o.GetVersion() { - log.Infof("[region %d] %s, Version changed from {%d} to {%d}", region.GetID(), core.DiffRegionKeyInfo(origin, region), o.GetVersion(), r.GetVersion()) + log.Info("region Version changed", + zap.Uint64("region-id", region.GetID()), + zap.String("detail", core.DiffRegionKeyInfo(origin, region)), + zap.Uint64("old-version", o.GetVersion()), + zap.Uint64("new-version", r.GetVersion()), + ) saveKV, saveCache = true, true } if r.GetConfVer() > o.GetConfVer() { - log.Infof("[region %d] %s, ConfVer changed from {%d} to {%d}", region.GetID(), core.DiffRegionPeersInfo(origin, region), o.GetConfVer(), r.GetConfVer()) + log.Info("region ConfVer changed", + zap.Uint64("region-id", region.GetID()), + zap.String("detail", core.DiffRegionPeersInfo(origin, region)), + zap.Uint64("old-confver", o.GetConfVer()), + zap.Uint64("new-confver", r.GetConfVer()), + ) saveKV, saveCache = true, true } if region.GetLeader().GetId() != origin.GetLeader().GetId() { if origin.GetLeader().GetId() == 0 { isNew = true } else { - log.Infof("[region %d] Leader changed from store {%d} to {%d}", region.GetID(), origin.GetLeader().GetStoreId(), region.GetLeader().GetStoreId()) + log.Info("leader changed", + zap.Uint64("region-id", region.GetID()), + zap.Uint64("from", origin.GetLeader().GetStoreId()), + zap.Uint64("to", 
region.GetLeader().GetStoreId()), + ) } saveCache = true } @@ -517,7 +543,10 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error { if err := c.kv.SaveRegion(region.GetMeta()); err != nil { // Not successfully saved to kv is not fatal, it only leads to longer warm-up // after restart. Here we only log the error then go on updating cache. - log.Errorf("[region %d] fail to save region %v: %v", region.GetID(), region, err) + log.Error("fail to save region to kv", + zap.Uint64("region-id", region.GetID()), + zap.Stringer("region-meta", core.RegionToHexMeta(region.GetMeta())), + zap.Error(err)) } } if !isWriteUpdate && !isReadUpdate && !saveCache && !isNew { @@ -535,7 +564,10 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error { if c.kv != nil { for _, item := range overlaps { if err := c.kv.DeleteRegion(item); err != nil { - log.Errorf("[region %d] fail to delete region %v: %v", item.GetId(), item, err) + log.Error("fail to delete region from kv", + zap.Uint64("region-id", item.GetId()), + zap.Stringer("region-meta", core.RegionToHexMeta(item)), + zap.Error(err)) } } } diff --git a/server/cluster_worker.go b/server/cluster_worker.go index 57fbbe42636..1d381d711f5 100644 --- a/server/cluster_worker.go +++ b/server/cluster_worker.go @@ -15,14 +15,14 @@ package server import ( "bytes" - "fmt" "github.com/gogo/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // HandleRegionHeartbeat processes RegionInfo reports from client. @@ -33,8 +33,8 @@ func (c *RaftCluster) HandleRegionHeartbeat(region *core.RegionInfo) error { // If the region peer count is 0, then we should not handle this. 
if len(region.GetPeers()) == 0 { - log.Warnf("invalid region, zero region peer count - %v", region) - return errors.Errorf("invalid region, zero region peer count - %v", region) + log.Warn("invalid region, zero region peer count", zap.Stringer("region-meta", core.RegionToHexMeta(region.GetMeta()))) + return errors.Errorf("invalid region, zero region peer count: %v", core.RegionToHexMeta(region.GetMeta())) } c.coordinator.dispatch(region) @@ -76,7 +76,7 @@ func (c *RaftCluster) validRequestRegion(reqRegion *metapb.Region) error { startKey := reqRegion.GetStartKey() region, _ := c.GetRegionByKey(startKey) if region == nil { - return errors.Errorf("region not found, request region: %v", reqRegion) + return errors.Errorf("region not found, request region: %v", core.RegionToHexMeta(reqRegion)) } // If the request epoch is less than current region epoch, then returns an error. reqRegionEpoch := reqRegion.GetRegionEpoch() @@ -164,7 +164,10 @@ func (c *RaftCluster) handleReportSplit(request *pdpb.ReportSplitRequest) (*pdpb err := c.checkSplitRegion(left, right) if err != nil { - log.Warnf("report split region is invalid - %v, %v", request, fmt.Sprintf("%+v", err)) + log.Warn("report split region is invalid", + zap.Stringer("left-region", core.RegionToHexMeta(left)), + zap.Stringer("right-region", core.RegionToHexMeta(right)), + zap.Error(err)) return nil, err } @@ -172,20 +175,29 @@ func (c *RaftCluster) handleReportSplit(request *pdpb.ReportSplitRequest) (*pdpb originRegion := proto.Clone(right).(*metapb.Region) originRegion.RegionEpoch = nil originRegion.StartKey = left.GetStartKey() - log.Infof("[region %d] region split, generate new region: %v", originRegion.GetId(), left) + log.Info("region split, generate new region", + zap.Uint64("region-id", originRegion.GetId()), + zap.Stringer("region-meta", core.RegionToHexMeta(left))) return &pdpb.ReportSplitResponse{}, nil } func (c *RaftCluster) handleBatchReportSplit(request *pdpb.ReportBatchSplitRequest) 
(*pdpb.ReportBatchSplitResponse, error) { regions := request.GetRegions() + hrm := core.RegionsToHexMeta(regions) err := c.checkSplitRegions(regions) if err != nil { - log.Warnf("report batch split region is invalid - %v, %v", request, fmt.Sprintf("%+v", err)) + log.Warn("report batch split region is invalid", + zap.Stringer("region-meta", hrm), + zap.Error(err)) return nil, err } last := len(regions) - 1 originRegion := proto.Clone(regions[last]).(*metapb.Region) - log.Infof("[region %d] region split, generate %d new regions: %v", originRegion.GetId(), last, regions[:last]) + hrm = core.RegionsToHexMeta(regions[:last]) + log.Info("region batch split, generate new regions", + zap.Uint64("region-id", originRegion.GetId()), + zap.Stringer("origin", hrm), + zap.Int("total", last)) return &pdpb.ReportBatchSplitResponse{}, nil } diff --git a/server/config.go b/server/config.go index a295e7f92fb..767f0fe33e7 100644 --- a/server/config.go +++ b/server/config.go @@ -27,11 +27,13 @@ import ( "github.com/coreos/etcd/embed" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/go-semver/semver" - "github.com/pingcap/pd/pkg/logutil" + "github.com/pingcap/log" "github.com/pingcap/pd/pkg/metricutil" "github.com/pingcap/pd/pkg/typeutil" "github.com/pingcap/pd/server/namespace" "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) // Config is the pd server configuration. @@ -61,7 +63,7 @@ type Config struct { LeaderLease int64 `toml:"lease" json:"lease"` // Log related config. - Log logutil.LogConfig `toml:"log" json:"log"` + Log log.Config `toml:"log" json:"log"` // Backward compatibility. LogFileDeprecated string `toml:"log-file" json:"log-file"` @@ -123,6 +125,9 @@ type Config struct { heartbeatStreamBindInterval typeutil.Duration leaderPriorityCheckInterval typeutil.Duration + + logger *zap.Logger + logProps *log.ZapProperties } // NewConfig creates a new config. 
@@ -181,6 +186,8 @@ const ( defaultHeartbeatStreamRebindInterval = time.Minute defaultLeaderPriorityCheckInterval = time.Minute + + defaultDisableErrorVerbose = true ) func adjustString(v *string, defValue string) { @@ -390,6 +397,8 @@ func (c *Config) Adjust(meta *toml.MetaData) error { return err } + c.adjustLog(configMetaData.Child("log")) + adjustDuration(&c.heartbeatStreamBindInterval, defaultHeartbeatStreamRebindInterval) adjustDuration(&c.leaderPriorityCheckInterval, defaultLeaderPriorityCheckInterval) @@ -400,6 +409,12 @@ func (c *Config) Adjust(meta *toml.MetaData) error { return nil } +func (c *Config) adjustLog(meta *configMetaData) { + if !meta.IsDefined("disable-error-verbose") { + c.Log.DisableErrorVerbose = defaultDisableErrorVerbose + } +} + func (c *Config) clone() *Config { cfg := &Config{} *cfg = *c @@ -756,6 +771,27 @@ func ParseUrls(s string) ([]url.URL, error) { return urls, nil } +// SetupLogger setup the logger. +func (c *Config) SetupLogger() error { + lg, p, err := log.InitLogger(&c.Log, zap.AddStacktrace(zapcore.FatalLevel)) + if err != nil { + return err + } + c.logger = lg + c.logProps = p + return nil +} + +// GetZapLogger gets the created zap logger. +func (c *Config) GetZapLogger() *zap.Logger { + return c.logger +} + +// GetZapLogProperties gets properties of the zap logger. +func (c *Config) GetZapLogProperties() *log.ZapProperties { + return c.logProps +} + // generates a configuration for embedded etcd. 
func (c *Config) genEmbedEtcdConfig() (*embed.Config, error) { cfg := embed.NewConfig() @@ -780,7 +816,9 @@ func (c *Config) genEmbedEtcdConfig() (*embed.Config, error) { cfg.PeerTLSInfo.TrustedCAFile = c.Security.CAPath cfg.PeerTLSInfo.CertFile = c.Security.CertPath cfg.PeerTLSInfo.KeyFile = c.Security.KeyPath - + // TODO: update etcd + // cfg.ZapLoggerBuilder = embed.NewZapCoreLoggerBuilder(c.logger, c.logger.Core(), c.logSyncer) + // cfg.Logger = "zap" var err error cfg.LPUrls, err = ParseUrls(c.PeerUrls) diff --git a/server/coordinator.go b/server/coordinator.go index c9694b3432c..8cd15dfe306 100644 --- a/server/coordinator.go +++ b/server/coordinator.go @@ -23,13 +23,14 @@ import ( "github.com/pingcap/kvproto/pkg/eraftpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" "github.com/pingcap/pd/server/schedule" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -101,14 +102,14 @@ func (c *coordinator) dispatch(region *core.RegionInfo) { return } if op.IsFinish() { - log.Infof("[region %v] operator finish: %s", region.GetID(), op) + log.Info("operator finish", zap.Uint64("region-id", region.GetID()), zap.Reflect("operator", op)) operatorCounter.WithLabelValues(op.Desc(), "finish").Inc() operatorDuration.WithLabelValues(op.Desc()).Observe(op.ElapsedTime().Seconds()) c.pushHistory(op) c.opRecords.Put(op, pdpb.OperatorStatus_SUCCESS) c.removeOperator(op) } else if timeout { - log.Infof("[region %v] operator timeout: %s", region.GetID(), op) + log.Info("operator timeout", zap.Uint64("region-id", region.GetID()), zap.Reflect("operator", op)) operatorCounter.WithLabelValues(op.Desc(), "timeout").Inc() c.removeOperator(op) c.opRecords.Put(op, pdpb.OperatorStatus_TIMEOUT) @@ -232,16 +233,16 @@ func (c *coordinator) run() { 
if schedulerCfg.Disable { scheduleCfg.Schedulers[k] = schedulerCfg k++ - log.Info("skip create ", schedulerCfg.Type) + log.Info("skip create scheduler", zap.String("scheduler-type", schedulerCfg.Type)) continue } s, err := schedule.CreateScheduler(schedulerCfg.Type, c.limiter, schedulerCfg.Args...) if err != nil { - log.Fatalf("can not create scheduler %s: %v", schedulerCfg.Type, err) + log.Fatal("can not create scheduler", zap.String("scheduler-type", schedulerCfg.Type), zap.Error(err)) } - log.Infof("create scheduler %s", s.GetName()) + log.Info("create scheduler", zap.String("scheduler-name", s.GetName())) if err = c.addScheduler(s, schedulerCfg.Args...); err != nil { - log.Errorf("can not add scheduler %s: %v", s.GetName(), err) + log.Error("can not add scheduler", zap.String("scheduler-name", s.GetName()), zap.Error(err)) } // only record valid scheduler config @@ -254,7 +255,7 @@ func (c *coordinator) run() { // remove invalid scheduler config and persist scheduleCfg.Schedulers = scheduleCfg.Schedulers[:k] if err := c.cluster.opt.persist(c.cluster.kv); err != nil { - log.Errorf("can't persist schedule config: %v", err) + log.Error("cannot persist schedule config", zap.Error(err)) } c.wg.Add(1) @@ -439,7 +440,9 @@ func (c *coordinator) runScheduler(s *scheduleController) { } case <-s.Ctx().Done(): - log.Infof("%v has been stopped: %v", s.GetName(), s.Ctx().Err()) + log.Info("scheduler has been stopped", + zap.String("scheduler-name", s.GetName()), + zap.Error(s.Ctx().Err())) return } } @@ -448,12 +451,12 @@ func (c *coordinator) runScheduler(s *scheduleController) { func (c *coordinator) addOperatorLocked(op *schedule.Operator) bool { regionID := op.RegionID() - log.Infof("[region %v] add operator: %s", regionID, op) + log.Info("add operator", zap.Uint64("region-id", regionID), zap.Reflect("operator", op)) // If there is an old operator, replace it. The priority should be checked // already. 
if old, ok := c.operators[regionID]; ok { - log.Infof("[region %v] replace old operator: %s", regionID, old) + log.Info("replace old operator", zap.Uint64("region-id", regionID), zap.Reflect("operator", old)) operatorCounter.WithLabelValues(old.Desc(), "replaced").Inc() c.opRecords.Put(old, pdpb.OperatorStatus_REPLACE) c.removeOperatorLocked(old) @@ -493,15 +496,15 @@ func (c *coordinator) addOperator(ops ...*schedule.Operator) bool { func (c *coordinator) checkAddOperator(op *schedule.Operator) bool { region := c.cluster.GetRegion(op.RegionID()) if region == nil { - log.Debugf("[region %v] region not found, cancel add operator", op.RegionID()) + log.Debug("region not found, cancel add operator", zap.Uint64("region-id", op.RegionID())) return false } if region.GetRegionEpoch().GetVersion() != op.RegionEpoch().GetVersion() || region.GetRegionEpoch().GetConfVer() != op.RegionEpoch().GetConfVer() { - log.Debugf("[region %v] region epoch not match, %v vs %v, cancel add operator", op.RegionID(), region.GetRegionEpoch(), op.RegionEpoch()) + log.Debug("region epoch not match, cancel add operator", zap.Uint64("region-id", op.RegionID()), zap.Reflect("old", region.GetRegionEpoch()), zap.Reflect("new", op.RegionEpoch())) return false } if old := c.operators[op.RegionID()]; old != nil && !isHigherPriorityOperator(op, old) { - log.Debugf("[region %v] already have operator %s, cancel add operator", op.RegionID(), old) + log.Debug("already have operator, cancel add operator", zap.Uint64("region-id", op.RegionID()), zap.Reflect("old", old)) return false } return true @@ -576,7 +579,7 @@ func (c *coordinator) getHistory(start time.Time) []schedule.OperatorHistory { } func (c *coordinator) sendScheduleCommand(region *core.RegionInfo, step schedule.OperatorStep) { - log.Infof("[region %v] send schedule command: %s", region.GetID(), step) + log.Info("send schedule command", zap.Uint64("region-id", region.GetID()), zap.Stringer("step", step)) switch s := step.(type) { case 
schedule.TransferLeader: cmd := &pdpb.RegionHeartbeatResponse{ @@ -654,7 +657,7 @@ func (c *coordinator) sendScheduleCommand(region *core.RegionInfo, step schedule } c.hbStreams.sendMsg(region, cmd) default: - log.Errorf("unknown operatorStep: %v", step) + log.Error("unknown operator step", zap.Reflect("step", step)) } } diff --git a/server/core/region.go b/server/core/region.go index a24fed8e91c..5b91aac8e8d 100644 --- a/server/core/region.go +++ b/server/core/region.go @@ -15,6 +15,7 @@ package core import ( "bytes" + "encoding/hex" "fmt" "math/rand" "reflect" @@ -880,19 +881,68 @@ func DiffRegionPeersInfo(origin *RegionInfo, other *RegionInfo) string { func DiffRegionKeyInfo(origin *RegionInfo, other *RegionInfo) string { var ret []string if !bytes.Equal(origin.meta.StartKey, other.meta.StartKey) { - originKey := &metapb.Region{StartKey: origin.meta.StartKey} - otherKey := &metapb.Region{StartKey: other.meta.StartKey} - ret = append(ret, fmt.Sprintf("StartKey Changed:{%s} -> {%s}", originKey, otherKey)) + ret = append(ret, fmt.Sprintf("StartKey Changed:%s -> %s", HexRegionKey(origin.meta.StartKey), HexRegionKey(other.meta.StartKey))) } else { - ret = append(ret, fmt.Sprintf("StartKey:{%s}", &metapb.Region{StartKey: origin.meta.StartKey})) + ret = append(ret, fmt.Sprintf("StartKey:%s", HexRegionKey(origin.meta.StartKey))) } if !bytes.Equal(origin.meta.EndKey, other.meta.EndKey) { - originKey := &metapb.Region{EndKey: origin.meta.EndKey} - otherKey := &metapb.Region{EndKey: other.meta.EndKey} - ret = append(ret, fmt.Sprintf("EndKey Changed:{%s} -> {%s}", originKey, otherKey)) + ret = append(ret, fmt.Sprintf("EndKey Changed:%s -> %s", HexRegionKey(origin.meta.EndKey), HexRegionKey(other.meta.EndKey))) } else { - ret = append(ret, fmt.Sprintf("EndKey:{%s}", &metapb.Region{EndKey: origin.meta.EndKey})) + ret = append(ret, fmt.Sprintf("EndKey:%s", HexRegionKey(origin.meta.EndKey))) } return strings.Join(ret, ", ") } + +// HexRegionKey converts region key to hex 
format. Used for formating region in +// logs. +func HexRegionKey(key []byte) []byte { + return []byte(strings.ToUpper(hex.EncodeToString(key))) +} + +// RegionToHexMeta converts a region meta's keys to hex format. Used for formating +// region in logs. +func RegionToHexMeta(meta *metapb.Region) HexRegionMeta { + if meta == nil { + return HexRegionMeta{} + } + meta = proto.Clone(meta).(*metapb.Region) + meta.StartKey = HexRegionKey(meta.StartKey) + meta.EndKey = HexRegionKey(meta.EndKey) + return HexRegionMeta{meta} +} + +// HexRegionMeta is a region meta in the hex format. Used for formating region in logs. +type HexRegionMeta struct { + *metapb.Region +} + +func (h HexRegionMeta) String() string { + return strings.TrimSpace(proto.CompactTextString(h.Region)) +} + +// RegionsToHexMeta converts regions' meta keys to hex format. Used for formating +// region in logs. +func RegionsToHexMeta(regions []*metapb.Region) HexRegionsMeta { + hexRegionMetas := make([]*metapb.Region, len(regions)) + for i, region := range regions { + meta := proto.Clone(region).(*metapb.Region) + meta.StartKey = HexRegionKey(meta.StartKey) + meta.EndKey = HexRegionKey(meta.EndKey) + + hexRegionMetas[i] = meta + } + return HexRegionsMeta(hexRegionMetas) +} + +// HexRegionsMeta is a slice of regions' meta in the hex format. Used for formating +// region in logs. 
+type HexRegionsMeta []*metapb.Region + +func (h HexRegionsMeta) String() string { + var b strings.Builder + for _, r := range h { + b.WriteString(proto.CompactTextString(r)) + } + return strings.TrimSpace(b.String()) +} diff --git a/server/core/region_test.go b/server/core/region_test.go index 53fc0e7ca8f..69d3a156658 100644 --- a/server/core/region_test.go +++ b/server/core/region_test.go @@ -111,14 +111,14 @@ func (*testRegionKey) TestRegionKey(c *C) { expect string }{ {`"t\x80\x00\x00\x00\x00\x00\x00\xff!_r\x80\x00\x00\x00\x00\xff\x02\u007fY\x00\x00\x00\x00\x00\xfa"`, - `"t\200\000\000\000\000\000\000\377!_r\200\000\000\000\000\377\002\177Y\000\000\000\000\000\372"`}, + `7480000000000000FF215F728000000000FF027F590000000000FA`}, {"\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf8\"", - `"\200\000\000\000\000\000\000\377\005\000\000\000\000\000\000\000\370"`}, + `80000000000000FF0500000000000000F8`}, } for _, t := range testCase { got, err := strconv.Unquote(t.key) c.Assert(err, IsNil) - s := fmt.Sprintln(&metapb.Region{StartKey: []byte(got)}) + s := fmt.Sprintln(RegionToHexMeta(&metapb.Region{StartKey: []byte(got)})) c.Assert(strings.Contains(s, t.expect), IsTrue) // start key changed diff --git a/server/core/region_tree.go b/server/core/region_tree.go index 42cc6ebbaed..d46132fcbe8 100644 --- a/server/core/region_tree.go +++ b/server/core/region_tree.go @@ -17,7 +17,8 @@ import ( "github.com/google/btree" "github.com/pingcap/kvproto/pkg/metapb" - log "github.com/sirupsen/logrus" + log "github.com/pingcap/log" + "go.uber.org/zap" ) var _ btree.Item = ®ionItem{} @@ -89,7 +90,10 @@ func (t *regionTree) getOverlaps(region *metapb.Region) []*metapb.Region { func (t *regionTree) update(region *metapb.Region) []*metapb.Region { overlaps := t.getOverlaps(region) for _, item := range overlaps { - log.Debugf("[region %d] delete region {%v}, cause overlapping with region {%v}", item.GetId(), item, region) + log.Debug("overlapping 
region", + zap.Uint64("region-id", item.GetId()), + zap.Stringer("delete-region", RegionToHexMeta(item)), + zap.Stringer("update-region", RegionToHexMeta(region))) t.tree.Delete(®ionItem{item}) } diff --git a/server/core/store.go b/server/core/store.go index e619201e881..0ff02318b60 100644 --- a/server/core/store.go +++ b/server/core/store.go @@ -23,8 +23,9 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/pd/pkg/error_code" - log "github.com/sirupsen/logrus" + log "github.com/pingcap/log" + errcode "github.com/pingcap/pd/pkg/error_code" + "go.uber.org/zap" ) // StoreInfo contains information about a store. @@ -387,7 +388,8 @@ func (s *StoresInfo) BlockStore(storeID uint64) errcode.ErrorCode { func (s *StoresInfo) UnblockStore(storeID uint64) { store, ok := s.stores[storeID] if !ok { - log.Fatalf("store %d is unblocked, but it is not found", storeID) + log.Fatal("store is unblocked, but it is not found", + zap.Uint64("store-id", storeID)) } store.Unblock() } diff --git a/server/etcd_kv.go b/server/etcd_kv.go index 8553ec308b4..55a67baf027 100644 --- a/server/etcd_kv.go +++ b/server/etcd_kv.go @@ -19,8 +19,9 @@ import ( "time" "github.com/coreos/etcd/clientv3" + log "github.com/pingcap/log" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -83,7 +84,7 @@ func (kv *etcdKVBase) Save(key, value string) error { resp, err := kv.server.leaderTxn().Then(clientv3.OpPut(key, value)).Commit() if err != nil { - log.Errorf("save to etcd error: %v", err) + log.Error("save to etcd meet error", zap.Error(err)) return errors.WithStack(err) } if !resp.Succeeded { @@ -97,7 +98,7 @@ func (kv *etcdKVBase) Delete(key string) error { resp, err := kv.server.leaderTxn().Then(clientv3.OpDelete(key)).Commit() if err != nil { - log.Errorf("delete from etcd error: %v", err) + log.Error("delete from etcd meet error", zap.Error(err)) return 
errors.WithStack(err) } if !resp.Succeeded { @@ -113,10 +114,10 @@ func kvGet(c *clientv3.Client, key string, opts ...clientv3.OpOption) (*clientv3 start := time.Now() resp, err := clientv3.NewKV(c).Get(ctx, key, opts...) if err != nil { - log.Errorf("load from etcd error: %v", err) + log.Error("load from etcd meet error", zap.Error(err)) } if cost := time.Since(start); cost > kvSlowRequestTime { - log.Warnf("kv gets too slow: key %v cost %v err %v", key, cost, err) + log.Warn("kv gets too slow", zap.String("request-key", key), zap.Duration("cost", cost), zap.Error(err)) } return resp, errors.WithStack(err) diff --git a/server/grpc_service.go b/server/grpc_service.go index 669c2422252..95dc8a99d83 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -14,6 +14,7 @@ package server import ( + "context" "fmt" "io" "strconv" @@ -22,10 +23,10 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" - "golang.org/x/net/context" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -210,7 +211,9 @@ func (s *Server) PutStore(ctx context.Context, request *pdpb.PutStoreRequest) (* return nil, status.Errorf(codes.Unknown, err.Error()) } - log.Infof("put store ok - %v", store) + log.Info("put store ok", zap.Stringer("store", store)) + cluster.RLock() + defer cluster.RUnlock() cluster.cachedCluster.OnStoreVersionChange() return &pdpb.PutStoreResponse{ @@ -567,7 +570,7 @@ func (s *Server) PutClusterConfig(ctx context.Context, request *pdpb.PutClusterC return nil, status.Errorf(codes.Unknown, err.Error()) } - log.Infof("put cluster config ok - %v", conf) + log.Info("put cluster config ok", zap.Reflect("config", conf)) return &pdpb.PutClusterConfigResponse{ Header: s.header(), @@ -654,9 +657,12 @@ func (s *Server) UpdateGCSafePoint(ctx context.Context, request *pdpb.UpdateGCSa 
if err := s.kv.SaveGCSafePoint(newSafePoint); err != nil { return nil, err } - log.Infof("updated gc safe point to %d", newSafePoint) + log.Info("updated gc safe point", + zap.Uint64("safe-point", newSafePoint)) } else if newSafePoint < oldSafePoint { - log.Warnf("trying to update gc safe point from %d to %d", oldSafePoint, newSafePoint) + log.Warn("trying to update gc safe point", + zap.Uint64("old-safe-point", oldSafePoint), + zap.Uint64("new-safe-point", newSafePoint)) newSafePoint = oldSafePoint } diff --git a/server/handler.go b/server/handler.go index 244258bff82..d39ed77abd0 100644 --- a/server/handler.go +++ b/server/handler.go @@ -21,10 +21,11 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) var ( @@ -159,11 +160,11 @@ func (h *Handler) AddScheduler(name string, args ...string) error { if err != nil { return err } - log.Infof("create scheduler %s", s.GetName()) + log.Info("create scheduler", zap.String("scheduler-name", s.GetName())) if err = c.addScheduler(s, args...); err != nil { - log.Errorf("can not add scheduler %v: %v", s.GetName(), err) + log.Error("can not add scheduler", zap.String("scheduler-name", s.GetName()), zap.Error(err)) } else if err = h.opt.persist(c.cluster.kv); err != nil { - log.Errorf("can not persist scheduler config: %v", err) + log.Error("can not persist scheduler config", zap.Error(err)) } return err } @@ -175,9 +176,9 @@ func (h *Handler) RemoveScheduler(name string) error { return err } if err = c.removeScheduler(name); err != nil { - log.Errorf("can not remove scheduler %v: %v", name, err) + log.Error("can not remove scheduler", zap.String("scheduler-name", name), zap.Error(err)) } else if err = h.opt.persist(c.cluster.kv); err != nil { - log.Errorf("can not persist scheduler config: %v", err) + 
log.Error("can not persist scheduler config", zap.Error(err)) } return err } diff --git a/server/heartbeat_stream_test.go b/server/heartbeat_stream_test.go index 7d0746ec4a2..febe3c1990e 100644 --- a/server/heartbeat_stream_test.go +++ b/server/heartbeat_stream_test.go @@ -20,8 +20,10 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/testutil" "github.com/pingcap/pd/pkg/typeutil" + "go.uber.org/zap" ) var _ = Suite(&testHeartbeatStreamSuite{}) @@ -113,11 +115,15 @@ func newRegionheartbeatClient(c *C, grpcClient pdpb.PDClient) *regionHeartbeatCl } func (c *regionHeartbeatClient) close() { - c.stream.CloseSend() + if err := c.stream.CloseSend(); err != nil { + log.Error("failed to terminate client stream", zap.Error(err)) + } } func (c *regionHeartbeatClient) SendRecv(msg *pdpb.RegionHeartbeatRequest, timeout time.Duration) *pdpb.RegionHeartbeatResponse { - c.stream.Send(msg) + if err := c.stream.Send(msg); err != nil { + log.Error("send heartbeat message fail", zap.Error(err)) + } select { case <-time.After(timeout): return nil diff --git a/server/heartbeat_streams.go b/server/heartbeat_streams.go index 60e53db5c49..c224e620f78 100644 --- a/server/heartbeat_streams.go +++ b/server/heartbeat_streams.go @@ -20,9 +20,10 @@ import ( "time" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/server/core" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const heartbeatStreamKeepAliveInterval = time.Minute @@ -80,21 +81,24 @@ func (s *heartbeatStreams) run() { storeLabel := strconv.FormatUint(storeID, 10) if stream, ok := s.streams[storeID]; ok { if err := stream.Send(msg); err != nil { - log.Errorf("[region %v] send heartbeat message fail: %v", msg.RegionId, err) + log.Error("send heartbeat message fail", + zap.Uint64("region-id", msg.RegionId), zap.Error(err)) 
delete(s.streams, storeID) regionHeartbeatCounter.WithLabelValues(storeLabel, "push", "err").Inc() } else { regionHeartbeatCounter.WithLabelValues(storeLabel, "push", "ok").Inc() } } else { - log.Debugf("[region %v] heartbeat stream not found for store %v, skip send message", msg.RegionId, storeID) + log.Debug("heartbeat stream not found, skip send message", zap.Uint64("region-id", msg.RegionId), zap.Uint64("store-id", storeID)) regionHeartbeatCounter.WithLabelValues(storeLabel, "push", "skip").Inc() } case <-keepAliveTicker.C: for storeID, stream := range s.streams { storeLabel := strconv.FormatUint(storeID, 10) if err := stream.Send(keepAlive); err != nil { - log.Errorf("[store %v] send keepalive message fail: %v", storeID, err) + log.Error("send keepalive message fail", + zap.Uint64("target-store-id", storeID), + zap.Error(err)) delete(s.streams, storeID) regionHeartbeatCounter.WithLabelValues(storeLabel, "keepalive", "err").Inc() } else { diff --git a/server/id.go b/server/id.go index 17b3d3d8ec0..3c6f7b4ff17 100644 --- a/server/id.go +++ b/server/id.go @@ -17,8 +17,9 @@ import ( "sync" "github.com/coreos/etcd/clientv3" + log "github.com/pingcap/log" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -87,7 +88,7 @@ func (alloc *idAllocator) generate() (uint64, error) { return 0, errors.New("generate id failed, we may not leader") } - log.Infof("idAllocator allocates a new id: %d", end) + log.Info("idAllocator allocates a new id", zap.Uint64("alloc-id", end)) metadataGauge.WithLabelValues("idalloc").Set(float64(end)) return end, nil } diff --git a/server/join.go b/server/join.go index 3de0d21c94d..57eebf4f141 100644 --- a/server/join.go +++ b/server/join.go @@ -22,9 +22,10 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/embed" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/etcdutil" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -86,7 +87,7 @@ 
func PrepareJoinCluster(cfg *Config) error { if _, err := os.Stat(filePath); !os.IsNotExist(err) { s, err := ioutil.ReadFile(filePath) if err != nil { - log.Fatal("read the join config meet error: ", err) + log.Fatal("read the join config meet error", zap.Error(err)) } cfg.InitialCluster = strings.TrimSpace(string(s)) cfg.InitialClusterState = embed.ClusterStateFlagExisting @@ -176,14 +177,14 @@ func PrepareJoinCluster(cfg *Config) error { func isDataExist(d string) bool { dir, err := os.Open(d) if err != nil { - log.Error("failed to open:", err) + log.Error("failed to open directory", zap.Error(err)) return false } defer dir.Close() names, err := dir.Readdirnames(-1) if err != nil { - log.Error("failed to list:", err) + log.Error("failed to list directory", zap.Error(err)) return false } return len(names) != 0 diff --git a/server/leader.go b/server/leader.go index 2f226a1474d..5fbd73d2499 100644 --- a/server/leader.go +++ b/server/leader.go @@ -15,7 +15,6 @@ package server import ( "context" - "fmt" "math/rand" "path" "strings" @@ -24,10 +23,11 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/mvcc/mvccpb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/etcdutil" "github.com/pingcap/pd/pkg/logutil" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // The timeout to wait transfer etcd leader to complete. 
@@ -75,7 +75,7 @@ func (s *Server) leaderLoop() { for { if s.isClosed() { - log.Infof("server is closed, return leader loop") + log.Info("server is closed, return leader loop") return } @@ -87,7 +87,7 @@ func (s *Server) leaderLoop() { leader, rev, err := getLeader(s.client, s.getLeaderPath()) if err != nil { - log.Errorf("get leader err %v", err) + log.Error("get leader meet error", zap.Error(err)) time.Sleep(200 * time.Millisecond) continue } @@ -95,14 +95,14 @@ func (s *Server) leaderLoop() { if s.isSameLeader(leader) { // oh, we are already leader, we may meet something wrong // in previous campaignLeader. we can delete and campaign again. - log.Warnf("leader is still %s, delete and campaign again", leader) + log.Warn("the leader has not changed, delete and campaign again", zap.Stringer("old-leader", leader)) if err = s.deleteLeaderKey(); err != nil { - log.Errorf("delete leader key err %s", err) + log.Error("delete leader key meet error", zap.Error(err)) time.Sleep(200 * time.Millisecond) continue } } else { - log.Infof("leader is %s, watch it", leader) + log.Info("start watch leader", zap.Stringer("leader", leader)) s.watchLeader(leader, rev) log.Info("leader changed, try to campaign leader") } @@ -110,13 +110,15 @@ func (s *Server) leaderLoop() { etcdLeader := s.GetEtcdLeader() if etcdLeader != s.ID() { - log.Infof("%v is not etcd leader, skip campaign leader and check later", s.Name()) + log.Info("skip campaign leader and check later", + zap.String("server-name", s.Name()), + zap.Uint64("etcd-leader-id", etcdLeader)) time.Sleep(200 * time.Millisecond) continue } if err = s.campaignLeader(); err != nil { - log.Errorf("campaign leader err %s", fmt.Sprintf("%+v", err)) + log.Error("campaign leader meet error", zap.Error(err)) } } } @@ -136,20 +138,22 @@ func (s *Server) etcdLeaderLoop() { } myPriority, err := s.GetMemberLeaderPriority(s.ID()) if err != nil { - log.Errorf("failed to load leader priority: %v", err) + log.Error("failed to load leader priority", 
zap.Error(err)) break } leaderPriority, err := s.GetMemberLeaderPriority(etcdLeader) if err != nil { - log.Errorf("failed to load leader priority: %v", err) + log.Error("failed to load leader priority", zap.Error(err)) break } if myPriority > leaderPriority { err := s.MoveEtcdLeader(ctx, etcdLeader, s.ID()) if err != nil { - log.Errorf("failed to transfer etcd leader: %v", err) + log.Error("failed to transfer etcd leader", zap.Error(err)) } else { - log.Infof("etcd leader moved from %v to %v", etcdLeader, s.ID()) + log.Info("transfer etcd leader", + zap.Uint64("from", etcdLeader), + zap.Uint64("to", s.ID())) } } case <-ctx.Done(): @@ -200,14 +204,14 @@ func (s *Server) memberInfo() (member *pdpb.Member, marshalStr string) { data, err := leader.Marshal() if err != nil { // can't fail, so panic here. - log.Fatalf("marshal leader %s err %v", leader, err) + log.Fatal("marshal leader meet error", zap.Stringer("leader", leader), zap.Error(err)) } return leader, string(data) } func (s *Server) campaignLeader() error { - log.Infof("start to campaign leader %s", s.Name()) + log.Info("begin to campaign leader", zap.String("campaign-leader-name", s.Name())) lessor := clientv3.NewLease(s.client) defer func() { @@ -221,7 +225,7 @@ func (s *Server) campaignLeader() error { cancel() if cost := time.Since(start); cost > slowRequestTime { - log.Warnf("lessor grants too slow, cost %s", cost) + log.Warn("lessor grants too slow", zap.Duration("cost", cost)) } if err != nil { @@ -249,7 +253,7 @@ func (s *Server) campaignLeader() error { if err != nil { return errors.WithStack(err) } - log.Infof("campaign leader ok %s", s.Name()) + log.Info("campaign leader ok", zap.String("campaign-leader-name", s.Name())) err = s.scheduleOpt.reload(s.kv) if err != nil { @@ -273,8 +277,8 @@ func (s *Server) campaignLeader() error { s.enableLeader() defer s.disableLeader() - log.Infof("cluster version is %s", s.scheduleOpt.loadClusterVersion()) - log.Infof("PD cluster leader %s is ready to serve", 
s.Name()) + log.Info("load cluster version", zap.Stringer("cluster-version", s.scheduleOpt.loadClusterVersion())) + log.Info("PD cluster leader is ready to serve", zap.String("leader-name", s.Name())) CheckPDVersion(s.scheduleOpt) tsTicker := time.NewTicker(updateTimestampStep) @@ -294,7 +298,7 @@ func (s *Server) campaignLeader() error { } etcdLeader := s.GetEtcdLeader() if etcdLeader != s.ID() { - log.Infof("etcd leader changed, %s resigns leadership", s.Name()) + log.Info("etcd leader changed, resigns leadership", zap.String("old-leader-name", s.Name())) return nil } case <-ctx.Done(): @@ -322,12 +326,14 @@ func (s *Server) watchLeader(leader *pdpb.Member, revision int64) { for wresp := range rch { // meet compacted error, use the compact revision. if wresp.CompactRevision != 0 { - log.Warnf("required revision %d has been compacted, use the compact revision %d", revision, wresp.CompactRevision) + log.Warn("required revision has been compacted, use the compact revision", + zap.Int64("required-revision", revision), + zap.Int64("compact-revision", wresp.CompactRevision)) revision = wresp.CompactRevision break } if wresp.Canceled { - log.Errorf("leader watcher is canceled with revision: %d, error: %s", revision, wresp.Err()) + log.Error("leader watcher is canceled with", zap.Int64("revision", revision), zap.Error(wresp.Err())) return } @@ -351,7 +357,7 @@ func (s *Server) watchLeader(leader *pdpb.Member, revision int64) { // ResignLeader resigns current PD's leadership. If nextLeader is empty, all // other pd-servers can campaign. func (s *Server) ResignLeader(nextLeader string) error { - log.Infof("%s tries to resign leader with next leader directive: %v", s.Name(), nextLeader) + log.Info("try to resign leader to next leader", zap.String("from", s.Name()), zap.String("to", nextLeader)) // Determine next leaders. 
var leaderIDs []uint64 res, err := etcdutil.ListEtcdMembers(s.client) @@ -367,7 +373,7 @@ func (s *Server) ResignLeader(nextLeader string) error { return errors.New("no valid pd to transfer leader") } nextLeaderID := leaderIDs[rand.Intn(len(leaderIDs))] - log.Infof("%s ready to resign leader, next leader: %v", s.Name(), nextLeaderID) + log.Info("ready to resign leader", zap.String("name", s.Name()), zap.Uint64("next-id", nextLeaderID)) err = s.MoveEtcdLeader(s.serverLoopCtx, s.ID(), nextLeaderID) return errors.WithStack(err) } diff --git a/server/schedule/filters.go b/server/schedule/filters.go index 3cad8bb2460..d8d75f2887e 100644 --- a/server/schedule/filters.go +++ b/server/schedule/filters.go @@ -19,7 +19,6 @@ import ( "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" ) //revive:disable:unused-parameter @@ -38,7 +37,6 @@ func FilterSource(opt Options, store *core.StoreInfo, filters []Filter) bool { storeID := fmt.Sprintf("store%d", store.GetId()) for _, filter := range filters { if filter.FilterSource(opt, store) { - log.Debugf("[filter %T] filters store %v from source", filter, store) filterCounter.WithLabelValues("filter-source", storeID, filter.Type()).Inc() return true } @@ -51,7 +49,6 @@ func FilterTarget(opt Options, store *core.StoreInfo, filters []Filter) bool { storeID := fmt.Sprintf("store%d", store.GetId()) for _, filter := range filters { if filter.FilterTarget(opt, store) { - log.Debugf("[filter %T] filters store %v from target", filter, store) filterCounter.WithLabelValues("filter-target", storeID, filter.Type()).Inc() return true } diff --git a/server/schedule/merge_checker.go b/server/schedule/merge_checker.go index 46e5a621a98..f593eba6527 100644 --- a/server/schedule/merge_checker.go +++ b/server/schedule/merge_checker.go @@ -16,10 +16,11 @@ package schedule import ( "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" 
"github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // As region split history is not persisted. We put a special marker into @@ -109,7 +110,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) (*Operator, *Operator) { } checkerCounter.WithLabelValues("merge_checker", "new_operator").Inc() - log.Debugf("try to merge region {%v} into region {%v}", region, target) + log.Debug("try to merge region", zap.Stringer("from", core.RegionToHexMeta(region.GetMeta())), zap.Stringer("to", core.RegionToHexMeta(target.GetMeta()))) op1, op2, err := CreateMergeRegionOperator("merge-region", m.cluster, region, target, OpMerge) if err != nil { return nil, nil diff --git a/server/schedule/mockcluster.go b/server/schedule/mockcluster.go index 7f1fb08e66c..95ea98d7470 100644 --- a/server/schedule/mockcluster.go +++ b/server/schedule/mockcluster.go @@ -19,9 +19,10 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // MockCluster is used to mock clusterInfo for test use @@ -74,7 +75,7 @@ func (mc *MockCluster) RandHotRegionFromStore(store uint64, kind FlowKind) *core func (mc *MockCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) { peerID, err := mc.allocID() if err != nil { - log.Errorf("failed to alloc peer: %v", err) + log.Error("failed to alloc peer", zap.Error(err)) return nil, err } peer := &metapb.Peer{ diff --git a/server/schedule/namespace_checker.go b/server/schedule/namespace_checker.go index 9a87ef0fb65..f2af08378d7 100644 --- a/server/schedule/namespace_checker.go +++ b/server/schedule/namespace_checker.go @@ -15,9 +15,10 @@ package schedule import ( "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" 
"github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // NamespaceChecker ensures region to go to the right place. @@ -63,7 +64,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator { if n.isExists(targetStores, peer.StoreId) { continue } - log.Debugf("[region %d] peer %v is not located in namespace target stores", region.GetID(), peer) + log.Debug("peer is not located in namespace target stores", zap.Uint64("region-id", region.GetID()), zap.Reflect("peer", peer)) newPeer := n.SelectBestPeerToRelocate(region, targetStores) if newPeer == nil { checkerCounter.WithLabelValues("namespace_checker", "no_target_peer").Inc() @@ -86,7 +87,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator { func (n *NamespaceChecker) SelectBestPeerToRelocate(region *core.RegionInfo, targets []*core.StoreInfo) *metapb.Peer { storeID := n.SelectBestStoreToRelocate(region, targets) if storeID == 0 { - log.Debugf("[region %d] has no best store to relocate", region.GetID()) + log.Debug("has no best store to relocate", zap.Uint64("region-id", region.GetID())) return nil } newPeer, err := n.cluster.AllocPeer(storeID) diff --git a/server/schedule/operator.go b/server/schedule/operator.go index 92b76ea43ec..861e2118d1e 100644 --- a/server/schedule/operator.go +++ b/server/schedule/operator.go @@ -23,9 +23,9 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - log "github.com/sirupsen/logrus" - + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" + "go.uber.org/zap" ) const ( @@ -82,7 +82,7 @@ func (ap AddPeer) String() string { func (ap AddPeer) IsFinish(region *core.RegionInfo) bool { if p := region.GetStoreVoter(ap.ToStore); p != nil { if p.GetId() != ap.PeerID { - log.Warnf("expect %v, but obtain voter %v", ap.String(), p.GetId()) + log.Warn("obtain unexpected peer", zap.String("expect", ap.String()), zap.Uint64("obtain-voter", p.GetId())) return false } return 
region.GetPendingVoter(p.GetId()) == nil @@ -111,7 +111,7 @@ func (al AddLearner) String() string { func (al AddLearner) IsFinish(region *core.RegionInfo) bool { if p := region.GetStoreLearner(al.ToStore); p != nil { if p.GetId() != al.PeerID { - log.Warnf("expect %v, but obtain learner %v", al.String(), p.GetId()) + log.Warn("obtain unexpected peer", zap.String("expect", al.String()), zap.Uint64("obtain-learner", p.GetId())) return false } return region.GetPendingLearner(p.GetId()) == nil @@ -140,7 +140,7 @@ func (pl PromoteLearner) String() string { func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool { if p := region.GetStoreVoter(pl.ToStore); p != nil { if p.GetId() != pl.PeerID { - log.Warnf("expect %v, but obtain voter %v", pl.String(), p.GetId()) + log.Warn("obtain unexpected peer", zap.String("expect", pl.String()), zap.Uint64("obtain-voter", p.GetId())) } return p.GetId() == pl.PeerID } @@ -477,7 +477,7 @@ func removePeerSteps(cluster Cluster, region *core.RegionInfo, storeID uint64, f } if len(steps) == 0 { err = errors.New("no suitable follower to become region leader") - log.Debugf("fail to create remove peer operator, region: %v, err: %v", region.GetID(), err) + log.Debug("fail to create remove peer operator", zap.Uint64("region-id", region.GetID()), zap.Error(err)) return } } @@ -540,7 +540,7 @@ func matchPeerSteps(cluster Cluster, source *core.RegionInfo, target *core.Regio peer, err := cluster.AllocPeer(storeID) if err != nil { - log.Debugf("peer alloc failed: %v", err) + log.Debug("peer alloc failed", zap.Error(err)) return nil, kind, err } if cluster.IsRaftLearnerEnabled() { diff --git a/server/schedule/replica_checker.go b/server/schedule/replica_checker.go index ddb198d05c9..98ce82a7eed 100644 --- a/server/schedule/replica_checker.go +++ b/server/schedule/replica_checker.go @@ -17,9 +17,10 @@ import ( "fmt" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" 
"github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // ReplicaChecker ensures region has the best replicas. @@ -58,7 +59,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator { } if len(region.GetPeers()) < r.cluster.GetMaxReplicas() && r.cluster.IsMakeUpReplicaEnabled() { - log.Debugf("[region %d] has %d peers fewer than max replicas", region.GetID(), len(region.GetPeers())) + log.Debug("region has fewer than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) newPeer, _ := r.selectBestPeerToAddReplica(region, NewStorageThresholdFilter()) if newPeer == nil { checkerCounter.WithLabelValues("replica_checker", "no_target_store").Inc() @@ -82,7 +83,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator { // when add learner peer, the number of peer will exceed max replicas for a while, // just comparing the the number of voters to avoid too many cancel add operator log. if len(region.GetVoters()) > r.cluster.GetMaxReplicas() && r.cluster.IsRemoveExtraReplicaEnabled() { - log.Debugf("[region %d] has %d peers more than max replicas", region.GetID(), len(region.GetPeers())) + log.Debug("region has more than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) oldPeer, _ := r.selectWorstPeer(region) if oldPeer == nil { checkerCounter.WithLabelValues("replica_checker", "no_worst_peer").Inc() @@ -111,7 +112,7 @@ func (r *ReplicaChecker) SelectBestReplacementStore(region *core.RegionInfo, old func (r *ReplicaChecker) selectBestPeerToAddReplica(region *core.RegionInfo, filters ...Filter) (*metapb.Peer, float64) { storeID, score := r.selectBestStoreToAddReplica(region, filters...) 
if storeID == 0 { - log.Debugf("[region %d] no best store to add replica", region.GetID()) + log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID())) return nil, 0 } newPeer, err := r.cluster.AllocPeer(storeID) @@ -149,7 +150,7 @@ func (r *ReplicaChecker) selectWorstPeer(region *core.RegionInfo) (*metapb.Peer, selector := NewReplicaSelector(regionStores, r.cluster.GetLocationLabels(), r.filters...) worstStore := selector.SelectSource(r.cluster, regionStores) if worstStore == nil { - log.Debugf("[region %d] no worst store", region.GetID()) + log.Debug("no worst store", zap.Uint64("region-id", region.GetID())) return nil, 0 } return region.GetStorePeer(worstStore.GetId()), DistinctScore(r.cluster.GetLocationLabels(), regionStores, worstStore) @@ -167,7 +168,7 @@ func (r *ReplicaChecker) checkDownPeer(region *core.RegionInfo) *Operator { } store := r.cluster.GetStore(peer.GetStoreId()) if store == nil { - log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId()) + log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId())) return nil } if store.DownTime() < r.cluster.GetMaxStoreDownTime() { @@ -195,7 +196,7 @@ func (r *ReplicaChecker) checkOfflinePeer(region *core.RegionInfo) *Operator { for _, peer := range region.GetPeers() { store := r.cluster.GetStore(peer.GetStoreId()) if store == nil { - log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId()) + log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId())) return nil } if store.IsUp() { @@ -225,7 +226,7 @@ func (r *ReplicaChecker) checkBestReplacement(region *core.RegionInfo) *Operator } // Make sure the new peer is better than the old peer. 
if newScore <= oldScore { - log.Debugf("[region %d] newScore %f is not better than oldScore %f", region.GetID(), newScore, oldScore) + log.Debug("no better peer", zap.Uint64("region-id", region.GetID()), zap.Float64("new-score", newScore), zap.Float64("old-score", oldScore)) checkerCounter.WithLabelValues("replica_checker", "not_better").Inc() return nil } @@ -270,7 +271,7 @@ func (r *ReplicaChecker) fixPeer(region *core.RegionInfo, peer *metapb.Peer, sta storeID, _ := r.SelectBestReplacementStore(region, peer, NewStorageThresholdFilter()) if storeID == 0 { - log.Debugf("[region %d] no best store to add replica", region.GetID()) + log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID())) return nil } newPeer, err := r.cluster.AllocPeer(storeID) diff --git a/server/schedule/scheduler.go b/server/schedule/scheduler.go index 24bc34ff47a..50800afa700 100644 --- a/server/schedule/scheduler.go +++ b/server/schedule/scheduler.go @@ -18,9 +18,10 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // Cluster provides an overview of a cluster's regions distribution. @@ -77,7 +78,7 @@ var schedulerMap = make(map[string]CreateSchedulerFunc) // func of a package. 
func RegisterScheduler(name string, createFn CreateSchedulerFunc) { if _, ok := schedulerMap[name]; ok { - log.Fatalf("duplicated scheduler name: %v", name) + log.Fatal("duplicated scheduler", zap.String("name", name)) } schedulerMap[name] = createFn } diff --git a/server/schedulers/adjacent_region.go b/server/schedulers/adjacent_region.go index 660de6ae4cf..0636873eb21 100644 --- a/server/schedulers/adjacent_region.go +++ b/server/schedulers/adjacent_region.go @@ -18,10 +18,11 @@ import ( "strconv" "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -194,7 +195,7 @@ func (l *balanceAdjacentRegionScheduler) process(cluster schedule.Cluster) []*sc defer func() { if l.cacheRegions.len() < 0 { - log.Fatalf("[%s]the cache overflow should never happen", l.GetName()) + log.Fatal("cache overflow", zap.String("scheduler", l.GetName())) } l.cacheRegions.head = head + 1 l.lastKey = r2.GetStartKey() diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go index 566f8719279..09f02b9e662 100644 --- a/server/schedulers/balance_leader.go +++ b/server/schedulers/balance_leader.go @@ -17,10 +17,11 @@ import ( "fmt" "strconv" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -87,7 +88,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster, opInfluence return nil } - log.Debugf("[%s] store%d has the max leader score, store%d has the min leader score", l.GetName(), source.GetId(), target.GetId()) + log.Debug("store leader score", zap.String("scheduler", l.GetName()), zap.Uint64("max-store", source.GetId()), zap.Uint64("min-store", target.GetId())) sourceStoreLabel := strconv.FormatUint(source.GetId(), 10) targetStoreLabel 
:= strconv.FormatUint(target.GetId(), 10) balanceLeaderCounter.WithLabelValues("high_score", sourceStoreLabel).Inc() @@ -105,7 +106,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster, opInfluence } // If no operator can be created for the selected stores, ignore them for a while. - log.Debugf("[%s] no operator created for selected store%d and store%d", l.GetName(), source.GetId(), target.GetId()) + log.Debug("no operator created for selected stores", zap.String("scheduler", l.GetName()), zap.Uint64("source", source.GetId()), zap.Uint64("target", target.GetId())) balanceLeaderCounter.WithLabelValues("add_taint", strconv.FormatUint(source.GetId(), 10)).Inc() l.taintStores.Put(source.GetId()) balanceLeaderCounter.WithLabelValues("add_taint", strconv.FormatUint(target.GetId(), 10)).Inc() @@ -116,13 +117,13 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster, opInfluence func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { region := cluster.RandLeaderRegion(source.GetId(), core.HealthRegion()) if region == nil { - log.Debugf("[%s] store%d has no leader", l.GetName(), source.GetId()) + log.Debug("store has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", source.GetId())) schedulerCounter.WithLabelValues(l.GetName(), "no_leader_region").Inc() return nil } target := l.selector.SelectTarget(cluster, cluster.GetFollowerStores(region)) if target == nil { - log.Debugf("[%s] region %d has no target store", l.GetName(), region.GetID()) + log.Debug("region has no target store", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_target_store").Inc() return nil } @@ -132,13 +133,13 @@ func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, clust func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, 
cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { region := cluster.RandFollowerRegion(target.GetId(), core.HealthRegion()) if region == nil { - log.Debugf("[%s] store%d has no follower", l.GetName(), target.GetId()) + log.Debug("store has no follower", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", target.GetId())) schedulerCounter.WithLabelValues(l.GetName(), "no_follower_region").Inc() return nil } source := cluster.GetStore(region.GetLeader().GetStoreId()) if source == nil { - log.Debugf("[%s] region %d has no leader", l.GetName(), region.GetID()) + log.Debug("region has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_leader").Inc() return nil } @@ -147,17 +148,19 @@ func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluste func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source, target *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { if cluster.IsRegionHot(region.GetID()) { - log.Debugf("[%s] region %d is hot region, ignore it", l.GetName(), region.GetID()) + log.Debug("region is hot region, ignore it", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "region_hot").Inc() return nil } if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) { - log.Debugf("[%s] skip balance region %d, source %d to target %d, source size: %v, source score: %v, source influence: %v, target size: %v, target score: %v, target influence: %v, average region size: %v", - l.GetName(), region.GetID(), source.GetId(), target.GetId(), - source.LeaderSize, source.LeaderScore(0), opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.LeaderKind), - target.LeaderSize, target.LeaderScore(0), 
opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.LeaderKind), - cluster.GetAverageRegionSize()) + log.Debug("skip balance region", + zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetId()), zap.Uint64("target-store", target.GetId()), + zap.Int64("source-size", source.LeaderSize), zap.Float64("source-score", source.LeaderScore(0)), + zap.Int64("source-influence", opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.LeaderKind)), + zap.Int64("target-size", target.LeaderSize), zap.Float64("target-score", target.LeaderScore(0)), + zap.Int64("target-influence", opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.LeaderKind)), + zap.Int64("average-region-size", cluster.GetAverageRegionSize())) schedulerCounter.WithLabelValues(l.GetName(), "skip").Inc() return nil } diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go index 22aa1a6e49d..99bebe78b53 100644 --- a/server/schedulers/balance_region.go +++ b/server/schedulers/balance_region.go @@ -18,10 +18,11 @@ import ( "strconv" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -82,8 +83,8 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster, opInfluence return nil } - log.Debugf("[%s] store%d has the max region score", s.GetName(), source.GetId()) sourceLabel := strconv.FormatUint(source.GetId(), 10) + log.Debug("store has the max region score", zap.String("scheduler", s.GetName()), zap.Uint64("store-id", source.GetId())) balanceRegionCounter.WithLabelValues("source_store", sourceLabel).Inc() var hasPotentialTarget bool @@ -96,18 +97,18 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster, opInfluence schedulerCounter.WithLabelValues(s.GetName(), 
"no_region").Inc() continue } - log.Debugf("[%s] select region%d", s.GetName(), region.GetID()) + log.Debug("select region", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID())) // We don't schedule region with abnormal number of replicas. if len(region.GetPeers()) != cluster.GetMaxReplicas() { - log.Debugf("[%s] region%d has abnormal replica count", s.GetName(), region.GetID()) + log.Debug("region has abnormal replica count", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "abnormal_replica").Inc() continue } // Skip hot regions. if cluster.IsRegionHot(region.GetID()) { - log.Debugf("[%s] region%d is hot", s.GetName(), region.GetID()) + log.Debug("region is hot", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "region_hot").Inc() continue } @@ -126,7 +127,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster, opInfluence if !hasPotentialTarget { // If no potential target store can be found for the selected store, ignore it for a while. 
- log.Debugf("[%s] no operator created for selected store%d", s.GetName(), source.GetId()) + log.Debug("no operator created for selected store", zap.String("scheduler", s.GetName()), zap.Uint64("store-id", source.GetId())) balanceRegionCounter.WithLabelValues("add_taint", sourceLabel).Inc() s.taintStores.Put(source.GetId()) } @@ -148,16 +149,16 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region * } target := cluster.GetStore(storeID) - log.Debugf("[region %d] source store id is %v, target store id is %v", region.GetID(), source.GetId(), target.GetId()) + log.Debug("", zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetId()), zap.Uint64("target-store", target.GetId())) if !shouldBalance(cluster, source, target, region, core.RegionKind, opInfluence) { - log.Debugf("[%s] skip balance region %d, source %d to target %d ,source size: %v, source score: %v, source influence: %v, target size: %v, target score: %v, target influence: %v, average region size: %v", - s.GetName(), region.GetID(), source.GetId(), target.GetId(), - source.RegionSize, source.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0), - opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.RegionKind), - target.RegionSize, target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0), - opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.RegionKind), - cluster.GetAverageRegionSize()) + log.Debug("skip balance region", + zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetId()), zap.Uint64("target-store", target.GetId()), + zap.Int64("source-size", source.RegionSize), zap.Float64("source-score", source.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0)), + zap.Int64("source-influence", opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.RegionKind)), + zap.Int64("target-size", target.RegionSize), 
zap.Float64("target-score", target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0)), + zap.Int64("target-influence", opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.RegionKind)), + zap.Int64("average-region-size", cluster.GetAverageRegionSize())) schedulerCounter.WithLabelValues(s.GetName(), "skip").Inc() return nil } diff --git a/server/schedulers/base_scheduler.go b/server/schedulers/base_scheduler.go index 2352c7b4488..5ab355ca8b0 100644 --- a/server/schedulers/base_scheduler.go +++ b/server/schedulers/base_scheduler.go @@ -16,8 +16,8 @@ package schedulers import ( "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" ) // options for interval of schedulers @@ -46,7 +46,7 @@ func intervalGrow(x time.Duration, maxInterval time.Duration, typ intervalGrowth case zeroGrowth: return x default: - log.Fatal("unKnow interval growth type") + log.Fatal("unknown interval growth type") } return 0 } diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go index 5e2abb73e8f..5df826601ee 100644 --- a/server/schedulers/hot_region.go +++ b/server/schedulers/hot_region.go @@ -20,9 +20,10 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -324,7 +325,7 @@ func (h *balanceHotRegionsScheduler) balanceByPeer(cluster schedule.Cluster, sto // because it doesn't exist in the system right now. 
destPeer, err := cluster.AllocPeer(destStoreID) if err != nil { - log.Errorf("failed to allocate peer: %v", err) + log.Error("failed to allocate peer", zap.Error(err)) return nil, nil, nil } diff --git a/server/schedulers/label.go b/server/schedulers/label.go index d0d8c913327..e06d53f437a 100644 --- a/server/schedulers/label.go +++ b/server/schedulers/label.go @@ -14,9 +14,10 @@ package schedulers import ( + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -63,10 +64,10 @@ func (s *labelScheduler) Schedule(cluster schedule.Cluster, opInfluence schedule schedulerCounter.WithLabelValues(s.GetName(), "skip").Inc() return nil } - log.Debugf("label scheduler reject leader store list: %v", rejectLeaderStores) + log.Debug("label scheduler reject leader store list", zap.Reflect("stores", rejectLeaderStores)) for id := range rejectLeaderStores { if region := cluster.RandLeaderRegion(id); region != nil { - log.Debugf("label scheduler selects region %d to transfer leader", region.GetID()) + log.Debug("label scheduler selects region to transfer leader", zap.Uint64("region-id", region.GetID())) excludeStores := make(map[uint64]struct{}) for _, p := range region.GetDownPeers() { excludeStores[p.GetPeer().GetStoreId()] = struct{}{} @@ -77,7 +78,7 @@ func (s *labelScheduler) Schedule(cluster schedule.Cluster, opInfluence schedule filter := schedule.NewExcludedFilter(nil, excludeStores) target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region), filter) if target == nil { - log.Debugf("label scheduler no target found for region %d", region.GetID()) + log.Debug("label scheduler no target found for region", zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "no_target").Inc() continue } diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go index 07030081c17..e24271f935b 
100644 --- a/server/schedulers/shuffle_region.go +++ b/server/schedulers/shuffle_region.go @@ -15,9 +15,10 @@ package schedulers import ( "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -110,7 +111,7 @@ func (s *shuffleRegionScheduler) scheduleAddPeer(cluster schedule.Cluster, filte newPeer, err := cluster.AllocPeer(target.GetId()) if err != nil { - log.Errorf("failed to allocate peer: %v", err) + log.Error("failed to allocate peer", zap.Error(err)) return nil } diff --git a/server/server.go b/server/server.go index da74b250e6f..98441c43dcd 100644 --- a/server/server.go +++ b/server/server.go @@ -31,12 +31,13 @@ import ( "github.com/coreos/go-semver/semver" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/etcdutil" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" "google.golang.org/grpc" ) @@ -97,11 +98,14 @@ type Server struct { lastSavedTime time.Time // For async region heartbeat. hbStreams *heartbeatStreams + // Zap logger + lg *zap.Logger + logProps *log.ZapProperties } // CreateServer creates the UNINITIALIZED pd server with given configuration. func CreateServer(cfg *Config, apiRegister func(*Server) http.Handler) (*Server, error) { - log.Infof("PD config - %v", cfg) + log.Info("PD Config", zap.Reflect("config", cfg)) rand.Seed(time.Now().UnixNano()) s := &Server{ @@ -124,11 +128,14 @@ func CreateServer(cfg *Config, apiRegister func(*Server) http.Handler) (*Server, s.etcdCfg = etcdCfg if EnableZap { // The etcd master version has removed embed.Config.SetupLogging. 
- // Now logger is set up automatically based on embed.Config.Logger, embed.Config.LogOutputs, embed.Config.Debug fields. - // Use zap logger in the test, otherwise will panic. Reference: https://github.com/coreos/etcd/blob/master/embed/config_logging.go#L45 + // Now logger is set up automatically based on embed.Config.Logger, + // Use zap logger in the test, otherwise will panic. + // Reference: https://github.com/coreos/etcd/blob/master/embed/config_logging.go#L45 s.etcdCfg.Logger = "zap" s.etcdCfg.LogOutputs = []string{"stdout"} } + s.lg = cfg.logger + s.logProps = cfg.logProps return s, nil } @@ -163,7 +170,7 @@ func (s *Server) startEtcd(ctx context.Context) error { } endpoints := []string{s.etcdCfg.ACUrls[0].String()} - log.Infof("create etcd v3 client with endpoints %v", endpoints) + log.Info("create etcd v3 client", zap.Strings("endpoints", endpoints)) client, err := clientv3.New(clientv3.Config{ Endpoints: endpoints, @@ -185,7 +192,7 @@ func (s *Server) startEtcd(ctx context.Context) error { if etcdServerID == m.ID { etcdPeerURLs := strings.Join(m.PeerURLs, ",") if s.cfg.AdvertisePeerUrls != etcdPeerURLs { - log.Infof("update advertise peer urls from %s to %s", s.cfg.AdvertisePeerUrls, etcdPeerURLs) + log.Info("update advertise peer urls", zap.String("from", s.cfg.AdvertisePeerUrls), zap.String("to", etcdPeerURLs)) s.cfg.AdvertisePeerUrls = etcdPeerURLs } } @@ -202,7 +209,7 @@ func (s *Server) startServer() error { if err = s.initClusterID(); err != nil { return err } - log.Infof("init cluster id %v", s.clusterID) + log.Info("init cluster id", zap.Uint64("cluster-id", s.clusterID)) // It may lose accuracy if use float64 to store uint64. So we store the // cluster id in label. 
metadataGauge.WithLabelValues(fmt.Sprintf("cluster%d", s.clusterID)).Set(0) @@ -277,7 +284,7 @@ var timeMonitorOnce sync.Once func (s *Server) Run(ctx context.Context) error { timeMonitorOnce.Do(func() { go StartMonitor(time.Now, func() { - log.Errorf("system time jumps backward") + log.Error("system time jumps backward") timeJumpBackCounter.Inc() }) }) @@ -334,7 +341,9 @@ func (s *Server) collectEtcdStateMetrics() { func (s *Server) bootstrapCluster(req *pdpb.BootstrapRequest) (*pdpb.BootstrapResponse, error) { clusterID := s.clusterID - log.Infof("try to bootstrap raft cluster %d with %v", clusterID, req) + log.Info("try to bootstrap raft cluster", + zap.Uint64("cluster-id", clusterID), + zap.String("request", fmt.Sprintf("%v", req))) if err := checkBootstrapRequest(clusterID, req); err != nil { return nil, err @@ -387,11 +396,11 @@ func (s *Server) bootstrapCluster(req *pdpb.BootstrapRequest) (*pdpb.BootstrapRe return nil, errors.WithStack(err) } if !resp.Succeeded { - log.Warnf("cluster %d already bootstrapped", clusterID) + log.Warn("cluster already bootstrapped", zap.Uint64("cluster-id", clusterID)) return nil, errors.Errorf("cluster %d already bootstrapped", clusterID) } - log.Infof("bootstrap cluster %d ok", clusterID) + log.Info("bootstrap cluster ok", zap.Uint64("cluster-id", clusterID)) if err := s.cluster.start(); err != nil { return nil, err @@ -491,7 +500,7 @@ func (s *Server) SetScheduleConfig(cfg ScheduleConfig) error { if err := s.scheduleOpt.persist(s.kv); err != nil { return err } - log.Infof("schedule config is updated: %+v, old: %+v", cfg, old) + log.Info("schedule config is updated", zap.Reflect("new", cfg), zap.Reflect("old", old)) return nil } @@ -509,11 +518,10 @@ func (s *Server) SetReplicationConfig(cfg ReplicationConfig) error { } old := s.scheduleOpt.rep.load() s.scheduleOpt.rep.store(&cfg) - s.scheduleOpt.persist(s.kv) if err := s.scheduleOpt.persist(s.kv); err != nil { return err } - log.Infof("replication config is updated: %+v, old: 
%+v", cfg, old) + log.Info("replication config is updated", zap.Reflect("new", cfg), zap.Reflect("old", old)) return nil } @@ -548,11 +556,11 @@ func (s *Server) SetNamespaceConfig(name string, cfg NamespaceConfig) { old := s.scheduleOpt.ns[name].load() n.store(&cfg) s.scheduleOpt.persist(s.kv) - log.Infof("namespace:%v config is updated: %+v, old: %+v", name, cfg, old) + log.Info("namespace config is updated", zap.String("name", name), zap.Reflect("new", cfg), zap.Reflect("old", old)) } else { s.scheduleOpt.ns[name] = newNamespaceOption(&cfg) s.scheduleOpt.persist(s.kv) - log.Infof("namespace:%v config is added: %+v", name, cfg) + log.Info("namespace config is added", zap.String("name", name), zap.Reflect("new", cfg)) } } @@ -562,7 +570,7 @@ func (s *Server) DeleteNamespaceConfig(name string) { cfg := n.load() delete(s.scheduleOpt.ns, name) s.scheduleOpt.persist(s.kv) - log.Infof("namespace:%v config is deleted: %+v", name, *cfg) + log.Info("namespace config is deleted", zap.String("name", name), zap.Reflect("config", *cfg)) } } @@ -573,7 +581,7 @@ func (s *Server) SetLabelProperty(typ, labelKey, labelValue string) error { if err != nil { return err } - log.Infof("label property config is updated: %+v", s.scheduleOpt.loadLabelPropertyConfig()) + log.Info("label property config is updated", zap.Reflect("config", s.scheduleOpt.loadLabelPropertyConfig())) return nil } @@ -584,7 +592,7 @@ func (s *Server) DeleteLabelProperty(typ, labelKey, labelValue string) error { if err != nil { return err } - log.Infof("label property config is updated: %+v", s.scheduleOpt.loadLabelPropertyConfig()) + log.Info("label property config is deleted", zap.Reflect("config", s.scheduleOpt.loadLabelPropertyConfig())) return nil } @@ -604,7 +612,7 @@ func (s *Server) SetClusterVersion(v string) error { if err != nil { return err } - log.Infof("cluster version is updated to %s", v) + log.Info("cluster version is updated", zap.String("new-version", v)) return nil } @@ -705,6 +713,8 @@ func (s 
*Server) GetMemberLeaderPriority(id uint64) (int, error) { // SetLogLevel sets log level. func (s *Server) SetLogLevel(level string) { s.cfg.Log.Level = level + log.SetLevel(logutil.StringToZapLogLevel(level)) + log.Warn("log level changed", zap.String("level", log.GetLevel().String())) } var healthURL = "/pd/ping" diff --git a/server/systime_mon.go b/server/systime_mon.go index d1e8fd09085..f8664be8aac 100644 --- a/server/systime_mon.go +++ b/server/systime_mon.go @@ -16,7 +16,8 @@ package server import ( "time" - log "github.com/sirupsen/logrus" + log "github.com/pingcap/log" + "go.uber.org/zap" ) // StartMonitor calls systimeErrHandler if system time jump backward. @@ -28,7 +29,7 @@ func StartMonitor(now func() time.Time, systimeErrHandler func()) { last := now().UnixNano() <-tick.C if now().UnixNano() < last { - log.Errorf("system time jump backward, last:%v", last) + log.Error("system time jump backward", zap.Int64("last", last)) systimeErrHandler() } } diff --git a/server/testutil.go b/server/testutil.go index 0596c58c4d3..17fdb57a61b 100644 --- a/server/testutil.go +++ b/server/testutil.go @@ -19,12 +19,15 @@ import ( "io/ioutil" "os" "strings" + "sync" "time" "github.com/coreos/etcd/embed" + "github.com/pingcap/log" "github.com/pingcap/pd/pkg/tempurl" "github.com/pingcap/pd/pkg/typeutil" "github.com/pingcap/pd/server/schedule" + "go.uber.org/zap" // Register namespace classifiers. _ "github.com/pingcap/pd/table" @@ -56,6 +59,8 @@ func NewTestServer() (*Config, *Server, CleanupFunc, error) { return cfg, s, cleanup, nil } +var zapLogOnce sync.Once + // NewTestSingleConfig is only for test to create one pd. // Because PD client also needs this, so export here. 
func NewTestSingleConfig() *Config { @@ -78,6 +83,13 @@ func NewTestSingleConfig() *Config { cfg.TickInterval = typeutil.NewDuration(100 * time.Millisecond) cfg.ElectionInterval = typeutil.NewDuration(3000 * time.Millisecond) cfg.leaderPriorityCheckInterval = typeutil.NewDuration(100 * time.Millisecond) + err := cfg.SetupLogger() + if err != nil { + log.Fatal("setup logger failed", zap.Error(err)) + } + zapLogOnce.Do(func() { + log.ReplaceGlobals(cfg.GetZapLogger(), cfg.GetZapLogProperties()) + }) cfg.Adjust(nil) diff --git a/server/tso.go b/server/tso.go index 73259bfdf37..f5f60658cb5 100644 --- a/server/tso.go +++ b/server/tso.go @@ -20,8 +20,9 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -91,7 +92,7 @@ func (s *Server) syncTimestamp() error { // If the current system time minus the saved etcd timestamp is less than `updateTimestampGuard`, // the timestamp allocation will start from the saved etcd timestamp temporarily. 
if subTimeByWallClock(next, last) < updateTimestampGuard { - log.Errorf("system time may be incorrect: last: %v next %v", last, next) + log.Error("system time may be incorrect", zap.Time("last", last), zap.Time("next", next)) next = last.Add(updateTimestampGuard) } @@ -101,7 +102,7 @@ func (s *Server) syncTimestamp() error { } tsoCounter.WithLabelValues("sync_ok").Inc() - log.Infof("sync and save timestamp: last %v save %v next %v", last, save, next) + log.Info("sync and save timestamp", zap.Time("last", last), zap.Time("save", save), zap.Time("next", next)) current := &atomicObject{ physical: next, @@ -134,7 +135,7 @@ func (s *Server) updateTimestamp() error { jetLag := subTimeByWallClock(now, prev.physical) if jetLag > 3*updateTimestampStep { - log.Warnf("clock offset: %v, prev: %v, now: %v", jetLag, prev.physical, now) + log.Warn("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now)) tsoCounter.WithLabelValues("slow_save").Inc() } @@ -150,7 +151,7 @@ func (s *Server) updateTimestamp() error { } else if prevLogical > maxLogical/2 { // The reason choosing maxLogical/2 here is that it's big enough for common cases. // Because there is enough timestamp can be allocated before next update. - log.Warnf("the logical time may be not enough, prevLogical: %v", prevLogical) + log.Warn("the logical time may be not enough", zap.Int64("prev-logical", prevLogical)) next = prev.physical.Add(time.Millisecond) } else { // It will still use the previous physical time to alloc the timestamp. 
@@ -185,7 +186,7 @@ func (s *Server) getRespTS(count uint32) (pdpb.Timestamp, error) { for i := 0; i < maxRetryCount; i++ { current, ok := s.ts.Load().(*atomicObject) if !ok || current.physical == zeroTime { - log.Errorf("we haven't synced timestamp ok, wait and retry, retry count %d", i) + log.Error("we haven't synced timestamp ok, wait and retry", zap.Int("retry-count", i)) time.Sleep(200 * time.Millisecond) continue } @@ -193,7 +194,9 @@ func (s *Server) getRespTS(count uint32) (pdpb.Timestamp, error) { resp.Physical = current.physical.UnixNano() / int64(time.Millisecond) resp.Logical = atomic.AddInt64(¤t.logical, int64(count)) if resp.Logical >= maxLogical { - log.Errorf("logical part outside of max logical interval %v, please check ntp time, retry count %d", resp, i) + log.Error("logical part outside of max logical interval, please check ntp time", + zap.Reflect("response", resp), + zap.Int("retry-count", i)) tsoCounter.WithLabelValues("logical_overflow").Inc() time.Sleep(updateTimestampStep) continue diff --git a/server/util.go b/server/util.go index 416cbb5cdcb..138efd5543b 100644 --- a/server/util.go +++ b/server/util.go @@ -26,9 +26,10 @@ import ( "github.com/golang/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/etcdutil" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -55,11 +56,11 @@ var dialClient = &http.Client{ // LogPDInfo prints the PD version information. 
func LogPDInfo() { - log.Infof("Welcome to Placement Driver (PD).") - log.Infof("Release Version: %s", PDReleaseVersion) - log.Infof("Git Commit Hash: %s", PDGitHash) - log.Infof("Git Branch: %s", PDGitBranch) - log.Infof("UTC Build Time: %s", PDBuildTS) + log.Info("Welcome to Placement Driver (PD)") + log.Info("PD", zap.String("release-version", PDReleaseVersion)) + log.Info("PD", zap.String("git-hash", PDGitHash)) + log.Info("PD", zap.String("git-branch", PDGitBranch)) + log.Info("PD", zap.String("utc-build-time", PDBuildTS)) } // PrintPDInfo prints the PD version information without log info. @@ -78,7 +79,10 @@ func CheckPDVersion(opt *scheduleOption) { } clusterVersion := opt.loadClusterVersion() if pdVersion.LessThan(clusterVersion) { - log.Warnf("PD version %s less than cluster version: %s, please upgrade PD", pdVersion, clusterVersion) + log.Warn( + "PD version less than cluster version, please upgrade PD", + zap.String("PD-version", pdVersion.String()), + zap.String("cluster-version", clusterVersion.String())) } } @@ -213,7 +217,10 @@ func (t *slowLogTxn) Commit() (*clientv3.TxnResponse, error) { cost := time.Since(start) if cost > slowRequestTime { - log.Warnf("txn runs too slow, resp: %v, err: %v, cost: %s", resp, err, cost) + log.Warn("txn runs too slow", + zap.Error(err), + zap.Reflect("response", resp), + zap.Duration("cost", cost)) } label := "success" if err != nil { diff --git a/server/version.go b/server/version.go index 6b853fb88aa..485b3c8647b 100644 --- a/server/version.go +++ b/server/version.go @@ -15,8 +15,9 @@ package server import ( "github.com/coreos/go-semver/semver" + log "github.com/pingcap/log" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // Feature supported features. 
@@ -53,7 +54,7 @@ var featuresDict = map[Feature]string{ func MinSupportedVersion(v Feature) semver.Version { target, ok := featuresDict[v] if !ok { - log.Fatalf("the corresponding version of the feature %d doesn't exist", v) + log.Fatal("the corresponding version of the feature doesn't exist", zap.Int("feature-number", int(v))) } version := MustParseVersion(target) return *version @@ -76,7 +77,7 @@ func ParseVersion(v string) (*semver.Version, error) { func MustParseVersion(v string) *semver.Version { ver, err := ParseVersion(v) if err != nil { - log.Fatalf("version string is illegal: %s", err) + log.Fatal("version string is illegal", zap.Error(err)) } return ver } diff --git a/table/namespace_classifier.go b/table/namespace_classifier.go index 1527084fe52..694499ef5cf 100644 --- a/table/namespace_classifier.go +++ b/table/namespace_classifier.go @@ -23,10 +23,11 @@ import ( "sync" "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -435,7 +436,9 @@ func (namespaceInfo *namespacesInfo) loadNamespaces(kv *core.KV, rangeLimit int) } if len(res) < rangeLimit { - log.Infof("load %v namespacesInfo cost %v", namespaceInfo.getNamespaceCount(), time.Since(start)) + log.Info("load namespaces information", + zap.Int("namespace-count", namespaceInfo.getNamespaceCount()), + zap.Duration("cost", time.Since(start))) return nil } } diff --git a/tools/pd-ctl/pdctl/command/region_command.go b/tools/pd-ctl/pdctl/command/region_command.go index e30bf484e2b..1f72960408a 100644 --- a/tools/pd-ctl/pdctl/command/region_command.go +++ b/tools/pd-ctl/pdctl/command/region_command.go @@ -23,7 +23,9 @@ import ( "strconv" "strings" + "github.com/pkg/errors" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) var ( @@ -214,27 +216,11 @@ func showRegionWithTableCommandFunc(cmd *cobra.Command, args []string) { 
fmt.Println(cmd.UsageString()) return } - - var ( - key string - err error - ) - - format := cmd.Flags().Lookup("format").Value.String() - switch format { - case "raw": - key = args[0] - case "encode": - key, err = decodeKey(args[0]) - if err != nil { - fmt.Println("Error: ", err) - return - } - default: - fmt.Println("Error: unknown format") + key, err := parseKey(cmd.Flags(), args[0]) + if err != nil { + fmt.Println("Error: ", err) return } - key = url.QueryEscape(key) prefix := regionKeyPrefix + "/" + key r, err := doRequest(cmd, prefix, http.MethodGet) @@ -245,6 +231,16 @@ func showRegionWithTableCommandFunc(cmd *cobra.Command, args []string) { fmt.Println(r) } +func parseKey(flags *pflag.FlagSet, key string) (string, error) { + switch flags.Lookup("format").Value.String() { + case "raw": + return key, nil + case "encode": + return decodeKey(key) + } + return "", errors.New("unknown format") +} + func decodeKey(text string) (string, error) { var buf []byte r := bytes.NewBuffer([]byte(text)) @@ -252,7 +248,7 @@ func decodeKey(text string) (string, error) { c, err := r.ReadByte() if err != nil { if err != io.EOF { - return "", err + return "", errors.WithStack(err) } break } @@ -278,7 +274,7 @@ func decodeKey(text string) (string, error) { n = append(n, r.Next(2)...) _, err := fmt.Sscanf(string(n), "%03o", &c) if err != nil { - return "", err + return "", errors.WithStack(err) } buf = append(buf, c) } diff --git a/tools/pd-ctl/pdctl/command/scheduler.go b/tools/pd-ctl/pdctl/command/scheduler.go index 8eac33ea044..cac0f0cb50a 100644 --- a/tools/pd-ctl/pdctl/command/scheduler.go +++ b/tools/pd-ctl/pdctl/command/scheduler.go @@ -148,10 +148,11 @@ func addSchedulerCommandFunc(cmd *cobra.Command, args []string) { // NewScatterRangeSchedulerCommand returns a command to add a scatter-range-scheduler. 
func NewScatterRangeSchedulerCommand() *cobra.Command { c := &cobra.Command{ - Use: "scatter-range ", + Use: "scatter-range [--format=raw|encode|hex] ", Short: "add a scheduler to scatter range", Run: addSchedulerForScatterRangeCommandFunc, } + c.Flags().String("format", "hex", "the key format") return c } @@ -160,11 +161,21 @@ func addSchedulerForScatterRangeCommandFunc(cmd *cobra.Command, args []string) { fmt.Println(cmd.UsageString()) return } + startKey, err := parseKey(cmd.Flags(), args[0]) + if err != nil { + fmt.Println("Error: ", err) + return + } + endKey, err := parseKey(cmd.Flags(), args[1]) + if err != nil { + fmt.Println("Error: ", err) + return + } input := make(map[string]interface{}) input["name"] = cmd.Name() - input["start_key"] = url.QueryEscape(args[0]) - input["end_key"] = url.QueryEscape(args[1]) + input["start_key"] = url.QueryEscape(startKey) + input["end_key"] = url.QueryEscape(endKey) input["range_name"] = args[2] postJSON(cmd, schedulersPrefix, input) } diff --git a/tools/pd-simulator/main.go b/tools/pd-simulator/main.go index 7684be7bfa1..f1e709df9f1 100644 --- a/tools/pd-simulator/main.go +++ b/tools/pd-simulator/main.go @@ -23,8 +23,7 @@ import ( "time" "github.com/BurntSushi/toml" - etcdlogutil "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/etcd/raft" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/server" "github.com/pingcap/pd/server/api" @@ -32,7 +31,6 @@ import ( "github.com/pingcap/pd/tools/pd-simulator/simulator" "github.com/pingcap/pd/tools/pd-simulator/simulator/cases" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" - log "github.com/sirupsen/logrus" "go.uber.org/zap" // Register schedulers. 
@@ -52,7 +50,6 @@ var ( func main() { flag.Parse() - initRaftLogger() simutil.InitLogger(*simLogLevel) schedule.Simulating = true @@ -72,18 +69,17 @@ func run(simCase string) { simConfig := simulator.NewSimConfig() if *configFile != "" { if _, err := toml.DecodeFile(*configFile, simConfig); err != nil { - simutil.Logger.Fatal(err) + simutil.Logger.Fatal("fail to decode file ", zap.Error(err)) } } simConfig.Adjust() - if *pdAddr != "" { simStart(*pdAddr, simCase, simConfig) } else { _, local, clean := NewSingleServer() err := local.Run(context.Background()) if err != nil { - simutil.Logger.Fatal("run server error:", err) + simutil.Logger.Fatal("run server error", zap.Error(err)) } for { if local.IsLeader() { @@ -101,7 +97,7 @@ func NewSingleServer() (*server.Config, *server.Server, server.CleanupFunc) { cfg.Log.Level = *serverLogLevel err := logutil.InitLogger(&cfg.Log) if err != nil { - log.Fatalf("initialize logger error: %s\n", err) + log.Fatal("initialize logger error", zap.Error(err)) } s, err := server.CreateServer(cfg, api.NewHandler) @@ -121,39 +117,16 @@ func cleanServer(cfg *server.Config) { os.RemoveAll(cfg.DataDir) } -func initRaftLogger() { - // etcd uses zap as the default Raft logger. - lcfg := &zap.Config{ - Level: zap.NewAtomicLevelAt(zap.InfoLevel), - Development: false, - Sampling: &zap.SamplingConfig{ - Initial: 100, - Thereafter: 100, - }, - Encoding: "json", - EncoderConfig: zap.NewProductionEncoderConfig(), - - // Passing no URLs here, because we don't want to output the Raft log. 
- OutputPaths: []string{}, - ErrorOutputPaths: []string{}, - } - lg, err := etcdlogutil.NewRaftLogger(lcfg) - if err != nil { - log.Fatalf("cannot create raft logger %v", err) - } - raft.SetLogger(lg) -} - func simStart(pdAddr string, simCase string, simConfig *simulator.SimConfig, clean ...server.CleanupFunc) { start := time.Now() driver, err := simulator.NewDriver(pdAddr, simCase, simConfig) if err != nil { - simutil.Logger.Fatal("create driver error:", err) + simutil.Logger.Fatal("create driver error", zap.Error(err)) } err = driver.Prepare() if err != nil { - simutil.Logger.Fatal("simulator prepare error:", err) + simutil.Logger.Fatal("simulator prepare error", zap.Error(err)) } var runInternal bool diff --git a/tools/pd-simulator/simulator/cases/add_nodes.go b/tools/pd-simulator/simulator/cases/add_nodes.go index dfe2571611a..eeb2d74139c 100644 --- a/tools/pd-simulator/simulator/cases/add_nodes.go +++ b/tools/pd-simulator/simulator/cases/add_nodes.go @@ -16,6 +16,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newAddNodes() *Case { @@ -65,8 +66,7 @@ func newAddNodes() *Case { } } - simutil.Logger.Infof("leader counts: %v", leaderCounts) - simutil.Logger.Infof("region counts: %v", regionCounts) + simutil.Logger.Info("current counts", zap.Ints("leader", leaderCounts), zap.Ints("region", regionCounts)) return res } return &simCase diff --git a/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go b/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go index 4fb14983d47..85fb8f08ecb 100644 --- a/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go +++ b/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go @@ -16,6 +16,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newAddNodesDynamic() *Case { @@ 
-83,8 +84,7 @@ func newAddNodesDynamic() *Case { } } - simutil.Logger.Infof("leader counts: %v", leaderCounts) - simutil.Logger.Infof("region counts: %v", regionCounts) + simutil.Logger.Info("current counts", zap.Ints("leader", leaderCounts), zap.Ints("region", regionCounts)) return res } return &simCase diff --git a/tools/pd-simulator/simulator/cases/balance_leader.go b/tools/pd-simulator/simulator/cases/balance_leader.go index 45d850b73b2..06e1aeac7b7 100644 --- a/tools/pd-simulator/simulator/cases/balance_leader.go +++ b/tools/pd-simulator/simulator/cases/balance_leader.go @@ -17,6 +17,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newBalanceLeader() *Case { @@ -52,7 +53,10 @@ func newBalanceLeader() *Case { count1 := regions.GetStoreLeaderCount(1) count2 := regions.GetStoreLeaderCount(2) count3 := regions.GetStoreLeaderCount(3) - simutil.Logger.Infof("leader counts: %v %v %v", count1, count2, count3) + simutil.Logger.Info("current leader counts", + zap.Int("first-store", count1), + zap.Int("second-store", count2), + zap.Int("third-store", count3)) return count1 <= 350 && count2 >= 300 && diff --git a/tools/pd-simulator/simulator/cases/delete_nodes.go b/tools/pd-simulator/simulator/cases/delete_nodes.go index 8857afb59c0..ade7d866186 100644 --- a/tools/pd-simulator/simulator/cases/delete_nodes.go +++ b/tools/pd-simulator/simulator/cases/delete_nodes.go @@ -18,6 +18,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newDeleteNodes() *Case { @@ -86,8 +87,7 @@ func newDeleteNodes() *Case { } } - simutil.Logger.Infof("leader counts: %v", leaderCounts) - simutil.Logger.Infof("region counts: %v", regionCounts) + simutil.Logger.Info("current counts", zap.Ints("leader", leaderCounts), zap.Ints("region", 
regionCounts)) return res } return &simCase diff --git a/tools/pd-simulator/simulator/cases/hot_read.go b/tools/pd-simulator/simulator/cases/hot_read.go index 8dc2ecf0ecb..f51a921a236 100644 --- a/tools/pd-simulator/simulator/cases/hot_read.go +++ b/tools/pd-simulator/simulator/cases/hot_read.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newHotRead() *Case { @@ -75,7 +76,7 @@ func newHotRead() *Case { leaderStore := regions.GetRegion(id).GetLeader().GetStoreId() leaderCount[int(leaderStore-1)]++ } - simutil.Logger.Infof("hot region count: %v", leaderCount) + simutil.Logger.Info("current hot region counts", zap.Reflect("hot-region", leaderCount)) // check count diff < 2. var min, max int diff --git a/tools/pd-simulator/simulator/cases/hot_write.go b/tools/pd-simulator/simulator/cases/hot_write.go index dbda162b55e..6de73b5a87a 100644 --- a/tools/pd-simulator/simulator/cases/hot_write.go +++ b/tools/pd-simulator/simulator/cases/hot_write.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newHotWrite() *Case { @@ -80,7 +81,7 @@ func newHotWrite() *Case { peerCount[int(p.GetStoreId()-1)]++ } } - simutil.Logger.Infof("hot region leader count: %v, peer count: %v", leaderCount, peerCount) + simutil.Logger.Info("current hot region counts", zap.Reflect("leader", leaderCount), zap.Reflect("peer", peerCount)) // check count diff <= 2. 
var minLeader, maxLeader, minPeer, maxPeer int diff --git a/tools/pd-simulator/simulator/cases/makeup_down_replica.go b/tools/pd-simulator/simulator/cases/makeup_down_replica.go index 70cfdc0d7c8..93997b614d1 100644 --- a/tools/pd-simulator/simulator/cases/makeup_down_replica.go +++ b/tools/pd-simulator/simulator/cases/makeup_down_replica.go @@ -16,6 +16,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newMakeupDownReplicas() *Case { @@ -73,7 +74,7 @@ func newMakeupDownReplicas() *Case { } sum += regionCount } - simutil.Logger.Infof("region counts: %v", regionCounts) + simutil.Logger.Info("current region counts", zap.Ints("region", regionCounts)) if down && sum < 1200 { // only need to print once diff --git a/tools/pd-simulator/simulator/cases/region_merge.go b/tools/pd-simulator/simulator/cases/region_merge.go index 5d1f2e7daca..1ff7d03fc0a 100644 --- a/tools/pd-simulator/simulator/cases/region_merge.go +++ b/tools/pd-simulator/simulator/cases/region_merge.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newRegionMerge() *Case { @@ -60,7 +61,11 @@ func newRegionMerge() *Case { count4 := regions.GetStoreRegionCount(4) sum := count1 + count2 + count3 + count4 - simutil.Logger.Infof("region counts: %v %v %v %v", count1, count2, count3, count4) + simutil.Logger.Info("current region counts", + zap.Int("first-store", count1), + zap.Int("second-store", count2), + zap.Int("third-store", count3), + zap.Int("fourth-store", count4)) return sum == 30 } return &simCase diff --git a/tools/pd-simulator/simulator/cases/region_split.go b/tools/pd-simulator/simulator/cases/region_split.go index fcf66ac9654..db1f0a55f24 100644 --- a/tools/pd-simulator/simulator/cases/region_split.go +++ 
b/tools/pd-simulator/simulator/cases/region_split.go @@ -17,6 +17,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) func newRegionSplit() *Case { @@ -58,7 +59,10 @@ func newRegionSplit() *Case { count1 := regions.GetStoreRegionCount(1) count2 := regions.GetStoreRegionCount(2) count3 := regions.GetStoreRegionCount(3) - simutil.Logger.Infof("region counts: %v %v %v", count1, count2, count3) + simutil.Logger.Info("current region counts", + zap.Int("first-store", count1), + zap.Int("second-store", count2), + zap.Int("third-store", count3)) return count1 > 5 && count2 > 5 && count3 > 5 } return &simCase diff --git a/tools/pd-simulator/simulator/client.go b/tools/pd-simulator/simulator/client.go index a7f587a3ce3..70a3022065d 100644 --- a/tools/pd-simulator/simulator/client.go +++ b/tools/pd-simulator/simulator/client.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" "github.com/pkg/errors" + "go.uber.org/zap" "google.golang.org/grpc" ) @@ -65,7 +66,7 @@ type client struct { // NewClient creates a PD client. 
func NewClient(pdAddr string, tag string) (Client, <-chan *pdpb.RegionHeartbeatResponse, error) { - simutil.Logger.Infof("[%s][pd] create pd client with endpoints %v", tag, pdAddr) + simutil.Logger.Info("create pd client with endpoints", zap.String("tag", tag), zap.String("pd-address", pdAddr)) ctx, cancel := context.WithCancel(context.Background()) c := &client{ url: pdAddr, @@ -83,7 +84,7 @@ func NewClient(pdAddr string, tag string) (Client, <-chan *pdpb.RegionHeartbeatR if err := c.initClusterID(); err != nil { return nil, nil, err } - simutil.Logger.Infof("[%s][pd] init cluster id %v", tag, c.clusterID) + simutil.Logger.Info("init cluster id", zap.String("tag", c.tag), zap.Uint64("cluster-id", c.clusterID)) c.wg.Add(1) go c.heartbeatStreamLoop() @@ -100,7 +101,7 @@ func (c *client) initClusterID() error { for i := 0; i < maxInitClusterRetries; i++ { members, err := c.getMembers(ctx) if err != nil || members.GetHeader() == nil { - simutil.Logger.Errorf("[%s][pd] failed to get cluster id: %v", c.tag, err) + simutil.Logger.Error("fail to get cluster id", zap.String("tag", c.tag), zap.Error(err)) continue } c.clusterID = members.GetHeader().GetClusterId() @@ -137,7 +138,7 @@ func (c *client) createHeartbeatStream() (pdpb.PD_RegionHeartbeatClient, context ctx, cancel = context.WithCancel(c.ctx) stream, err = c.pdClient().RegionHeartbeat(ctx) if err != nil { - simutil.Logger.Errorf("[%s][pd] create region heartbeat stream error: %v", c.tag, err) + simutil.Logger.Error("create region heartbeat stream error", zap.String("tag", c.tag), zap.Error(err)) cancel() select { case <-time.After(time.Second): @@ -166,7 +167,7 @@ func (c *client) heartbeatStreamLoop() { go c.receiveRegionHeartbeat(ctx, stream, errCh, wg) select { case err := <-errCh: - simutil.Logger.Infof("[%s][pd] heartbeat stream get error: %s ", c.tag, err) + simutil.Logger.Error("heartbeat stream get error", zap.String("tag", c.tag), zap.Error(err)) cancel() case <-c.ctx.Done(): simutil.Logger.Info("cancel 
heartbeat stream loop") @@ -211,7 +212,7 @@ func (c *client) reportRegionHeartbeat(ctx context.Context, stream pdpb.PD_Regio err := stream.Send(request) if err != nil { errCh <- err - simutil.Logger.Errorf("[%s][pd] report regionHeartbeat error: %v", c.tag, err) + simutil.Logger.Error("report regionHeartbeat error", zap.String("tag", c.tag), zap.Error(err)) } case <-ctx.Done(): return @@ -224,7 +225,7 @@ func (c *client) Close() { c.wg.Wait() if err := c.clientConn.Close(); err != nil { - simutil.Logger.Errorf("[%s][pd] failed close grpc clientConn: %v", c.tag, err) + simutil.Logger.Error("fail to close grpc client connection", zap.String("tag", c.tag), zap.Error(err)) } } @@ -281,7 +282,7 @@ func (c *client) PutStore(ctx context.Context, store *metapb.Store) error { return err } if resp.Header.GetError() != nil { - simutil.Logger.Info(resp.Header.GetError()) + simutil.Logger.Error("put store error", zap.Reflect("error", resp.Header.GetError())) return nil } return nil @@ -298,7 +299,7 @@ func (c *client) StoreHeartbeat(ctx context.Context, stats *pdpb.StoreStats) err return err } if resp.Header.GetError() != nil { - simutil.Logger.Info(resp.Header.GetError()) + simutil.Logger.Error("store heartbeat error", zap.Reflect("error", resp.Header.GetError())) return nil } return nil diff --git a/tools/pd-simulator/simulator/drive.go b/tools/pd-simulator/simulator/drive.go index ac2e4b6894b..a23d9c81c00 100644 --- a/tools/pd-simulator/simulator/drive.go +++ b/tools/pd-simulator/simulator/drive.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/pd/tools/pd-simulator/simulator/cases" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" "github.com/pkg/errors" + "go.uber.org/zap" ) // Driver promotes the cluster status change. 
@@ -70,9 +71,9 @@ func (d *Driver) Prepare() error { err = d.client.Bootstrap(ctx, store, region) cancel() if err != nil { - simutil.Logger.Fatal("bootstrapped error: ", err) + simutil.Logger.Fatal("bootstrap error", zap.Error(err)) } else { - simutil.Logger.Debug("Bootstrap success") + simutil.Logger.Debug("bootstrap success") } // Setup alloc id. diff --git a/tools/pd-simulator/simulator/event.go b/tools/pd-simulator/simulator/event.go index 7b19a1b40e5..dc52ca20ec0 100644 --- a/tools/pd-simulator/simulator/event.go +++ b/tools/pd-simulator/simulator/event.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/cases" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) // Event affects the status of the cluster. @@ -83,8 +84,9 @@ func (e *WriteFlowOnSpot) Run(raft *RaftEngine, tickCount int64) bool { res := e.in.Step(tickCount) for key, size := range res { region := raft.SearchRegion([]byte(key)) + simutil.Logger.Debug("search the region", zap.Reflect("region", region.GetMeta())) if region == nil { - simutil.Logger.Errorf("region not found for key %s", key) + simutil.Logger.Error("region not found for key", zap.String("key", key)) continue } raft.updateRegionStore(region, size) @@ -103,7 +105,7 @@ func (e *WriteFlowOnRegion) Run(raft *RaftEngine, tickCount int64) bool { for id, bytes := range res { region := raft.GetRegion(id) if region == nil { - simutil.Logger.Errorf("region %d not found", id) + simutil.Logger.Error("region is not found", zap.Uint64("region-id", id)) continue } raft.updateRegionStore(region, bytes) @@ -136,7 +138,7 @@ func (e *AddNodes) Run(raft *RaftEngine, tickCount int64) bool { } if _, ok := raft.conn.Nodes[id]; ok { - simutil.Logger.Infof("Node %d already existed", id) + simutil.Logger.Info("node has already existed", zap.Uint64("node-id", id)) return false } @@ -150,14 +152,14 @@ func (e *AddNodes) Run(raft *RaftEngine, tickCount int64) bool { } 
n, err := NewNode(s, raft.conn.pdAddr, config.StoreIOMBPerSecond) if err != nil { - simutil.Logger.Errorf("Add node %d failed: %v", id, err) + simutil.Logger.Error("add node failed", zap.Uint64("node-id", id), zap.Error(err)) return false } raft.conn.Nodes[id] = n n.raftEngine = raft err = n.Start() if err != nil { - simutil.Logger.Errorf("Start node %d failed: %v", id, err) + simutil.Logger.Error("start node failed", zap.Uint64("node-id", id), zap.Error(err)) } return false } @@ -176,7 +178,7 @@ func (e *DeleteNodes) Run(raft *RaftEngine, tickCount int64) bool { node := raft.conn.Nodes[id] if node == nil { - simutil.Logger.Errorf("Node %d not existed", id) + simutil.Logger.Error("node is not existed", zap.Uint64("node-id", id)) return false } delete(raft.conn.Nodes, id) diff --git a/tools/pd-simulator/simulator/node.go b/tools/pd-simulator/simulator/node.go index c2c5814ea3a..464553cb4b2 100644 --- a/tools/pd-simulator/simulator/node.go +++ b/tools/pd-simulator/simulator/node.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/pd/tools/pd-simulator/simulator/cases" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) const ( @@ -130,7 +131,10 @@ func (n *Node) stepTask() { for _, task := range n.tasks { task.Step(n.raftEngine) if task.IsFinished() { - simutil.Logger.Infof("[store %d][region %d] task finished: %s final: %v", n.GetId(), task.RegionID(), task.Desc(), n.raftEngine.GetRegion(task.RegionID())) + simutil.Logger.Debug("task finished", + zap.Uint64("node-id", n.Id), + zap.Uint64("region-id", task.RegionID()), + zap.String("task", task.Desc())) delete(n.tasks, task.RegionID()) } } @@ -152,7 +156,9 @@ func (n *Node) storeHeartBeat() { ctx, cancel := context.WithTimeout(n.ctx, pdTimeout) err := n.client.StoreHeartbeat(ctx, n.stats) if err != nil { - simutil.Logger.Infof("[store %d] report heartbeat error: %s", n.GetId(), err) + simutil.Logger.Info("report heartbeat error", + 
zap.Uint64("node-id", n.GetId()), + zap.Error(err)) } cancel() } @@ -167,7 +173,10 @@ func (n *Node) regionHeartBeat() { ctx, cancel := context.WithTimeout(n.ctx, pdTimeout) err := n.client.RegionHeartbeat(ctx, region) if err != nil { - simutil.Logger.Infof("[node %d][region %d] report heartbeat error: %s", n.Id, region.GetID(), err) + simutil.Logger.Info("report heartbeat error", + zap.Uint64("node-id", n.Id), + zap.Uint64("region-id", region.GetID()), + zap.Error(err)) } cancel() } @@ -180,7 +189,10 @@ func (n *Node) reportRegionChange() { ctx, cancel := context.WithTimeout(n.ctx, pdTimeout) err := n.client.RegionHeartbeat(ctx, region) if err != nil { - simutil.Logger.Infof("[node %d][region %d] report heartbeat error: %s", n.Id, region.GetID(), err) + simutil.Logger.Info("report heartbeat error", + zap.Uint64("node-id", n.Id), + zap.Uint64("region-id", region.GetID()), + zap.Error(err)) } cancel() } @@ -192,7 +204,10 @@ func (n *Node) AddTask(task Task) { n.Lock() defer n.Unlock() if t, ok := n.tasks[task.RegionID()]; ok { - simutil.Logger.Infof("[node %d][region %d] already exists task : %s", n.Id, task.RegionID(), t.Desc()) + simutil.Logger.Info("task has already existed", + zap.Uint64("node-id", n.Id), + zap.Uint64("region-id", task.RegionID()), + zap.String("task", t.Desc())) return } n.tasks[task.RegionID()] = task @@ -203,7 +218,7 @@ func (n *Node) Stop() { n.cancel() n.client.Close() n.wg.Wait() - simutil.Logger.Infof("node %d stopped", n.Id) + simutil.Logger.Info("node stopped", zap.Uint64("node-id", n.Id)) } func (n *Node) incUsedSize(size uint64) { diff --git a/tools/pd-simulator/simulator/raft.go b/tools/pd-simulator/simulator/raft.go index ba7c9fe48c8..b44adb84d97 100644 --- a/tools/pd-simulator/simulator/raft.go +++ b/tools/pd-simulator/simulator/raft.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/pd/tools/pd-simulator/simulator/cases" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" "github.com/pkg/errors" + "go.uber.org/zap" ) // 
RaftEngine records all raft infomations. @@ -100,10 +101,13 @@ func (r *RaftEngine) stepLeader(region *core.RegionInfo) { newRegion := region.Clone(core.WithLeader(newLeader)) if newLeader == nil { r.SetRegion(newRegion) - simutil.Logger.Infof("[region %d] no leader", region.GetID()) + simutil.Logger.Info("region has no leader", zap.Uint64("region-id", region.GetID())) return } - simutil.Logger.Infof("[region %d] elect new leader: %+v,old leader: %+v", region.GetID(), newLeader, region.GetLeader()) + simutil.Logger.Info("region elects a new leader", + zap.Uint64("region-id", region.GetID()), + zap.Reflect("new-leader", newLeader), + zap.Reflect("old-leader", region.GetLeader())) r.SetRegion(newRegion) r.recordRegionChange(newRegion) } @@ -120,7 +124,7 @@ func (r *RaftEngine) stepSplit(region *core.RegionInfo) { var err error ids[i], err = r.allocID(region.GetLeader().GetStoreId()) if err != nil { - simutil.Logger.Infof("alloc id failed: %s", err) + simutil.Logger.Error("alloc id failed", zap.Error(err)) return } } @@ -145,6 +149,11 @@ func (r *RaftEngine) stepSplit(region *core.RegionInfo) { r.SetRegion(right) r.SetRegion(left) + simutil.Logger.Debug("region split", + zap.Uint64("region-id", region.GetID()), + zap.Reflect("origin", region.GetMeta()), + zap.Reflect("left", left.GetMeta()), + zap.Reflect("right", right.GetMeta())) r.recordRegionChange(left) r.recordRegionChange(right) } @@ -182,7 +191,7 @@ func (r *RaftEngine) updateRegionReadBytes(readBytes map[uint64]int64) { for id, bytes := range readBytes { region := r.GetRegion(id) if region == nil { - simutil.Logger.Errorf("region %d not found", id) + simutil.Logger.Error("region is not found", zap.Uint64("region-id", id)) continue } newRegion := region.Clone(core.SetReadBytes(uint64(bytes))) diff --git a/tools/pd-simulator/simulator/simutil/logger.go b/tools/pd-simulator/simulator/simutil/logger.go index f2e912294dc..3fbe79504a9 100644 --- a/tools/pd-simulator/simulator/simutil/logger.go +++ 
b/tools/pd-simulator/simulator/simutil/logger.go @@ -14,15 +14,16 @@ package simutil import ( - "github.com/pingcap/pd/pkg/logutil" - log "github.com/sirupsen/logrus" + log "github.com/pingcap/log" + "go.uber.org/zap" ) // Logger is the global logger used for simulator. -var Logger *log.Logger +var Logger *zap.Logger -// InitLogger initializes the Logger with log level. -func InitLogger(level string) { - Logger = log.New() - Logger.Level = logutil.StringToLogLevel(level) +// InitLogger initializes the Logger with log level. +func InitLogger(l string) { + conf := &log.Config{Level: l, File: log.FileLogConfig{}} + lg, _, _ := log.InitLogger(conf) + Logger = lg } diff --git a/tools/pd-simulator/simulator/task.go b/tools/pd-simulator/simulator/task.go index 3083fc601d1..9a626e90347 100644 --- a/tools/pd-simulator/simulator/task.go +++ b/tools/pd-simulator/simulator/task.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/tools/pd-simulator/simulator/simutil" + "go.uber.org/zap" ) // Task running in node. 
@@ -239,7 +240,7 @@ func (a *addPeer) Step(r *RaftEngine) { snapshotSize := region.GetApproximateSize() sendNode := r.conn.Nodes[region.GetLeader().GetStoreId()] if sendNode == nil { - simutil.Logger.Errorf("failed to sent snapshot: node %d has been deleted", sendNode.Id) + simutil.Logger.Error("failed to send snapshot, node has been deleted", zap.Uint64("node-id", region.GetLeader().GetStoreId())) a.finished = true return } @@ -250,7 +251,7 @@ recvNode := r.conn.Nodes[a.peer.GetStoreId()] if recvNode == nil { - simutil.Logger.Errorf("failed to receive snapshot: node %d has been deleted", recvNode.Id) + simutil.Logger.Error("failed to receive snapshot: node has been deleted", zap.Uint64("node-id", a.peer.GetStoreId())) a.finished = true return } diff --git a/tools/pd-tso-bench/main.go b/tools/pd-tso-bench/main.go index cb1079b478c..f2494faf8ec 100644 --- a/tools/pd-tso-bench/main.go +++ b/tools/pd-tso-bench/main.go @@ -17,15 +17,16 @@ import ( "context" "flag" "fmt" - "log" "os" "os/signal" "sync" "syscall" "time" - "github.com/pingcap/pd/client" + log "github.com/pingcap/log" + pd "github.com/pingcap/pd/client" "github.com/pkg/errors" + "go.uber.org/zap" ) var ( @@ -47,7 +48,7 @@ func main() { KeyPath: *keyPath, }) if err != nil { - log.Fatal(err) + log.Fatal(fmt.Sprintf("%v", err)) } ctx, cancel := context.WithCancel(context.Background()) @@ -55,7 +56,7 @@ for i := 0; i < *concurrency; i++ { _, _, err = pdCli.GetTS(ctx) if err != nil { - log.Fatal(err) + log.Fatal("get tso failed", zap.Error(err)) } } @@ -210,7 +211,7 @@ func reqWorker(ctx context.Context, pdCli pd.Client, durCh chan time.Duration) { } if err != nil { - log.Fatal(err) + log.Fatal(fmt.Sprintf("%v", err)) } dur := time.Since(start) diff --git a/vendor/github.com/pingcap/log/LICENSE b/vendor/github.com/pingcap/log/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/pingcap/log/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 
2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/pingcap/log/config.go b/vendor/github.com/pingcap/log/config.go new file mode 100644 index 00000000000..0db66249794 --- /dev/null +++ b/vendor/github.com/pingcap/log/config.go @@ -0,0 +1,108 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package log + +import ( + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + defaultLogMaxSize = 300 // MB +) + +// FileLogConfig serializes file log related config in toml/json. +type FileLogConfig struct { + // Log filename, leave empty to disable file log. + Filename string `toml:"filename" json:"filename"` + // Is log rotate enabled. + LogRotate bool `toml:"log-rotate" json:"log-rotate"` + // Max size for a single file, in MB. + MaxSize int `toml:"max-size" json:"max-size"` + // Max log keep days, default is never deleting. + MaxDays int `toml:"max-days" json:"max-days"` + // Maximum number of old log files to retain. + MaxBackups int `toml:"max-backups" json:"max-backups"` +} + +// Config serializes log related config in toml/json. +type Config struct { + // Log level. + Level string `toml:"level" json:"level"` + // Log format. one of json, text, or console. + Format string `toml:"format" json:"format"` + // Disable automatic timestamps in output. + DisableTimestamp bool `toml:"disable-timestamp" json:"disable-timestamp"` + // File log config. + File FileLogConfig `toml:"file" json:"file"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `toml:"development" json:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `toml:"disable-caller" json:"disable-caller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `toml:"disable-stacktrace" json:"disable-stacktrace"` + // DisableErrorVerbose stops annotating logs with the full verbose error + // message. 
+ DisableErrorVerbose bool `toml:"disable-error-verbose" json:"disable-error-verbose"` + // SamplingConfig sets a sampling strategy for the logger. Sampling caps the + // global CPU and I/O load that logging puts on your process while attempting + // to preserve a representative subset of your logs. + // + // Values configured here are per-second. See zapcore.NewSampler for details. + Sampling *zap.SamplingConfig `toml:"sampling" json:"sampling"` +} + +// ZapProperties records some information about zap. +type ZapProperties struct { + Core zapcore.Core + Syncer zapcore.WriteSyncer + Level zap.AtomicLevel +} + +func newZapTextEncoder(cfg *Config) zapcore.Encoder { + return NewTextEncoder(cfg) +} + +func (cfg *Config) buildOptions(errSink zapcore.WriteSyncer) []zap.Option { + opts := []zap.Option{zap.ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, zap.Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, zap.AddCaller()) + } + + stackLevel := zap.ErrorLevel + if cfg.Development { + stackLevel = zap.WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, zap.AddStacktrace(stackLevel)) + } + + if cfg.Sampling != nil { + opts = append(opts, zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + })) + } + return opts +} diff --git a/vendor/github.com/pingcap/log/global.go b/vendor/github.com/pingcap/log/global.go new file mode 100644 index 00000000000..5d00254d961 --- /dev/null +++ b/vendor/github.com/pingcap/log/global.go @@ -0,0 +1,70 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func Debug(msg string, fields ...zap.Field) { + L().WithOptions(zap.AddCallerSkip(1)).Debug(msg, fields...) +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func Info(msg string, fields ...zap.Field) { + L().WithOptions(zap.AddCallerSkip(1)).Info(msg, fields...) +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func Warn(msg string, fields ...zap.Field) { + L().WithOptions(zap.AddCallerSkip(1)).Warn(msg, fields...) +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func Error(msg string, fields ...zap.Field) { + L().WithOptions(zap.AddCallerSkip(1)).Error(msg, fields...) +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func Panic(msg string, fields ...zap.Field) { + L().WithOptions(zap.AddCallerSkip(1)).Panic(msg, fields...) +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. 
+// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func Fatal(msg string, fields ...zap.Field) { + L().WithOptions(zap.AddCallerSkip(1)).Fatal(msg, fields...) +} + +// SetLevel alters the logging level. +func SetLevel(l zapcore.Level) { + _globalP.Level.SetLevel(l) +} + +// GetLevel gets the logging level. +func GetLevel() zapcore.Level { + return _globalP.Level.Level() +} diff --git a/vendor/github.com/pingcap/log/log.go b/vendor/github.com/pingcap/log/log.go new file mode 100644 index 00000000000..1135b3d327f --- /dev/null +++ b/vendor/github.com/pingcap/log/log.go @@ -0,0 +1,122 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "errors" + "os" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + lumberjack "gopkg.in/natefinch/lumberjack.v2" +) + +// InitLogger initializes a zap logger. +func InitLogger(cfg *Config, opts ...zap.Option) (*zap.Logger, *ZapProperties, error) { + var output zapcore.WriteSyncer + if len(cfg.File.Filename) > 0 { + lg, err := initFileLog(&cfg.File) + if err != nil { + return nil, nil, err + } + output = zapcore.AddSync(lg) + } else { + stdOut, close, err := zap.Open([]string{"stdout"}...) + if err != nil { + close() + return nil, nil, err + } + output = stdOut + } + return InitLoggerWithWriteSyncer(cfg, output, opts...) +} + +// InitLoggerWithWriteSyncer initializes a zap logger with specified write syncer. 
+func InitLoggerWithWriteSyncer(cfg *Config, output zapcore.WriteSyncer, opts ...zap.Option) (*zap.Logger, *ZapProperties, error) { + level := zap.NewAtomicLevel() + err := level.UnmarshalText([]byte(cfg.Level)) + if err != nil { + return nil, nil, err + } + core := NewTextCore(newZapTextEncoder(cfg).(*textEncoder), output, level) + opts = append(cfg.buildOptions(output), opts...) + lg := zap.New(core, opts...) + r := &ZapProperties{ + Core: core, + Syncer: output, + Level: level, + } + return lg, r, nil +} + +// initFileLog initializes file based logging options. +func initFileLog(cfg *FileLogConfig) (*lumberjack.Logger, error) { + if st, err := os.Stat(cfg.Filename); err == nil { + if st.IsDir() { + return nil, errors.New("can't use directory as log file name") + } + } + if cfg.MaxSize == 0 { + cfg.MaxSize = defaultLogMaxSize + } + + // use lumberjack to logrotate + return &lumberjack.Logger{ + Filename: cfg.Filename, + MaxSize: cfg.MaxSize, + MaxBackups: cfg.MaxBackups, + MaxAge: cfg.MaxDays, + LocalTime: true, + }, nil +} + +func newStdLogger() (*zap.Logger, *ZapProperties) { + conf := &Config{Level: "info", File: FileLogConfig{}} + lg, r, _ := InitLogger(conf) + return lg, r +} + +var ( + _globalL, _globalP = newStdLogger() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *zap.Logger { + return _globalL +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *zap.SugaredLogger { + return _globalS +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger. +// It's unsafe for concurrent use. +func ReplaceGlobals(logger *zap.Logger, props *ZapProperties) { + _globalL = logger + _globalS = logger.Sugar() + _globalP = props +} + +// Sync flushes any buffered log entries. 
+func Sync() error { + err := L().Sync() + if err != nil { + return err + } + return S().Sync() +} diff --git a/vendor/github.com/pingcap/log/zap_text_core.go b/vendor/github.com/pingcap/log/zap_text_core.go new file mode 100644 index 00000000000..34e7b9aac76 --- /dev/null +++ b/vendor/github.com/pingcap/log/zap_text_core.go @@ -0,0 +1,77 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import "go.uber.org/zap/zapcore" + +// NewTextCore creates a Core that writes logs to a WriteSyncer. +func NewTextCore(enc *textEncoder, ws zapcore.WriteSyncer, enab zapcore.LevelEnabler) zapcore.Core { + return &textIOCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +// textIOCore is a copy of zapcore.ioCore that only accept *textEncoder +// it can be removed after https://github.com/uber-go/zap/pull/685 be merged +type textIOCore struct { + zapcore.LevelEnabler + enc *textEncoder + out zapcore.WriteSyncer +} + +func (c *textIOCore) With(fields []zapcore.Field) zapcore.Core { + clone := c.clone() + // it's different to ioCore, here call textEncoder#addFields to fix https://github.com/pingcap/log/issues/3 + clone.enc.addFields(fields) + return clone +} + +func (c *textIOCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *textIOCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, 
err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > zapcore.ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue https://github.com/uber-go/zap/issues/370. + c.Sync() + } + return nil +} + +func (c *textIOCore) Sync() error { + return c.out.Sync() +} + +func (c *textIOCore) clone() *textIOCore { + return &textIOCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone().(*textEncoder), + out: c.out, + } +} diff --git a/vendor/github.com/pingcap/log/zap_text_encoder.go b/vendor/github.com/pingcap/log/zap_text_encoder.go new file mode 100644 index 00000000000..3a0444c997e --- /dev/null +++ b/vendor/github.com/pingcap/log/zap_text_encoder.go @@ -0,0 +1,656 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package log + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "math" + "strings" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" +) + +// DefaultTimeEncoder serializes time.Time to a human-readable formatted string +func DefaultTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + s := t.Format("2006/01/02 15:04:05.000 -07:00") + if e, ok := enc.(*textEncoder); ok { + for _, c := range []byte(s) { + e.buf.AppendByte(c) + } + return + } + enc.AppendString(s) +} + +// ShortCallerEncoder serializes a caller in file:line format. +func ShortCallerEncoder(caller zapcore.EntryCaller, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(getCallerString(caller)) +} + +func getCallerString(ec zapcore.EntryCaller) string { + if !ec.Defined { + return "" + } + + idx := strings.LastIndexByte(ec.File, '/') + buf := _pool.Get() + for i := idx + 1; i < len(ec.File); i++ { + b := ec.File[i] + switch { + case b >= 'A' && b <= 'Z': + buf.AppendByte(b) + case b >= 'a' && b <= 'z': + buf.AppendByte(b) + case b >= '0' && b <= '9': + buf.AppendByte(b) + case b == '.' || b == '-' || b == '_': + buf.AppendByte(b) + default: + } + } + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// For JSON-escaping; see textEncoder.safeAddString below. 
+const _hex = "0123456789abcdef" + +var _textPool = sync.Pool{New: func() interface{} { + return &textEncoder{} +}} + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) + +func getTextEncoder() *textEncoder { + return _textPool.Get().(*textEncoder) +} + +func putTextEncoder(enc *textEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _textPool.Put(enc) +} + +type textEncoder struct { + *zapcore.EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + disableErrorVerbose bool + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder +} + +// NewTextEncoder creates a fast, low-allocation Text encoder. The encoder +// appropriately escapes all field keys and values. +func NewTextEncoder(cfg *Config) zapcore.Encoder { + cc := zapcore.EncoderConfig{ + // Keys can be anything except the empty string. 
+ TimeKey: "time", + LevelKey: "level", + NameKey: "name", + CallerKey: "caller", + MessageKey: "message", + StacktraceKey: "stack", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: DefaultTimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: ShortCallerEncoder, + } + if cfg.DisableTimestamp { + cc.TimeKey = "" + } + return &textEncoder{ + EncoderConfig: &cc, + buf: _pool.Get(), + spaced: false, + disableErrorVerbose: cfg.DisableErrorVerbose, + } +} + +func (enc *textEncoder) AddArray(key string, arr zapcore.ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *textEncoder) AddObject(key string, obj zapcore.ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *textEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *textEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *textEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *textEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *textEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *textEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *textEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *textEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = _pool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +func (enc *textEncoder) AddReflected(key string, obj interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) + if err != nil { + return err + } + 
enc.reflectBuf.TrimNewline() + enc.addKey(key) + enc.AppendByteString(enc.reflectBuf.Bytes()) + return nil +} + +func (enc *textEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *textEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *textEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *textEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *textEncoder) AppendArray(arr zapcore.ArrayMarshaler) error { + enc.addElementSeparator() + ne := enc.cloned() + ne.buf.AppendByte('[') + err := arr.MarshalLogArray(ne) + ne.buf.AppendByte(']') + enc.AppendByteString(ne.buf.Bytes()) + ne.buf.Free() + putTextEncoder(ne) + return err +} + +func (enc *textEncoder) AppendObject(obj zapcore.ObjectMarshaler) error { + enc.addElementSeparator() + ne := enc.cloned() + ne.buf.AppendByte('{') + err := obj.MarshalLogObject(ne) + ne.buf.AppendByte('}') + enc.AppendByteString(ne.buf.Bytes()) + ne.buf.Free() + putTextEncoder(ne) + return err +} + +func (enc *textEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *textEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + if !enc.needDoubleQuotes(string(val)) { + enc.safeAddByteString(val) + return + } + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +func (enc *textEncoder) AppendComplex128(val complex128) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. 
+ r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') +} + +func (enc *textEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. + enc.AppendInt64(int64(val)) + } +} + +func (enc *textEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *textEncoder) AppendReflected(val interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.AppendByteString(enc.reflectBuf.Bytes()) + return nil +} + +func (enc *textEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.safeAddStringWithQuote(val) +} + +func (enc *textEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. 
+ enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *textEncoder) beginQuoteFiled() { + if enc.buf.Len() > 0 { + enc.buf.AppendByte(' ') + } + enc.buf.AppendByte('[') +} + +func (enc *textEncoder) endQuoteFiled() { + enc.buf.AppendByte(']') +} + +func (enc *textEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *textEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *textEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *textEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *textEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *textEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *textEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *textEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *textEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *textEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *textEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *textEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *textEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *textEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *textEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *textEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *textEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *textEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *textEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *textEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *textEncoder) AppendUint32(v uint32) { 
enc.AppendUint64(uint64(v)) } +func (enc *textEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *textEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *textEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *textEncoder) Clone() zapcore.Encoder { + clone := enc.cloned() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *textEncoder) cloned() *textEncoder { + clone := getTextEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.disableErrorVerbose = enc.disableErrorVerbose + clone.buf = _pool.Get() + return clone +} + +func (enc *textEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { + final := enc.cloned() + if final.TimeKey != "" { + final.beginQuoteFiled() + final.AppendTime(ent.Time) + final.endQuoteFiled() + } + + if final.LevelKey != "" { + final.beginQuoteFiled() + cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + final.endQuoteFiled() + } + + if ent.LoggerName != "" && final.NameKey != "" { + final.beginQuoteFiled() + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = zapcore.FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. 
+ final.AppendString(ent.LoggerName) + } + final.endQuoteFiled() + } + if ent.Caller.Defined && final.CallerKey != "" { + final.beginQuoteFiled() + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + final.endQuoteFiled() + } + // add Message + if len(ent.Message) > 0 { + final.beginQuoteFiled() + final.AppendString(ent.Message) + final.endQuoteFiled() + } + if enc.buf.Len() > 0 { + final.buf.AppendByte(' ') + final.buf.Write(enc.buf.Bytes()) + } + final.addFields(fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.beginQuoteFiled() + final.AddString(final.StacktraceKey, ent.Stack) + final.endQuoteFiled() + } + + if final.LineEnding != "" { + final.buf.AppendString(final.LineEnding) + } else { + final.buf.AppendString(zapcore.DefaultLineEnding) + } + + ret := final.buf + putTextEncoder(final) + return ret, nil +} + +func (enc *textEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *textEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } +} + +func (enc *textEncoder) addKey(key string) { + enc.addElementSeparator() + enc.safeAddStringWithQuote(key) + enc.buf.AppendByte('=') +} + +func (enc *textEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ', '=': + return + default: + enc.buf.AppendByte(',') + } +} + +func (enc *textEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString("NaN") + case math.IsInf(val, 1): + enc.buf.AppendString("+Inf") + case math.IsInf(val, -1): + enc.buf.AppendString("-Inf") + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and 
appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *textEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddStringWithQuote will automatically add quotoes. +func (enc *textEncoder) safeAddStringWithQuote(s string) { + if !enc.needDoubleQuotes(s) { + enc.safeAddString(s) + return + } + enc.buf.AppendByte('"') + enc.safeAddString(s) + enc.buf.AppendByte('"') +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *textEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// See [log-fileds](https://github.com/tikv/rfcs/blob/master/text/2018-12-19-unified-log-format.md#log-fields-section). +func (enc *textEncoder) needDoubleQuotes(s string) bool { + for i := 0; i < len(s); { + b := s[i] + if b <= 0x20 { + return true + } + switch b { + case '\\', '"', '[', ']', '=': + return true + } + i++ + } + return false +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. 
+func (enc *textEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *textEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} + +func (enc *textEncoder) addFields(fields []zapcore.Field) { + for _, f := range fields { + if f.Type == zapcore.ErrorType { + // handle ErrorType in pingcap/log to fix "[key=?,keyVerbose=?]" problem. + // see more detail at https://github.com/pingcap/log/pull/5 + enc.encodeError(f) + continue + } + enc.beginQuoteFiled() + f.AddTo(enc) + enc.endQuoteFiled() + } +} + +func (enc *textEncoder) encodeError(f zapcore.Field) { + err := f.Interface.(error) + basic := err.Error() + enc.beginQuoteFiled() + enc.AddString(f.Key, basic) + enc.endQuoteFiled() + if enc.disableErrorVerbose { + return + } + if e, isFormatter := err.(fmt.Formatter); isFormatter { + verbose := fmt.Sprintf("%+v", e) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. 
+ enc.beginQuoteFiled() + enc.AddString(f.Key+"Verbose", verbose) + enc.endQuoteFiled() + } + } +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index d15f7fdb35b..7592e8c63f6 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -98,6 +98,15 @@ func (b *Buffer) Write(bs []byte) (int, error) { return len(bs), nil } +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + // Free returns the Buffer to its Pool. // // Callers must not retain references to the Buffer after calling Free. diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index dae130303e7..6fe17d9e0f5 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -74,10 +74,10 @@ type Config struct { // EncoderConfig sets options for the chosen encoder. See // zapcore.EncoderConfig for details. EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` - // OutputPaths is a list of paths to write logging output to. See Open for - // details. + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` - // ErrorOutputPaths is a list of paths to write internal logger errors to. + // ErrorOutputPaths is a list of URLs to write internal logger errors to. // The default is standard error. 
// // Note that this setting only affects internal errors; for sample code that diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go index 3f16a8d4576..8638dd1b965 100644 --- a/vendor/go.uber.org/zap/doc.go +++ b/vendor/go.uber.org/zap/doc.go @@ -48,7 +48,7 @@ // "attempt", 3, // "backoff", time.Second, // ) -// sugar.Printf("failed to fetch URL: %s", "http://example.com") +// sugar.Infof("failed to fetch URL: %s", "http://example.com") // // By default, loggers are unbuffered. However, since zap's low-level APIs // allow buffering, calling Sync before letting your process exit is a good diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go index d02232e39fa..c1ac0507cd9 100644 --- a/vendor/go.uber.org/zap/global.go +++ b/vendor/go.uber.org/zap/global.go @@ -31,7 +31,6 @@ import ( ) const ( - _stdLogDefaultDepth = 2 _loggerWriterDepth = 2 _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + "https://github.com/uber-go/zap/issues/new and reference this error: %v" diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go new file mode 100644 index 00000000000..6b5dbda8076 --- /dev/null +++ b/vendor/go.uber.org/zap/global_go112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build go1.12 + +package zap + +const _stdLogDefaultDepth = 1 diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go new file mode 100644 index 00000000000..d3ab9af933e --- /dev/null +++ b/vendor/go.uber.org/zap/global_prego112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// See #682 for more information. +// +build !go1.12 + +package zap + +const _stdLogDefaultDepth = 2 diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index f171c3847f4..1b0ecaca9c1 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -48,11 +48,11 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.Method { - case "GET": + case http.MethodGet: current := lvl.Level() enc.Encode(payload{Level: ¤t}) - case "PUT": + case http.MethodPut: var req payload if errmess := func() string { diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go new file mode 100644 index 00000000000..ff0becfe5d0 --- /dev/null +++ b/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,161 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var ( + _sinkMutex sync.RWMutex + _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme +) + +func init() { + resetSinkRegistry() +} + +func resetSinkRegistry() { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + _sinkFactories = map[string]func(*url.URL) (Sink, error){ + schemeFile: newFileSink, + } +} + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. 
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := _sinkFactories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + _sinkFactories[normalized] = factory + return nil +} + +func newSink(rawURL string) (Sink, error) { + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + _sinkMutex.RLock() + factory, ok := _sinkFactories[u.Scheme] + _sinkMutex.RUnlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +func newFileSink(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. 
+ if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + switch u.Path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' || c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index 16f55ce4875..86a709ab0be 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -21,21 +21,33 @@ package zap import ( + "fmt" + "io" "io/ioutil" - "os" "go.uber.org/zap/zapcore" "go.uber.org/multierr" ) -// Open is a high-level wrapper that takes a variadic number of paths, opens or -// creates each of the specified files, and combines them into a locked +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked // WriteSyncer. It also returns any error encountered and a function to close // any opened files. // -// Passing no paths returns a no-op WriteSyncer. The special paths "stdout" and -// "stderr" are interpreted as os.Stdout and os.Stderr, respectively. +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. 
Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { writers, close, err := open(paths) if err != nil { @@ -47,33 +59,24 @@ func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { } func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { - var openErr error writers := make([]zapcore.WriteSyncer, 0, len(paths)) - files := make([]*os.File, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) close := func() { - for _, f := range files { - f.Close() + for _, c := range closers { + c.Close() } } + + var openErr error for _, path := range paths { - switch path { - case "stdout": - writers = append(writers, os.Stdout) - // Don't close standard out. - continue - case "stderr": - writers = append(writers, os.Stderr) - // Don't close standard error. 
+ sink, err := newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) continue } - f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) - openErr = multierr.Append(openErr, err) - if err == nil { - writers = append(writers, f) - files = append(files, f) - } + writers = append(writers, sink) + closers = append(closers, sink) } - if openErr != nil { close() return writers, nil, openErr diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 6a5e33e2f79..ae772e4a170 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -160,7 +160,7 @@ func (f Field) AddTo(enc ObjectEncoder) { case NamespaceType: enc.OpenNamespace(f.Key) case StringerType: - enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) + err = encodeStringer(f.Key, f.Interface, enc) case ErrorType: encodeError(f.Key, f.Interface.(error), enc) case SkipType: @@ -199,3 +199,14 @@ func addFields(enc ObjectEncoder, fields []Field) { fields[i].AddTo(enc) } } + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (err error) { + defer func() { + if v := recover(); v != nil { + err = fmt.Errorf("PANIC=%v", v) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 1006ba2b119..9aec4eada31 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -44,10 +44,15 @@ func getJSONEncoder() *jsonEncoder { } func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } enc.EncoderConfig = nil enc.buf = nil enc.spaced = false enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil _jsonPool.Put(enc) } @@ -56,6 +61,10 @@ type jsonEncoder struct { buf *buffer.Buffer spaced bool // include 
spaces after colons and commas openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder } // NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder @@ -124,13 +133,27 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) { enc.AppendInt64(val) } +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + + // For consistency with our custom JSON encoder. + enc.reflectEnc.SetEscapeHTML(false) + } else { + enc.reflectBuf.Reset() + } +} + func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - marshaled, err := json.Marshal(obj) + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) if err != nil { return err } + enc.reflectBuf.TrimNewline() enc.addKey(key) - _, err = enc.buf.Write(marshaled) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) return err } @@ -213,12 +236,14 @@ func (enc *jsonEncoder) AppendInt64(val int64) { } func (enc *jsonEncoder) AppendReflected(val interface{}) error { - marshaled, err := json.Marshal(val) + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) if err != nil { return err } + enc.reflectBuf.TrimNewline() enc.addElementSeparator() - _, err = enc.buf.Write(marshaled) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) return err } diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go index 5c46bc13d6f..dfead0829d6 100644 --- a/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -43,7 +43,7 @@ func NewMapObjectEncoder() *MapObjectEncoder { // AddArray implements ObjectEncoder. 
func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { - arr := &sliceArrayEncoder{} + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} err := v.MarshalLogArray(arr) m.cur[key] = arr.elems return err @@ -158,7 +158,7 @@ func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { } func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) }