From 03b305edb77c7b58aa25cf7a93d131a36a2cab56 Mon Sep 17 00:00:00 2001 From: kennytm Date: Thu, 11 Apr 2019 16:29:29 +0800 Subject: [PATCH] *: use pingcap/log (zap) for logging Some redundant logs (e.g. logging about the same thing inside and outside a function) are removed. The {QueryRow,Transact,Exec}WithRetry functions are revamped to include the logger. --- cmd/tidb-lightning-ctl/main.go | 4 +- cmd/tidb-lightning/main.go | 13 +- go.mod | 4 +- lightning/common/log.go | 178 --- lightning/common/once_error.go | 5 +- lightning/common/once_error_test.go | 10 +- lightning/common/util.go | 158 +- lightning/common/version.go | 27 +- lightning/config/config.go | 14 +- lightning/kv/importer.go | 99 +- lightning/lightning.go | 14 +- lightning/log/log.go | 190 +++ lightning/mydump/csv_parser.rl | 8 +- lightning/mydump/csv_parser_generated.go | 1705 +++++++++++----------- lightning/mydump/loader.go | 31 +- lightning/mydump/parser.rl | 8 +- lightning/mydump/parser_generated.go | 96 +- lightning/mydump/reader.go | 8 +- lightning/restore/checkpoints.go | 61 +- lightning/restore/restore.go | 306 ++-- lightning/restore/tidb.go | 97 +- lightning/verification/checksum.go | 8 + tests/error_summary/run.sh | 8 +- 23 files changed, 1531 insertions(+), 1521 deletions(-) delete mode 100644 lightning/common/log.go create mode 100644 lightning/log/log.go diff --git a/cmd/tidb-lightning-ctl/main.go b/cmd/tidb-lightning-ctl/main.go index 39f94b940..6618e9909 100644 --- a/cmd/tidb-lightning-ctl/main.go +++ b/cmd/tidb-lightning-ctl/main.go @@ -71,10 +71,10 @@ func run() error { } if *logLevel != "" { - cfg.App.LogConfig.Level = *logLevel + cfg.App.Config.Level = *logLevel } if *logFilePath != "" { - cfg.App.LogConfig.File = *logFilePath + cfg.App.Config.File = *logFilePath } if *tidbHost != "" { cfg.TiDB.Host = *tidbHost diff --git a/cmd/tidb-lightning/main.go b/cmd/tidb-lightning/main.go index 40f38cccd..bfa96fff9 100644 --- a/cmd/tidb-lightning/main.go +++ b/cmd/tidb-lightning/main.go @@ -15,6 +15,7 @@ package main import ( "flag" + "fmt" _ "net/http/pprof" "os" "os/signal" @@ -22,9 +23,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb-lightning/lightning" - "github.com/pingcap/tidb-lightning/lightning/common" "github.com/pingcap/tidb-lightning/lightning/config" + "github.com/pingcap/tidb-lightning/lightning/log" plan "github.com/pingcap/tidb/planner/core" + "go.uber.org/zap" ) func setGlobalVars() { @@ -42,7 +44,8 @@ func main() { case flag.ErrHelp: os.Exit(0) default: - common.AppLogger.Fatalf("parse cmd flags error: %s", err) + fmt.Println("Failed to parse command flags: ", err) + os.Exit(1) } app := lightning.New(cfg) @@ -56,15 +59,15 @@ func main() { go func() { sig := <-sc - common.AppLogger.Infof("Got signal %v to exit.", sig) + log.L().Info("got signal to exit", zap.Stringer("signal", sig)) app.Stop() }() err = app.Run() if err != nil { - common.AppLogger.Error("tidb lightning encountered error:", errors.ErrorStack(err)) + log.L().Error("tidb lightning encountered error", zap.Error(err)) os.Exit(1) } - common.AppLogger.Info("tidb lightning exit.") + log.L().Info("tidb lightning exit") } diff --git a/go.mod b/go.mod index 9064ba583..cf528c070 100644 --- a/go.mod +++ b/go.mod @@ -26,12 +26,12 @@ require ( github.com/prometheus/client_golang v0.9.2 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 github.com/satori/go.uuid v1.2.0 - github.com/sirupsen/logrus v1.3.0 + github.com/sirupsen/logrus v1.3.0 // indirect github.com/unrolled/render v0.0.0-20190117215946-449f39850074 
// indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect + go.uber.org/zap v1.9.1 golang.org/x/text v0.3.0 google.golang.org/appengine v1.1.1-0.20180731164958-4216e58b9158 // indirect google.golang.org/grpc v1.18.0 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) diff --git a/lightning/common/log.go b/lightning/common/log.go deleted file mode 100644 index 0cc208485..000000000 --- a/lightning/common/log.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "bytes" - "fmt" - "path" - "runtime" - "strings" - "sync/atomic" - - "github.com/pingcap/errors" - zaplog "github.com/pingcap/log" - "github.com/pingcap/tidb/util/logutil" - log "github.com/sirupsen/logrus" - "gopkg.in/natefinch/lumberjack.v2" -) - -const ( - defaultLogTimeFormat = "2006/01/02 15:04:05.000" - defaultLogLevel = log.InfoLevel - defaultLogMaxDays = 7 - defaultLogMaxSize = 512 // MB -) - -// LogConfig serializes log related config in toml/json. -type LogConfig struct { - // Log level. - Level string `toml:"level" json:"level"` - // Log filename, leave empty to disable file log. - File string `toml:"file" json:"file"` - // Max size for a single file, in MB. - FileMaxSize int `toml:"max-size" json:"max-size"` - // Max log keep days, default is never deleting. - FileMaxDays int `toml:"max-days" json:"max-days"` - // Maximum number of old log files to retain. - FileMaxBackups int `toml:"max-backups" json:"max-backups"` -} - -func (cfg *LogConfig) Adjust() { - if len(cfg.File) > 0 { - if cfg.FileMaxSize == 0 { - cfg.FileMaxSize = defaultLogMaxSize - } - if cfg.FileMaxDays == 0 { - cfg.FileMaxDays = defaultLogMaxDays - } - } -} - -func stringToLogLevel(level string) log.Level { - switch strings.ToLower(level) { - case "fatal": - return log.FatalLevel - case "error": - return log.ErrorLevel - case "warn", "warning": - return log.WarnLevel - case "debug": - return log.DebugLevel - case "info": - return log.InfoLevel - } - return defaultLogLevel -} - -type SimpleTextFormater struct{} - -func (f *SimpleTextFormater) Format(entry *log.Entry) ([]byte, error) { - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - // timestamp - fmt.Fprintf(b, "%s ", entry.Time.Format(defaultLogTimeFormat)) - // code stack trace - if file, ok := entry.Data["file"]; ok { - fmt.Fprintf(b, "%s:%v:", file, entry.Data["line"]) - } - // level + message - fmt.Fprintf(b, " [%s] %s", entry.Level.String(), entry.Message) - - // others - for k, v := range entry.Data { - if k != "file" && k != "line" { - fmt.Fprintf(b, " %v=%v", k, v) - } - } - - b.WriteByte('\n') - - return b.Bytes(), nil -} - -// modifyHook injects file name and line pos into log entry. -type contextHook struct{} - -// Levels implements logrus.Hook interface. 
-func (hook *contextHook) Levels() []log.Level { - return log.AllLevels -} - -// Fire implements logrus.Hook interface -// https://github.com/sirupsen/logrus/issues/63 -func (hook *contextHook) Fire(entry *log.Entry) error { - pc := make([]uintptr, 3) - cnt := runtime.Callers(6, pc) - - for i := 0; i < cnt; i++ { - fu := runtime.FuncForPC(pc[i] - 1) - name := fu.Name() - if !isSkippedPackageName(name) { - file, line := fu.FileLine(pc[i] - 1) - entry.Data["file"] = path.Base(file) - entry.Data["line"] = line - break - } - } - return nil -} - -func isSkippedPackageName(name string) bool { - return strings.Contains(name, "github.com/sirupsen/logrus") || - strings.Contains(name, "github.com/coreos/pkg/capnslog") -} - -// AppLogger is a logger for lightning, different from tidb logger. -var AppLogger = log.New() - -func SetLevel(level log.Level) { - atomic.StoreUint32((*uint32)(&AppLogger.Level), uint32(level)) -} - -func GetLevel() (level log.Level) { - return log.Level(atomic.LoadUint32((*uint32)(&AppLogger.Level))) -} - -func InitLogger(cfg *LogConfig, tidbLoglevel string) error { - SetLevel(stringToLogLevel(cfg.Level)) - AppLogger.Hooks.Add(&contextHook{}) - AppLogger.Formatter = &SimpleTextFormater{} - - logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: tidbLoglevel}}) - - if len(cfg.File) > 0 { - if IsDirExists(cfg.File) { - return errors.Errorf("can't use directory as log file name : %s", cfg.File) - } - - // use lumberjack to logrotate - output := &lumberjack.Logger{ - Filename: cfg.File, - MaxAge: cfg.FileMaxDays, - MaxSize: cfg.FileMaxSize, - MaxBackups: cfg.FileMaxBackups, - LocalTime: true, - } - - AppLogger.Out = output - } - - return nil -} diff --git a/lightning/common/once_error.go b/lightning/common/once_error.go index fe117bb47..14c05193e 100644 --- a/lightning/common/once_error.go +++ b/lightning/common/once_error.go @@ -28,16 +28,13 @@ type OnceError struct { // Set assigns an error to this instance, if `e != nil`. // // If this method is called multiple times, only the first call is effective. -func (oe *OnceError) Set(tag string, e error) { +func (oe *OnceError) Set(e error) { if e != nil { oe.lock.Lock() if oe.err == nil { oe.err = e } oe.lock.Unlock() - if ShouldLogError(e) { - AppLogger.Errorf("[%s] error %v", tag, e) - } } } diff --git a/lightning/common/once_error_test.go b/lightning/common/once_error_test.go index e72ce25e2..80fb40da4 100644 --- a/lightning/common/once_error_test.go +++ b/lightning/common/once_error_test.go @@ -34,23 +34,23 @@ func (s *onceErrorSuite) TestOnceError(c *C) { c.Assert(err.Get(), IsNil) - err.Set("tag", nil) + err.Set(nil) c.Assert(err.Get(), IsNil) e := errors.New("1") - err.Set("tag", e) + err.Set(e) c.Assert(err.Get(), Equals, e) e2 := errors.New("2") - err.Set("tag", e2) + err.Set(e2) c.Assert(err.Get(), Equals, e) // e, not e2. 
- err.Set("tag", nil) + err.Set(nil) c.Assert(err.Get(), Equals, e) ch := make(chan struct{}) go func() { - err.Set("tag", nil) + err.Set(nil) ch <- struct{}{} }() <-ch diff --git a/lightning/common/util.go b/lightning/common/util.go index 8255e885d..c592916f9 100644 --- a/lightning/common/util.go +++ b/lightning/common/util.go @@ -30,9 +30,11 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" tmysql "github.com/pingcap/parser/mysql" - "github.com/sirupsen/logrus" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/pingcap/tidb-lightning/lightning/log" ) const ( @@ -64,90 +66,88 @@ func IsDirExists(name string) bool { return f != nil && f.IsDir() } -func QueryRowWithRetry(ctx context.Context, db *sql.DB, query string, dest ...interface{}) (err error) { - maxRetry := defaultMaxRetry - for i := 0; i < maxRetry; i++ { +// SQLWithRetry constructs a retryable transaction. +type SQLWithRetry struct { + DB *sql.DB + Logger log.Logger + HideQueryLog bool +} + +func (t SQLWithRetry) perform(ctx context.Context, parentLogger log.Logger, purpose string, action func() error) error { + var err error +outside: + for i := 0; i < defaultMaxRetry; i++ { + logger := parentLogger.With(zap.Int("retryCnt", i)) + if i > 0 { - AppLogger.Warnf("query %s retry %d", query, i) + logger.Warn(purpose + " retry start") time.Sleep(retryTimeout) } - err = db.QueryRowContext(ctx, query).Scan(dest...) - if err != nil { - if !IsRetryableError(err) { - return errors.Trace(err) - } - AppLogger.Warnf("query %s [error] %v", query, err) + err = action() + switch { + case err == nil: + return nil + case IsRetryableError(err): + logger.Warn(purpose+" failed but going to try again", log.ShortError(err)) continue + default: + break outside } - - return nil } - return errors.Errorf("query sql [%s] failed", query) + return errors.Annotatef(err, "%s failed", purpose) } -// TransactWithRetry executes an action in a transaction, and retry if the -// action failed with a retryable error. -func TransactWithRetry(ctx context.Context, db *sql.DB, purpose string, action func(context.Context, *sql.Tx) error) error { - maxRetry := defaultMaxRetry +func (t SQLWithRetry) QueryRow(ctx context.Context, purpose string, query string, dest ...interface{}) error { + logger := t.Logger + if !t.HideQueryLog { + logger = logger.With(zap.String("query", query)) + } + return t.perform(ctx, logger, purpose, func() error { + return t.DB.QueryRow(query).Scan(dest...) + }) +} - var err error - for i := 0; i < maxRetry; i++ { - if i > 0 { - AppLogger.Warnf("transaction %s retry %d", purpose, i) - time.Sleep(retryTimeout) +// Transact executes an action in a transaction, and retry if the +// action failed with a retryable error. +func (t SQLWithRetry) Transact(ctx context.Context, purpose string, action func(context.Context, *sql.Tx) error) error { + return t.perform(ctx, t.Logger, purpose, func() error { + txn, err := t.DB.BeginTx(ctx, nil) + if err != nil { + return errors.Annotate(err, "begin transaction failed") } - if err = transactImpl(ctx, db, purpose, action); err != nil { - if IsRetryableError(err) { - continue - } - if ShouldLogError(err) { - AppLogger.Errorf("transaction %s [error] %v", purpose, err) + err = action(ctx, txn) + if err != nil { + rerr := txn.Rollback() + if rerr != nil { + t.Logger.Error(purpose+" rollback transaction failed", log.ShortError(rerr)) } - return errors.Trace(err) + // we should return the exec err, instead of the rollback rerr. 
+ // no need to errors.Trace() it, as the error comes from user code anyway. + return err } - return nil - } - - return errors.Annotatef(err, "transaction %s failed", purpose) -} - -func transactImpl(ctx context.Context, db *sql.DB, purpose string, action func(context.Context, *sql.Tx) error) error { - txn, err := db.BeginTx(ctx, nil) - if err != nil { - return errors.Annotate(err, "begin transaction failed") - } - - err = action(ctx, txn) - if err != nil { - AppLogger.Warnf("transaction %s [error]%v", purpose, err) - rerr := txn.Rollback() - if rerr != nil { - if ShouldLogError(rerr) { - AppLogger.Errorf("transaction %s [error] %v", purpose, rerr) - } + err = txn.Commit() + if err != nil { + return errors.Annotate(err, "commit transaction failed") } - // we should return the exec err, instead of the rollback rerr. - // no need to errors.Trace() it, as the error comes from user code anyway. - return err - } - err = txn.Commit() - if err != nil { - return errors.Annotate(err, "commit failed") - } - return nil + return nil + }) } -// ExecWithRetry executes a single SQL with optional retry. -func ExecWithRetry(ctx context.Context, db *sql.DB, purpose string, query string, args ...interface{}) error { - return errors.Trace(TransactWithRetry(ctx, db, purpose, func(c context.Context, tx *sql.Tx) error { - _, err := tx.ExecContext(c, query, args...) +// Exec executes a single SQL with optional retry. +func (t SQLWithRetry) Exec(ctx context.Context, purpose string, query string, args ...interface{}) error { + logger := t.Logger + if !t.HideQueryLog { + logger = logger.With(zap.String("query", query), zap.Reflect("args", args)) + } + return t.perform(ctx, logger, purpose, func() error { + _, err := t.DB.ExecContext(ctx, query, args...) return errors.Trace(err) - })) + }) } // IsRetryableError returns whether the error is transient (e.g. network @@ -182,35 +182,17 @@ func IsRetryableError(err error) bool { } } -// ShouldLogError returns whether the error should be logged. -// This function should only be used for inhabiting logs related to canceling, -// where the log is usually just noise. -// -// This function returns `false` when: -// -// - the error `IsContextCanceledError` -// - the log level is above "debug" -// -// This function also returns `false` when `err == nil`. -func ShouldLogError(err error) bool { - if err == nil { - return false - } - if AppLogger.IsLevelEnabled(logrus.DebugLevel) { - return true - } - return !IsContextCanceledError(err) -} - // IsContextCanceledError returns whether the error is caused by context // cancellation. This function should only be used when the code logic is -// affected by whether the error is canceling or not. Normally, you should -// simply use ShouldLogError. +// affected by whether the error is canceling or not. // // This function returns `false` (not a context-canceled error) if `err == nil`. func IsContextCanceledError(err error) bool { - err = errors.Cause(err) - return err == context.Canceled || status.Code(err) == codes.Canceled + return log.IsContextCanceledError(err) +} + +func ShouldLogError(err error) bool { + return true } // UniqueTable returns an unique table name. diff --git a/lightning/common/version.go b/lightning/common/version.go index 580f0e44d..84cd3e083 100644 --- a/lightning/common/version.go +++ b/lightning/common/version.go @@ -16,7 +16,8 @@ package common import ( "fmt" - log "github.com/sirupsen/logrus" + "github.com/pingcap/tidb-lightning/lightning/log" + "go.uber.org/zap" ) // Version information. 
@@ -41,20 +42,18 @@ func GetRawInfo() string { // PrintInfo prints some information of the app, like git hash, binary build time, etc. func PrintInfo(app string, callback func()) { - oriLevel := GetLevel() - SetLevel(log.InfoLevel) - printInfo(app) + oldLevel := log.SetLevel(zap.InfoLevel) + defer log.SetLevel(oldLevel) + + log.L().Info("Welcome to "+app, + zap.String("Release Version", ReleaseVersion), + zap.String("Git Commit Hash", GitHash), + zap.String("Git Branch", GitBranch), + zap.String("UTC Build Time", BuildTS), + zap.String("Go Version", GoVersion), + ) + if callback != nil { callback() } - SetLevel(oriLevel) -} - -func printInfo(app string) { - AppLogger.Infof("Welcome to %s", app) - AppLogger.Infof("Release Version: %s", ReleaseVersion) - AppLogger.Infof("Git Commit Hash: %s", GitHash) - AppLogger.Infof("Git Branch: %s", GitBranch) - AppLogger.Infof("UTC Build Time: %s", BuildTS) - AppLogger.Infof("Go Version: %s", GoVersion) } diff --git a/lightning/config/config.go b/lightning/config/config.go index ee37a6888..db123e727 100644 --- a/lightning/config/config.go +++ b/lightning/config/config.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/tidb-tools/pkg/filter" ) @@ -73,15 +74,12 @@ type Config struct { } func (c *Config) String() string { - bytes, err := json.Marshal(c) - if err != nil { - common.AppLogger.Errorf("marshal config to json error %v", err) - } + bytes, _ := json.Marshal(c) return string(bytes) } type Lightning struct { - common.LogConfig + log.Config TableConcurrency int `toml:"table-concurrency" json:"table-concurrency"` IndexConcurrency int `toml:"index-concurrency" json:"index-concurrency"` RegionConcurrency int `toml:"region-concurrency" json:"region-concurrency"` @@ -223,10 +221,10 @@ func LoadConfig(args []string) (*Config, error) { } if *logLevel != "" { - cfg.App.LogConfig.Level = *logLevel + cfg.App.Config.Level = *logLevel } if *logFilePath != "" { - cfg.App.LogConfig.File = *logFilePath + cfg.App.Config.File = *logFilePath } if *tidbHost != "" { cfg.TiDB.Host = *tidbHost @@ -286,6 +284,8 @@ func (cfg *Config) Load() error { } func (cfg *Config) Adjust() { + cfg.App.Config.Adjust() + // handle mydumper if cfg.Mydumper.BatchSize <= 0 { cfg.Mydumper.BatchSize = 100 * _G diff --git a/lightning/kv/importer.go b/lightning/kv/importer.go index 2f8e26de2..7dad677fb 100644 --- a/lightning/kv/importer.go +++ b/lightning/kv/importer.go @@ -16,19 +16,20 @@ package kv import ( "context" "fmt" - "os" "strings" "time" "github.com/pingcap/errors" + kv "github.com/pingcap/kvproto/pkg/import_kvpb" + sst "github.com/pingcap/kvproto/pkg/import_sstpb" + kvec "github.com/pingcap/tidb/util/kvencoder" "github.com/satori/go.uuid" + "go.uber.org/zap" "google.golang.org/grpc" - kv "github.com/pingcap/kvproto/pkg/import_kvpb" - sst "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/tidb-lightning/lightning/metric" - kvec "github.com/pingcap/tidb/util/kvencoder" ) const ( @@ -102,24 +103,15 @@ func (importer *Importer) SwitchMode(ctx context.Context, mode sst.SwitchMode) e Mode: mode, }, } - timer := time.Now() + task := log.With(zap.Stringer("mode", mode)).Begin(zap.DebugLevel, "switch mode") _, err := importer.cli.SwitchMode(ctx, req) - if err != nil { - if strings.Contains(err.Error(), "status: 
Unimplemented") { - fmt.Fprintln(os.Stderr, "Error: The TiKV instance does not support mode switching. Please make sure the TiKV version is 2.0.4 or above.") - } - return errors.Trace(err) - } - - common.AppLogger.Infof("switch to tikv %s mode takes %v", mode, time.Since(timer)) - return nil + task.End(zap.WarnLevel, err) + return errors.Trace(err) } // Compact the target cluster for better performance. func (importer *Importer) Compact(ctx context.Context, level int32) error { - common.AppLogger.Infof("compact level %d", level) - req := &kv.CompactClusterRequest{ PdAddr: importer.pdAddr, Request: &sst.CompactRequest{ @@ -127,10 +119,10 @@ func (importer *Importer) Compact(ctx context.Context, level int32) error { OutputLevel: level, }, } - timer := time.Now() - _, err := importer.cli.CompactCluster(ctx, req) - common.AppLogger.Infof("compact level %d takes %v", level, time.Since(timer)) + task := log.With(zap.Int32("level", level)).Begin(zap.InfoLevel, "compact cluster") + _, err := importer.cli.CompactCluster(ctx, req) + task.End(zap.ErrorLevel, err) return errors.Trace(err) } @@ -138,7 +130,7 @@ func (importer *Importer) Compact(ctx context.Context, level int32) error { // to it via WriteStream instances. type OpenedEngine struct { importer *Importer - tag string + logger log.Logger uuid uuid.UUID ts uint64 } @@ -159,6 +151,13 @@ func makeTag(tableName string, engineID int32) string { return fmt.Sprintf("%s:%d", tableName, engineID) } +func makeLogger(tag string, engineUUID uuid.UUID) log.Logger { + return log.With( + zap.String("engineTag", tag), + zap.Stringer("engineUUID", engineUUID), + ) +} + var engineNamespace = uuid.Must(uuid.FromString("d68d6abe-c59e-45d6-ade8-e2b0ceb7bedf")) // OpenEngine opens an engine with the given table name and engine ID. This type @@ -180,7 +179,9 @@ func (importer *Importer) OpenEngine( openCounter := metric.ImporterEngineCounter.WithLabelValues("open") openCounter.Inc() - common.AppLogger.Infof("[%s] open engine %s", tag, engineUUID) + + logger := makeLogger(tag, engineUUID) + logger.Info("open engine") // gofail: var FailIfEngineCountExceeds int // { @@ -197,7 +198,7 @@ func (importer *Importer) OpenEngine( return &OpenedEngine{ importer: importer, - tag: tag, + logger: logger, ts: uint64(time.Now().Unix()), // TODO ... set outside ? from pd ? uuid: engineUUID, }, nil @@ -229,7 +230,7 @@ func (engine *OpenedEngine) NewWriteStream(ctx context.Context) (*WriteStream, e if err = wstream.Send(req); err != nil { if _, closeErr := wstream.CloseAndRecv(); closeErr != nil { // just log the close error, we need to propagate the send error instead - common.AppLogger.Warnf("[%s] close write stream cause failed : %v", engine.tag, closeErr) + engine.logger.Warn("close write stream failed", log.ShortError(closeErr)) } return nil, errors.Trace(err) } @@ -267,7 +268,7 @@ func (stream *WriteStream) Put(kvs []kvec.KvPair) error { if !common.IsRetryableError(sendErr) { break } - common.AppLogger.Errorf("[%s] write stream failed to send: %s", stream.engine.tag, sendErr.Error()) + stream.engine.logger.Error("send write stream failed", log.ShortError(sendErr)) time.Sleep(retryBackoffTime) } return errors.Trace(sendErr) @@ -276,9 +277,7 @@ func (stream *WriteStream) Put(kvs []kvec.KvPair) error { // Close the write stream. 
func (stream *WriteStream) Close() error { if _, err := stream.wstream.CloseAndRecv(); err != nil { - if common.ShouldLogError(err) { - common.AppLogger.Errorf("[%s] close write stream cause failed : %v", stream.engine.tag, err) - } + stream.engine.logger.Error("close write stream failed", log.ShortError(err)) return errors.Trace(err) } return nil @@ -289,20 +288,17 @@ func (stream *WriteStream) Close() error { // method anywhere. type ClosedEngine struct { importer *Importer - tag string + logger log.Logger uuid uuid.UUID } // Close the opened engine to prepare it for importing. This method will return // error if any associated WriteStream is still not closed. func (engine *OpenedEngine) Close(ctx context.Context) (*ClosedEngine, error) { - common.AppLogger.Infof("[%s] [%s] engine close", engine.tag, engine.uuid) - timer := time.Now() - closedEngine, err := engine.importer.UnsafeCloseEngineWithUUID(ctx, engine.tag, engine.uuid) + closedEngine, err := engine.importer.rawUnsafeCloseEngine(ctx, engine.logger, engine.uuid) if err != nil { return nil, errors.Trace(err) } - common.AppLogger.Infof("[%s] [%s] engine close takes %v", engine.tag, engine.uuid, time.Since(timer)) metric.ImporterEngineCounter.WithLabelValues("closed").Inc() return closedEngine, nil } @@ -320,17 +316,28 @@ func (importer *Importer) UnsafeCloseEngine(ctx context.Context, tableName strin // UnsafeCloseEngineWithUUID closes the engine using the UUID alone. func (importer *Importer) UnsafeCloseEngineWithUUID(ctx context.Context, tag string, engineUUID uuid.UUID) (*ClosedEngine, error) { + logger := makeLogger(tag, engineUUID) + return importer.rawUnsafeCloseEngine(ctx, logger, engineUUID) +} + +func (importer *Importer) rawUnsafeCloseEngine(ctx context.Context, logger log.Logger, engineUUID uuid.UUID) (*ClosedEngine, error) { req := &kv.CloseEngineRequest{ Uuid: engineUUID.Bytes(), } + task := logger.Begin(zap.InfoLevel, "engine close") _, err := importer.cli.CloseEngine(ctx, req) - if !isIgnorableOpenCloseEngineError(err) { + if isIgnorableOpenCloseEngineError(err) { + err = nil + } + task.End(zap.ErrorLevel, err) + + if err != nil { return nil, errors.Trace(err) } return &ClosedEngine{ importer: importer, - tag: tag, + logger: logger, uuid: engineUUID, }, nil } @@ -340,41 +347,31 @@ func (engine *ClosedEngine) Import(ctx context.Context) error { var err error for i := 0; i < maxRetryTimes; i++ { - common.AppLogger.Infof("[%s] [%s] import", engine.tag, engine.uuid) req := &kv.ImportEngineRequest{ Uuid: engine.uuid.Bytes(), PdAddr: engine.importer.pdAddr, } - timer := time.Now() + + task := engine.logger.With(zap.Int("retryCnt", i)).Begin(zap.InfoLevel, "import") _, err = engine.importer.cli.ImportEngine(ctx, req) if !common.IsRetryableError(err) { - if err == nil { - common.AppLogger.Infof("[%s] [%s] import takes %v", engine.tag, engine.uuid, time.Since(timer)) - } else if common.ShouldLogError(err) { - common.AppLogger.Errorf("[%s] [%s] import failed and cannot retry, err %v", engine.tag, engine.uuid, err) - } + task.End(zap.ErrorLevel, err) return errors.Trace(err) } - common.AppLogger.Warnf("[%s] [%s] import failed and retry %d time, err %v", engine.tag, engine.uuid, i+1, err) + task.Warn("import spuriously failed, going to retry again", log.ShortError(err)) time.Sleep(retryBackoffTime) } - return errors.Annotatef(err, "[%s] [%s] import reach max retry %d and still failed", engine.tag, engine.uuid, maxRetryTimes) + return errors.Annotatef(err, "[%s] import reach max retry %d and still failed", engine.uuid, 
maxRetryTimes) } // Cleanup deletes the imported data from importer. func (engine *ClosedEngine) Cleanup(ctx context.Context) error { - common.AppLogger.Infof("[%s] [%s] cleanup ", engine.tag, engine.uuid) req := &kv.CleanupEngineRequest{ Uuid: engine.uuid.Bytes(), } - timer := time.Now() + task := engine.logger.Begin(zap.InfoLevel, "cleanup") _, err := engine.importer.cli.CleanupEngine(ctx, req) - common.AppLogger.Infof("[%s] [%s] cleanup takes %v", engine.tag, engine.uuid, time.Since(timer)) + task.End(zap.WarnLevel, err) return errors.Trace(err) } - -// Tag gets an identification stirng of this engine for logging. -func (engine *ClosedEngine) Tag() string { - return engine.tag -} diff --git a/lightning/lightning.go b/lightning/lightning.go index 4c4f576e6..c7fe571db 100644 --- a/lightning/lightning.go +++ b/lightning/lightning.go @@ -22,9 +22,11 @@ import ( "github.com/pingcap/errors" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" "github.com/pingcap/tidb-lightning/lightning/common" "github.com/pingcap/tidb-lightning/lightning/config" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/tidb-lightning/lightning/mydump" "github.com/pingcap/tidb-lightning/lightning/restore" ) @@ -38,14 +40,15 @@ type Lightning struct { } func initEnv(cfg *config.Config) error { - if err := common.InitLogger(&cfg.App.LogConfig, cfg.TiDB.LogLevel); err != nil { + if err := log.InitLogger(&cfg.App.Config, cfg.TiDB.LogLevel); err != nil { return errors.Trace(err) } if cfg.App.ProfilePort > 0 { go func() { http.Handle("/metrics", promhttp.Handler()) - common.AppLogger.Info(http.ListenAndServe(fmt.Sprintf(":%d", cfg.App.ProfilePort), nil)) + err := http.ListenAndServe(fmt.Sprintf(":%d", cfg.App.ProfilePort), nil) + log.L().Info("stopped HTTP server", log.ShortError(err)) }() } @@ -67,7 +70,7 @@ func New(cfg *config.Config) *Lightning { func (l *Lightning) Run() error { runtime.GOMAXPROCS(runtime.NumCPU()) common.PrintInfo("lightning", func() { - common.AppLogger.Infof("cfg %s", l.cfg) + log.L().Info("cfg", zap.Stringer("cfg", l.cfg)) }) l.wg.Add(1) @@ -81,16 +84,17 @@ func (l *Lightning) Run() error { } func (l *Lightning) run() error { + loadTask := log.L().Begin(zap.InfoLevel, "load data source") mdl, err := mydump.NewMyDumpLoader(l.cfg) + loadTask.End(zap.ErrorLevel, err) if err != nil { - common.AppLogger.Errorf("failed to load mydumper source : %s", errors.ErrorStack(err)) return errors.Trace(err) } dbMetas := mdl.GetDatabases() procedure, err := restore.NewRestoreController(l.ctx, dbMetas, l.cfg) if err != nil { - common.AppLogger.Errorf("failed to restore : %s", errors.ErrorStack(err)) + log.L().Error("restore failed", log.ShortError(err)) return errors.Trace(err) } defer procedure.Close() diff --git a/lightning/log/log.go b/lightning/log/log.go new file mode 100644 index 000000000..5a9b203ca --- /dev/null +++ b/lightning/log/log.go @@ -0,0 +1,190 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package log
+
+import (
+	"context"
+	"time"
+
+	"github.com/pingcap/errors"
+	pclog "github.com/pingcap/log"
+	"github.com/pingcap/tidb/util/logutil"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	defaultLogLevel   = "info"
+	defaultLogMaxDays = 7
+	defaultLogMaxSize = 512 // MB
+)
+
+// Config serializes log related config in toml/json.
+type Config struct {
+	// Log level.
+	Level string `toml:"level" json:"level"`
+	// Log filename, leave empty to disable file log.
+	File string `toml:"file" json:"file"`
+	// Max size for a single file, in MB.
+	FileMaxSize int `toml:"max-size" json:"max-size"`
+	// Max log keep days, default is never deleting.
+	FileMaxDays int `toml:"max-days" json:"max-days"`
+	// Maximum number of old log files to retain.
+	FileMaxBackups int `toml:"max-backups" json:"max-backups"`
+}
+
+func (cfg *Config) Adjust() {
+	if len(cfg.Level) == 0 {
+		cfg.Level = defaultLogLevel
+	}
+	if cfg.Level == "warning" {
+		cfg.Level = "warn"
+	}
+	if cfg.FileMaxSize == 0 {
+		cfg.FileMaxSize = defaultLogMaxSize
+	}
+	if cfg.FileMaxDays == 0 {
+		cfg.FileMaxDays = defaultLogMaxDays
+	}
+}
+
+// Logger is a simple wrapper around *zap.Logger which provides some extra
+// methods to simplify Lightning's log usage.
+type Logger struct {
+	*zap.Logger
+}
+
+// logger for lightning, different from tidb logger.
+var (
+	appLogger = Logger{zap.NewNop()}
+	appLevel  zap.AtomicLevel
+)
+
+// InitLogger initializes Lightning's and also the TiDB library's loggers.
+func InitLogger(cfg *Config, tidbLoglevel string) error {
+	logutil.InitLogger(&logutil.LogConfig{Config: pclog.Config{Level: tidbLoglevel}})
+
+	logger, props, err := pclog.InitLogger(&pclog.Config{
+		Level: cfg.Level,
+		File: pclog.FileLogConfig{
+			Filename:   cfg.File,
+			LogRotate:  true,
+			MaxSize:    cfg.FileMaxSize,
+			MaxDays:    cfg.FileMaxDays,
+			MaxBackups: cfg.FileMaxBackups,
+		},
+	})
+	// Do not log stack traces at all, as we'll get the stack trace from the
+	// error itself.
+	appLogger = Logger{logger.WithOptions(zap.AddStacktrace(zap.DPanicLevel))}
+	appLevel = props.Level
+
+	return err
+}
+
+// L returns the current logger for Lightning.
+func L() Logger {
+	return appLogger
+}
+
+// SetLevel modifies the log level of the global logger. Returns the previous
+// level.
+func SetLevel(level zapcore.Level) zapcore.Level {
+	oldLevel := appLevel.Level()
+	appLevel.SetLevel(level)
+	return oldLevel
+}
+
+// ShortError constructs a field which only records the error message without the
+// verbose text (i.e. excludes the stack trace).
+func ShortError(err error) zap.Field {
+	if err == nil {
+		return zap.Skip()
+	}
+	return zap.String("error", err.Error())
+}
+
+// With creates a child logger from the global logger and adds structured
+// context to it.
+func With(fields ...zap.Field) Logger {
+	return appLogger.With(fields...)
+}
+
+// IsContextCanceledError returns whether the error is caused by context
+// cancellation.
+func IsContextCanceledError(err error) bool {
+	err = errors.Cause(err)
+	return err == context.Canceled || status.Code(err) == codes.Canceled
+}
+
+// Begin marks the beginning of a task.
+func (logger Logger) Begin(level zapcore.Level, name string) *Task {
+	if ce := logger.WithOptions(zap.AddCallerSkip(1)).Check(level, name+" start"); ce != nil {
+		ce.Write()
+	}
+	return &Task{
+		Logger: logger,
+		level:  level,
+		name:   name,
+		since:  time.Now(),
+	}
+}
+
+// With creates a child logger and adds structured context to it.
+func (logger Logger) With(fields ...zap.Field) Logger { + return Logger{logger.Logger.With(fields...)} +} + +// Named adds a new path segment to the logger's name. +func (logger Logger) Named(name string) Logger { + return Logger{logger.Logger.Named(name)} +} + +// Task is a logger for a task spanning a period of time. This structure records +// when the task is started, so the time elapsed for the whole task can be +// logged without book-keeping. +type Task struct { + Logger + level zapcore.Level + name string + since time.Time +} + +// End marks the end of a task. +func (task *Task) End(level zapcore.Level, err error, extraFields ...zap.Field) time.Duration { + elapsed := time.Since(task.since) + var verb string + switch { + case err == nil: + level = task.level + verb = " completed" + case IsContextCanceledError(err): + level = zap.DebugLevel + verb = " canceled" + extraFields = nil + default: + verb = " failed" + extraFields = nil + } + if ce := task.WithOptions(zap.AddCallerSkip(1)).Check(level, task.name+verb); ce != nil { + ce.Write(append( + extraFields, + zap.Duration("takeTime", elapsed), + ShortError(err), + )...) + } + return elapsed +} diff --git a/lightning/mydump/csv_parser.rl b/lightning/mydump/csv_parser.rl index 1fee4e665..1afeec42c 100644 --- a/lightning/mydump/csv_parser.rl +++ b/lightning/mydump/csv_parser.rl @@ -10,8 +10,9 @@ package mydump import ( "io" - "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/errors" + "go.uber.org/zap" ) %%{ @@ -81,7 +82,10 @@ func (parser *CSVParser) lex() (csvToken, []byte, error) { %% write exec; if cs == %%{ write error; }%% { - common.AppLogger.Errorf("Syntax error near byte %d, content is «%s»", parser.pos, string(data)) + log.L().Error("syntax error", + zap.Int64("pos", parser.pos), + zap.ByteString("content", data), + ) return csvTokNil, nil, errors.New("Syntax error") } diff --git a/lightning/mydump/csv_parser_generated.go b/lightning/mydump/csv_parser_generated.go index 86237d31b..c82bc684b 100644 --- a/lightning/mydump/csv_parser_generated.go +++ b/lightning/mydump/csv_parser_generated.go @@ -13,16 +13,17 @@ package mydump import ( "io" - "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/errors" + "go.uber.org/zap" ) -//.... lightning/mydump/csv_parser.rl:58 +//.... lightning/mydump/csv_parser.rl:59 -//.... tmp_parser.go:25 +//.... tmp_parser.go:26 const csv_parser_start int = 8 const csv_parser_first_final int = 8 const csv_parser_error int = 0 @@ -30,7 +31,7 @@ const csv_parser_error int = 0 const csv_parser_en_main int = 8 -//.... lightning/mydump/csv_parser.rl:61 +//.... lightning/mydump/csv_parser.rl:62 func (parser *CSVParser) lex() (csvToken, []byte, error) { var delim byte @@ -41,7 +42,7 @@ func (parser *CSVParser) lex() (csvToken, []byte, error) { var cs, ts, te, act, p int -//.... tmp_parser.go:44 +//.... tmp_parser.go:45 { cs = csv_parser_start ts = 0 @@ -49,7 +50,7 @@ func (parser *CSVParser) lex() (csvToken, []byte, error) { act = 0 } -//.... lightning/mydump/csv_parser.rl:71 +//.... lightning/mydump/csv_parser.rl:72 for { data := parser.buf @@ -61,7 +62,7 @@ func (parser *CSVParser) lex() (csvToken, []byte, error) { } -//.... tmp_parser.go:64 +//.... 
tmp_parser.go:65 { var _widec int16 if p == pe { @@ -78,46 +79,54 @@ func (parser *CSVParser) lex() (csvToken, []byte, error) { goto st_case_10 case 1: goto st_case_1 - case 2: - goto st_case_2 case 11: goto st_case_11 + case 2: + goto st_case_2 case 12: goto st_case_12 - case 3: - goto st_case_3 case 13: goto st_case_13 - case 4: - goto st_case_4 + case 3: + goto st_case_3 case 14: goto st_case_14 + case 4: + goto st_case_4 case 15: goto st_case_15 - case 5: - goto st_case_5 case 16: goto st_case_16 - case 6: - goto st_case_6 case 17: goto st_case_17 - case 7: - goto st_case_7 case 18: goto st_case_18 case 19: goto st_case_19 + case 5: + goto st_case_5 case 20: goto st_case_20 + case 6: + goto st_case_6 case 21: goto st_case_21 + case 7: + goto st_case_7 case 22: goto st_case_22 case 23: goto st_case_23 case 24: goto st_case_24 + case 25: + goto st_case_25 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 } goto st_out tr0: @@ -137,36 +146,33 @@ tr0: consumedToken = csvTokField {p++; cs = 8; goto _out } } + case 3: + {p = (te) - 1 + + consumedToken = csvTokNewLine + {p++; cs = 8; goto _out } + } } goto st8 tr14: -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 p = (te) - 1 { consumedToken = csvTokField {p++; cs = 8; goto _out } } goto st8 -tr17: -//.... lightning/mydump/csv_parser.rl:41 +tr19: +//.... lightning/mydump/csv_parser.rl:42 te = p+1 { consumedToken = csvTokSep {p++; cs = 8; goto _out } } goto st8 -tr23: -//.... lightning/mydump/csv_parser.rl:51 -te = p -p-- -{ - consumedToken = csvTokNewLine - {p++; cs = 8; goto _out } - } - goto st8 -tr24: -//.... lightning/mydump/csv_parser.rl:46 +tr27: +//.... lightning/mydump/csv_parser.rl:47 te = p p-- { @@ -174,8 +180,8 @@ p-- {p++; cs = 8; goto _out } } goto st8 -tr25: -//.... lightning/mydump/csv_parser.rl:41 +tr30: +//.... lightning/mydump/csv_parser.rl:42 te = p p-- { @@ -197,22 +203,11 @@ act = 0 //.... NONE:1 ts = p -//.... tmp_parser.go:200 +//.... 
tmp_parser.go:206 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -256,9 +251,15 @@ ts = p } switch _widec { case 10: - goto st9 - case 13: - goto st9 + goto tr15 + case 2829: + goto tr17 + case 3085: + goto tr18 + case 3341: + goto tr20 + case 3597: + goto tr22 case 3932: goto tr1 case 4188: @@ -268,41 +269,33 @@ ts = p case 4700: goto st5 case 4956: - goto tr17 + goto tr19 case 5212: - goto tr18 + goto tr21 case 5468: - goto st23 + goto st27 case 5724: - goto st24 + goto st28 } switch { case _widec < 3165: switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr1 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr1 } - default: + case _widec >= 2816: goto tr1 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto st2 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto st2 } - default: + case _widec >= 3072: goto st2 } default: @@ -312,37 +305,28 @@ ts = p switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto tr17 + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { + goto tr19 } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { - goto tr17 - } - default: - goto tr17 + case _widec >= 3328: + goto tr19 } case _widec > 3583: switch { case _widec < 3595: if 3584 <= _widec && _widec <= 3593 { - goto tr18 + goto tr21 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr18 - } - case _widec >= 3598: - goto tr18 + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { + goto tr21 } default: - goto tr18 + goto tr21 } default: - goto tr17 + goto tr19 } default: goto st2 @@ -352,23 +336,38 @@ st_case_0: st0: cs = 0 goto _out +tr15: +//.... NONE:1 +te = p+1 + +//.... lightning/mydump/csv_parser.rl:52 +act = 3; + goto st9 +tr20: +//.... NONE:1 +te = p+1 + +//.... lightning/mydump/csv_parser.rl:42 +act = 1; + goto st9 st9: if p++; p == pe { goto _test_eof9 } st_case_9: +//.... tmp_parser.go:358 switch data[p] { case 10: - goto st9 + goto tr15 case 13: - goto st9 + goto tr15 } - goto tr23 + goto tr0 tr1: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; goto st10 st10: @@ -376,22 +375,11 @@ act = 2; goto _test_eof10 } st_case_10: -//.... tmp_parser.go:379 +//.... 
tmp_parser.go:378 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -450,36 +438,49 @@ act = 2; if 2816 <= _widec && _widec <= 2825 { goto tr1 } - case _widec > 2828: - switch { - case _widec > 2907: - if 2909 <= _widec && _widec <= 3071 { - goto tr1 - } - case _widec >= 2830: + case _widec > 2907: + if 2909 <= _widec && _widec <= 3071 { goto tr1 } default: goto tr1 } - goto tr24 + goto tr27 st1: if p++; p == pe { goto _test_eof1 } st_case_1: goto tr1 - st2: +tr17: +//.... NONE:1 +te = p+1 + +//.... lightning/mydump/csv_parser.rl:47 +act = 2; + goto st11 + st11: if p++; p == pe { - goto _test_eof2 + goto _test_eof11 } - st_case_2: + st_case_11: +//.... tmp_parser.go:467 _widec = int16(data[p]) switch { - case data[p] < 14: + case data[p] < 11: + if data[p] <= 9 { + _widec = 2816 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + if data[p] == sep { + _widec += 512 + } + } + case data[p] > 91: switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { + case data[p] > 92: + if 93 <= data[p] { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -488,7 +489,71 @@ act = 2; _widec += 512 } } - default: + case data[p] >= 92: + _widec = 3840 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + if parser.escFlavor != backslashEscapeFlavorNone { + _widec += 512 + } + if data[p] == sep { + _widec += 1024 + } + } + default: + _widec = 2816 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + if data[p] == sep { + _widec += 512 + } + } + switch _widec { + case 10: + goto tr15 + case 2829: + goto tr17 + case 3085: + goto tr15 + case 3341: + goto tr15 + case 3597: + goto tr15 + case 3932: + goto tr1 + case 4444: + goto st1 + case 4700: + goto st1 + case 5468: + goto st1 + case 5724: + goto st1 + } + switch { + case _widec < 2827: + if 2816 <= _widec && _widec <= 2825 { + goto tr1 + } + case _widec > 2907: + if 2909 <= _widec && _widec <= 3071 { + goto tr1 + } + default: + goto tr1 + } + goto tr27 + st2: + if p++; p == pe { + goto _test_eof2 + } + st_case_2: + _widec = int16(data[p]) + switch { + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -533,8 +598,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto st2 case 4188: @@ -557,28 +620,20 @@ act = 2; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto st2 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto st2 } - default: + case _widec >= 2816: goto st2 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr3 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr3 } - default: + case _widec >= 3072: goto tr3 } default: @@ -588,15 +643,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto st2 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto 
st2 } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -605,13 +656,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -628,26 +674,19 @@ tr3: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st11 - st11: + goto st12 + st12: if p++; p == pe { - goto _test_eof11 + goto _test_eof12 } - st_case_11: -//.... tmp_parser.go:639 + st_case_12: +//.... tmp_parser.go:685 _widec = int16(data[p]) switch { - case data[p] < 11: - if data[p] <= 9 { - _widec = 768 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - } - case data[p] > 12: - if 14 <= data[p] { + case data[p] > 9: + if 11 <= data[p] { _widec = 768 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -660,46 +699,31 @@ act = 2; } } switch { - case _widec < 1035: - if 1024 <= _widec && _widec <= 1033 { - goto st2 - } - case _widec > 1036: - if 1038 <= _widec && _widec <= 1279 { + case _widec > 1033: + if 1035 <= _widec && _widec <= 1279 { goto st2 } - default: + case _widec >= 1024: goto st2 } - goto tr24 + goto tr27 tr4: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st12 - st12: + goto st13 + st13: if p++; p == pe { - goto _test_eof12 + goto _test_eof13 } - st_case_12: -//.... tmp_parser.go:687 + st_case_13: +//.... tmp_parser.go:722 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -744,8 +768,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto st2 case 4188: @@ -768,28 +790,20 @@ act = 2; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto st2 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto st2 } - default: + case _widec >= 2816: goto st2 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr4 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr4 } - default: + case _widec >= 3072: goto tr4 } default: @@ -799,15 +813,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto st2 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -816,13 +826,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -834,7 +839,7 @@ act = 2; default: goto tr4 } - goto tr24 + goto tr27 st3: if p++; p == pe { goto _test_eof3 @@ -845,30 +850,19 @@ tr8: //.... NONE:1 te = p+1 -//.... 
lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st13 - st13: + goto st14 + st14: if p++; p == pe { - goto _test_eof13 + goto _test_eof14 } - st_case_13: -//.... tmp_parser.go:856 + st_case_14: +//.... tmp_parser.go:861 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -913,8 +907,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto st2 case 4188: @@ -937,28 +929,20 @@ act = 2; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto st2 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto st2 } - default: + case _widec >= 2816: goto st2 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr4 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr4 } - default: + case _widec >= 3072: goto tr4 } default: @@ -968,15 +952,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { - goto st2 - } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -985,13 +965,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -1003,7 +978,7 @@ act = 2; default: goto tr4 } - goto tr24 + goto tr27 st4: if p++; p == pe { goto _test_eof4 @@ -1011,19 +986,8 @@ act = 2; st_case_4: _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1068,8 +1032,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto st2 case 4188: @@ -1092,28 +1054,20 @@ act = 2; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto st2 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto st2 } - default: + case _widec >= 2816: goto st2 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr4 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr4 } - default: + case _widec >= 3072: goto tr4 } default: @@ -1123,15 +1077,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - case _widec > 3340: - if 3342 <= _widec && _widec <= 
3419 { - goto st2 - } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -1140,13 +1090,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -1163,85 +1108,268 @@ tr6: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st14 - st14: + goto st15 + st15: if p++; p == pe { - goto _test_eof14 + goto _test_eof15 } - st_case_14: -//.... tmp_parser.go:1174 + st_case_15: +//.... tmp_parser.go:1119 + _widec = int16(data[p]) + switch { + case data[p] > 9: + if 11 <= data[p] { + _widec = 768 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + } + default: + _widec = 768 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + } + if _widec == 10 { + goto st2 + } + switch { + case _widec < 779: + if 768 <= _widec && _widec <= 777 { + goto st2 + } + case _widec > 1033: + if 1035 <= _widec && _widec <= 1279 { + goto st2 + } + default: + goto st2 + } + goto tr27 +tr18: +//.... NONE:1 +te = p+1 + +//.... lightning/mydump/csv_parser.rl:52 +act = 3; + goto st16 +tr22: +//.... NONE:1 +te = p+1 + +//.... lightning/mydump/csv_parser.rl:42 +act = 1; + goto st16 + st16: + if p++; p == pe { + goto _test_eof16 + } + st_case_16: +//.... tmp_parser.go:1170 _widec = int16(data[p]) switch { case data[p] < 11: if data[p] <= 9 { - _widec = 768 + (int16(data[p]) - 0) + _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 } + if data[p] == sep { + _widec += 512 + } } - case data[p] > 12: - if 14 <= data[p] { - _widec = 768 + (int16(data[p]) - 0) + case data[p] > 91: + switch { + case data[p] > 92: + if 93 <= data[p] { + _widec = 2816 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + if data[p] == sep { + _widec += 512 + } + } + case data[p] >= 92: + _widec = 3840 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 } + if parser.escFlavor != backslashEscapeFlavorNone { + _widec += 512 + } + if data[p] == sep { + _widec += 1024 + } } default: - _widec = 768 + (int16(data[p]) - 0) + _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 } + if data[p] == sep { + _widec += 512 + } } switch _widec { case 10: + goto tr18 + case 2829: + goto tr18 + case 3085: + goto tr28 + case 3341: + goto tr18 + case 3597: + goto tr29 + case 3932: goto st2 - case 13: + case 4188: + goto tr3 + case 4444: + goto st3 + case 4700: + goto tr6 + case 4956: goto st2 + case 5212: + goto tr4 + case 5468: + goto st4 + case 5724: + goto tr8 } switch { - case _widec < 782: + case _widec < 3165: switch { - case _widec > 777: - if 779 <= _widec && _widec <= 780 { + case _widec < 2909: + switch { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { + goto st2 + } + case _widec >= 2816: goto st2 } - case _widec >= 768: + case _widec > 3071: + switch { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { + goto tr3 + } + case _widec >= 3072: + goto tr3 + } + default: goto st2 } - case _widec > 1033: + case _widec > 3327: switch { - case _widec > 1036: - if 1038 <= _widec && _widec <= 1279 { + case _widec < 3421: + switch { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { + goto st2 + } + case _widec >= 3328: goto st2 } - case _widec >= 1035: + case _widec > 3583: + switch { + case _widec < 3595: + if 3584 <= _widec && _widec <= 
3593 { + goto tr4 + } + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { + goto tr4 + } + default: + goto tr4 + } + default: goto st2 } default: + goto tr3 + } + goto tr0 +tr28: +//.... NONE:1 +te = p+1 + +//.... lightning/mydump/csv_parser.rl:47 +act = 2; + goto st17 + st17: + if p++; p == pe { + goto _test_eof17 + } + st_case_17: +//.... tmp_parser.go:1311 + _widec = int16(data[p]) + switch { + case data[p] > 9: + if 11 <= data[p] { + _widec = 768 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + } + default: + _widec = 768 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + } + switch _widec { + case 10: + goto tr15 + case 781: + goto tr15 + case 1037: + goto tr18 + } + switch { + case _widec > 1033: + if 1035 <= _widec && _widec <= 1279 { + goto st2 + } + case _widec >= 1024: goto st2 } - goto tr24 -tr18: + goto tr27 +tr29: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:41 -act = 1; - goto st15 - st15: +//.... lightning/mydump/csv_parser.rl:47 +act = 2; + goto st18 + st18: if p++; p == pe { - goto _test_eof15 + goto _test_eof18 } - st_case_15: -//.... tmp_parser.go:1238 + st_case_18: +//.... tmp_parser.go:1356 _widec = int16(data[p]) switch { - case data[p] < 14: + case data[p] < 11: + if data[p] <= 9 { + _widec = 2816 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + if data[p] == sep { + _widec += 512 + } + } + case data[p] > 91: switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { + case data[p] > 92: + if 93 <= data[p] { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1250,7 +1378,127 @@ act = 1; _widec += 512 } } + case data[p] >= 92: + _widec = 3840 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + if parser.escFlavor != backslashEscapeFlavorNone { + _widec += 512 + } + if data[p] == sep { + _widec += 1024 + } + } + default: + _widec = 2816 + (int16(data[p]) - 0) + if data[p] == delim { + _widec += 256 + } + if data[p] == sep { + _widec += 512 + } + } + switch _widec { + case 10: + goto tr18 + case 2829: + goto tr18 + case 3085: + goto tr29 + case 3341: + goto tr18 + case 3597: + goto tr29 + case 3932: + goto st2 + case 4188: + goto tr4 + case 4444: + goto st3 + case 4700: + goto tr8 + case 4956: + goto st2 + case 5212: + goto tr4 + case 5468: + goto st4 + case 5724: + goto tr8 + } + switch { + case _widec < 3165: + switch { + case _widec < 2909: + switch { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { + goto st2 + } + case _widec >= 2816: + goto st2 + } + case _widec > 3071: + switch { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { + goto tr4 + } + case _widec >= 3072: + goto tr4 + } default: + goto st2 + } + case _widec > 3327: + switch { + case _widec < 3421: + switch { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { + goto st2 + } + case _widec >= 3328: + goto st2 + } + case _widec > 3583: + switch { + case _widec < 3595: + if 3584 <= _widec && _widec <= 3593 { + goto tr4 + } + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { + goto tr4 + } + default: + goto tr4 + } + default: + goto st2 + } + default: + goto tr4 + } + goto tr27 +tr21: +//.... NONE:1 +te = p+1 + +//.... lightning/mydump/csv_parser.rl:42 +act = 1; + goto st19 + st19: + if p++; p == pe { + goto _test_eof19 + } + st_case_19: +//.... 
tmp_parser.go:1497 + _widec = int16(data[p]) + switch { + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1295,8 +1543,6 @@ act = 1; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto st2 case 4188: @@ -1319,28 +1565,20 @@ act = 1; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto st2 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto st2 } - default: + case _widec >= 2816: goto st2 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr3 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr3 } - default: + case _widec >= 3072: goto tr3 } default: @@ -1350,15 +1588,11 @@ act = 1; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto st2 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -1367,13 +1601,8 @@ act = 1; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -1385,7 +1614,7 @@ act = 1; default: goto tr3 } - goto tr25 + goto tr30 st5: if p++; p == pe { goto _test_eof5 @@ -1393,19 +1622,8 @@ act = 1; st_case_5: _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1450,8 +1668,6 @@ act = 1; switch _widec { case 10: goto tr9 - case 13: - goto tr9 case 3932: goto tr9 case 4188: @@ -1474,28 +1690,20 @@ act = 1; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr9 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr9 } - default: + case _widec >= 2816: goto tr9 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr10 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr10 } - default: + case _widec >= 3072: goto tr10 } default: @@ -1505,15 +1713,11 @@ act = 1; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto tr9 } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { - goto tr9 - } - default: + case _widec >= 3328: goto tr9 } case _widec > 3583: @@ -1522,13 +1726,8 @@ act = 1; if 3584 <= _widec && _widec <= 3593 { goto tr11 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr11 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr11 } default: @@ -1545,30 +1744,19 @@ tr9: //.... NONE:1 te = p+1 -//.... 
lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st16 - st16: + goto st20 + st20: if p++; p == pe { - goto _test_eof16 + goto _test_eof20 } - st_case_16: -//.... tmp_parser.go:1556 + st_case_20: +//.... tmp_parser.go:1755 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1613,8 +1801,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto tr9 case 4188: @@ -1622,7 +1808,7 @@ act = 2; case 4444: goto st6 case 4700: - goto st17 + goto st21 case 4956: goto st2 case 5212: @@ -1630,35 +1816,27 @@ act = 2; case 5468: goto st7 case 5724: - goto st19 + goto st23 } switch { case _widec < 3165: switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr9 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr9 } - default: + case _widec >= 2816: goto tr9 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr3 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr3 } - default: + case _widec >= 3072: goto tr3 } default: @@ -1668,15 +1846,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto st2 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -1685,13 +1859,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -1703,29 +1872,22 @@ act = 2; default: goto tr3 } - goto tr24 + goto tr27 st6: if p++; p == pe { goto _test_eof6 } st_case_6: goto tr9 - st17: + st21: if p++; p == pe { - goto _test_eof17 + goto _test_eof21 } - st_case_17: + st_case_21: _widec = int16(data[p]) switch { - case data[p] < 11: - if data[p] <= 9 { - _widec = 768 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - } - case data[p] > 12: - if 14 <= data[p] { + case data[p] > 9: + if 11 <= data[p] { _widec = 768 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1737,35 +1899,22 @@ act = 2; _widec += 256 } } - switch _widec { - case 10: - goto tr9 - case 13: + if _widec == 10 { goto tr9 } switch { - case _widec < 782: - switch { - case _widec > 777: - if 779 <= _widec && _widec <= 780 { - goto tr9 - } - case _widec >= 768: + case _widec < 779: + if 768 <= _widec && _widec <= 777 { goto tr9 } case _widec > 1033: - switch { - case _widec > 1036: - if 1038 <= _widec && _widec <= 1279 { - goto tr9 - } - case _widec >= 1035: + if 1035 <= _widec && _widec <= 1279 { goto tr9 } default: goto tr9 } - goto tr24 + goto tr27 st7: if p++; p == pe { goto _test_eof7 @@ -1773,19 +1922,8 @@ act = 2; st_case_7: _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 
{ - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1830,8 +1968,6 @@ act = 2; switch _widec { case 10: goto tr9 - case 13: - goto tr9 case 3932: goto tr9 case 4188: @@ -1854,28 +1990,20 @@ act = 2; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr9 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr9 } - default: + case _widec >= 2816: goto tr9 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr11 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr11 } - default: + case _widec >= 3072: goto tr11 } default: @@ -1885,15 +2013,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto tr9 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto tr9 } - default: + case _widec >= 3328: goto tr9 } case _widec > 3583: @@ -1902,13 +2026,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr11 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr11 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr11 } default: @@ -1925,30 +2044,19 @@ tr11: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st18 - st18: + goto st22 + st22: if p++; p == pe { - goto _test_eof18 + goto _test_eof22 } - st_case_18: -//.... tmp_parser.go:1936 + st_case_22: +//.... 
tmp_parser.go:2055 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -1993,8 +2101,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto tr9 case 4188: @@ -2002,7 +2108,7 @@ act = 2; case 4444: goto st6 case 4700: - goto st19 + goto st23 case 4956: goto st2 case 5212: @@ -2010,35 +2116,27 @@ act = 2; case 5468: goto st7 case 5724: - goto st19 + goto st23 } switch { case _widec < 3165: switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr9 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr9 } - default: + case _widec >= 2816: goto tr9 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr4 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr4 } - default: + case _widec >= 3072: goto tr4 } default: @@ -2048,15 +2146,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto st2 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -2065,13 +2159,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -2083,27 +2172,16 @@ act = 2; default: goto tr4 } - goto tr24 - st19: + goto tr27 + st23: if p++; p == pe { - goto _test_eof19 + goto _test_eof23 } - st_case_19: + st_case_23: _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -2148,8 +2226,6 @@ act = 2; switch _widec { case 10: goto tr9 - case 13: - goto tr9 case 3932: goto tr9 case 4188: @@ -2172,28 +2248,20 @@ act = 2; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr9 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr9 } - default: + case _widec >= 2816: goto tr9 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr11 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr11 } - default: + case _widec >= 3072: goto tr11 } default: @@ -2203,15 +2271,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto tr9 } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { - 
goto tr9 - } - default: + case _widec >= 3328: goto tr9 } case _widec > 3583: @@ -2220,13 +2284,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr11 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr11 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr11 } default: @@ -2238,35 +2297,24 @@ act = 2; default: goto tr11 } - goto tr24 + goto tr27 tr13: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st20 - st20: + goto st24 + st24: if p++; p == pe { - goto _test_eof20 + goto _test_eof24 } - st_case_20: -//.... tmp_parser.go:2254 + st_case_24: +//.... tmp_parser.go:2313 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -2311,8 +2359,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto tr9 case 4188: @@ -2320,7 +2366,7 @@ act = 2; case 4444: goto st7 case 4700: - goto st19 + goto st23 case 4956: goto st2 case 5212: @@ -2328,35 +2374,27 @@ act = 2; case 5468: goto st7 case 5724: - goto st19 + goto st23 } switch { case _widec < 3165: switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr9 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr9 } - default: + case _widec >= 2816: goto tr9 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr4 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr4 } - default: + case _widec >= 3072: goto tr4 } default: @@ -2366,15 +2404,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto st2 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - default: + case _widec >= 3328: goto st2 } case _widec > 3583: @@ -2383,13 +2417,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr4 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr4 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr4 } default: @@ -2401,35 +2430,24 @@ act = 2; default: goto tr4 } - goto tr24 + goto tr27 tr10: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st21 - st21: + goto st25 + st25: if p++; p == pe { - goto _test_eof21 + goto _test_eof25 } - st_case_21: -//.... tmp_parser.go:2417 + st_case_25: +//.... 
tmp_parser.go:2446 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -2490,13 +2508,8 @@ act = 2; switch { case _widec < 3083: switch { - case _widec < 2830: - switch { - case _widec > 2825: - if 2827 <= _widec && _widec <= 2828 { - goto tr1 - } - case _widec >= 2816: + case _widec < 2827: + if 2816 <= _widec && _widec <= 2825 { goto tr1 } case _widec > 2907: @@ -2511,28 +2524,19 @@ act = 2; default: goto tr1 } - case _widec > 3084: + case _widec > 3163: switch { case _widec < 3584: - switch { - case _widec > 3163: - if 3165 <= _widec && _widec <= 3327 { - goto st2 - } - case _widec >= 3086: + if 3165 <= _widec && _widec <= 3327 { goto st2 } case _widec > 3593: switch { - case _widec < 3598: - if 3595 <= _widec && _widec <= 3596 { - goto st2 - } case _widec > 3675: if 3677 <= _widec && _widec <= 3839 { goto st2 } - default: + case _widec >= 3595: goto st2 } default: @@ -2541,35 +2545,24 @@ act = 2; default: goto st2 } - goto tr24 + goto tr27 tr12: //.... NONE:1 te = p+1 -//.... lightning/mydump/csv_parser.rl:46 +//.... lightning/mydump/csv_parser.rl:47 act = 2; - goto st22 - st22: + goto st26 + st26: if p++; p == pe { - goto _test_eof22 + goto _test_eof26 } - st_case_22: -//.... tmp_parser.go:2557 + st_case_26: +//.... tmp_parser.go:2561 _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -2614,8 +2607,6 @@ act = 2; switch _widec { case 10: goto st2 - case 13: - goto st2 case 3932: goto tr9 case 4188: @@ -2634,29 +2625,20 @@ act = 2; goto st5 } switch { - case _widec < 3086: + case _widec < 3083: switch { - case _widec < 2830: - switch { - case _widec > 2825: - if 2827 <= _widec && _widec <= 2828 { - goto tr9 - } - case _widec >= 2816: + case _widec < 2827: + if 2816 <= _widec && _widec <= 2825 { goto tr9 } case _widec > 2907: switch { - case _widec < 3072: - if 2909 <= _widec && _widec <= 3071 { - goto tr9 - } - case _widec > 3081: - if 3083 <= _widec && _widec <= 3084 { + case _widec > 3071: + if 3072 <= _widec && _widec <= 3081 { goto st2 } - default: - goto st2 + case _widec >= 2909: + goto tr9 } default: goto tr9 @@ -2665,28 +2647,20 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3165 <= _widec && _widec <= 3337 { - goto st2 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto st2 } - default: + case _widec >= 3165: goto st2 } case _widec > 3593: switch { - case _widec < 3598: - if 3595 <= _widec && _widec <= 3596 { - goto st2 - } case _widec > 3675: if 3677 <= _widec && _widec <= 3839 { goto st2 } - default: + case _widec >= 3595: goto st2 } default: @@ -2695,33 +2669,22 @@ act = 2; default: goto st2 } - goto tr24 - st23: + goto tr27 + st27: if p++; p == pe { - goto _test_eof23 + goto _test_eof27 } - st_case_23: + st_case_27: goto tr1 - st24: + st28: if p++; p == pe { - goto _test_eof24 + goto _test_eof28 } - st_case_24: + 
st_case_28: _widec = int16(data[p]) switch { - case data[p] < 14: - switch { - case data[p] > 9: - if 11 <= data[p] && data[p] <= 12 { - _widec = 2816 + (int16(data[p]) - 0) - if data[p] == delim { - _widec += 256 - } - if data[p] == sep { - _widec += 512 - } - } - default: + case data[p] < 11: + if data[p] <= 9 { _widec = 2816 + (int16(data[p]) - 0) if data[p] == delim { _widec += 256 @@ -2766,8 +2729,6 @@ act = 2; switch _widec { case 10: goto tr9 - case 13: - goto tr9 case 3932: goto tr9 case 4188: @@ -2790,28 +2751,20 @@ act = 2; switch { case _widec < 2909: switch { - case _widec < 2827: - if 2816 <= _widec && _widec <= 2825 { - goto tr9 - } - case _widec > 2828: - if 2830 <= _widec && _widec <= 2907 { + case _widec > 2825: + if 2827 <= _widec && _widec <= 2907 { goto tr9 } - default: + case _widec >= 2816: goto tr9 } case _widec > 3071: switch { - case _widec < 3083: - if 3072 <= _widec && _widec <= 3081 { - goto tr10 - } - case _widec > 3084: - if 3086 <= _widec && _widec <= 3163 { + case _widec > 3081: + if 3083 <= _widec && _widec <= 3163 { goto tr10 } - default: + case _widec >= 3072: goto tr10 } default: @@ -2821,15 +2774,11 @@ act = 2; switch { case _widec < 3421: switch { - case _widec < 3339: - if 3328 <= _widec && _widec <= 3337 { - goto tr9 - } - case _widec > 3340: - if 3342 <= _widec && _widec <= 3419 { + case _widec > 3337: + if 3339 <= _widec && _widec <= 3419 { goto tr9 } - default: + case _widec >= 3328: goto tr9 } case _widec > 3583: @@ -2838,13 +2787,8 @@ act = 2; if 3584 <= _widec && _widec <= 3593 { goto tr11 } - case _widec > 3596: - switch { - case _widec > 3675: - if 3677 <= _widec && _widec <= 3839 { - goto tr11 - } - case _widec >= 3598: + case _widec > 3675: + if 3677 <= _widec && _widec <= 3839 { goto tr11 } default: @@ -2856,92 +2800,107 @@ act = 2; default: goto tr10 } - goto tr25 + goto tr30 st_out: _test_eof8: cs = 8; goto _test_eof _test_eof9: cs = 9; goto _test_eof _test_eof10: cs = 10; goto _test_eof _test_eof1: cs = 1; goto _test_eof - _test_eof2: cs = 2; goto _test_eof _test_eof11: cs = 11; goto _test_eof + _test_eof2: cs = 2; goto _test_eof _test_eof12: cs = 12; goto _test_eof - _test_eof3: cs = 3; goto _test_eof _test_eof13: cs = 13; goto _test_eof - _test_eof4: cs = 4; goto _test_eof + _test_eof3: cs = 3; goto _test_eof _test_eof14: cs = 14; goto _test_eof + _test_eof4: cs = 4; goto _test_eof _test_eof15: cs = 15; goto _test_eof - _test_eof5: cs = 5; goto _test_eof _test_eof16: cs = 16; goto _test_eof - _test_eof6: cs = 6; goto _test_eof _test_eof17: cs = 17; goto _test_eof - _test_eof7: cs = 7; goto _test_eof _test_eof18: cs = 18; goto _test_eof _test_eof19: cs = 19; goto _test_eof + _test_eof5: cs = 5; goto _test_eof _test_eof20: cs = 20; goto _test_eof + _test_eof6: cs = 6; goto _test_eof _test_eof21: cs = 21; goto _test_eof + _test_eof7: cs = 7; goto _test_eof _test_eof22: cs = 22; goto _test_eof _test_eof23: cs = 23; goto _test_eof _test_eof24: cs = 24; goto _test_eof + _test_eof25: cs = 25; goto _test_eof + _test_eof26: cs = 26; goto _test_eof + _test_eof27: cs = 27; goto _test_eof + _test_eof28: cs = 28; goto _test_eof _test_eof: {} if p == eof { switch cs { case 9: - goto tr23 + goto tr0 case 10: - goto tr24 + goto tr27 case 1: goto tr0 + case 11: + goto tr27 case 2: goto tr0 - case 11: - goto tr24 case 12: - goto tr24 + goto tr27 + case 13: + goto tr27 case 3: goto tr0 - case 13: - goto tr24 + case 14: + goto tr27 case 4: goto tr0 - case 14: - goto tr24 case 15: - goto tr25 - case 5: - goto tr0 + goto tr27 case 16: - goto tr24 - case 6: 
- goto tr14 + goto tr0 case 17: - goto tr24 - case 7: - goto tr14 + goto tr27 case 18: - goto tr24 + goto tr27 case 19: - goto tr24 + goto tr30 + case 5: + goto tr0 case 20: - goto tr24 + goto tr27 + case 6: + goto tr14 case 21: - goto tr24 + goto tr27 + case 7: + goto tr14 case 22: - goto tr24 + goto tr27 case 23: - goto tr25 + goto tr27 case 24: - goto tr25 + goto tr27 + case 25: + goto tr27 + case 26: + goto tr27 + case 27: + goto tr30 + case 28: + goto tr30 } } _out: {} } -//.... lightning/mydump/csv_parser.rl:82 +//.... lightning/mydump/csv_parser.rl:83 if cs == 0 { - common.AppLogger.Errorf("Syntax error near byte %d, content is «%s»", parser.pos, string(data)) + log.L().Error("syntax error", + zap.Int64("pos", parser.pos), + zap.ByteString("content", data), + ) return csvTokNil, nil, errors.New("Syntax error") } diff --git a/lightning/mydump/loader.go b/lightning/mydump/loader.go index 56e14eedd..e240cfe1a 100644 --- a/lightning/mydump/loader.go +++ b/lightning/mydump/loader.go @@ -21,15 +21,12 @@ import ( "strings" "github.com/pingcap/errors" - "github.com/pingcap/tidb-lightning/lightning/common" - "github.com/pingcap/tidb-lightning/lightning/config" "github.com/pingcap/tidb-tools/pkg/filter" -) + "go.uber.org/zap" -var ( - // errors - errDirNotExists = errors.New("mydumper dir not exists") - errMissingFile = errors.New("missing file") + "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/config" + "github.com/pingcap/tidb-lightning/lightning/log" ) type MDDatabaseMeta struct { @@ -51,7 +48,10 @@ type MDTableMeta struct { func (m *MDTableMeta) GetSchema() string { schema, err := ExportStatement(m.SchemaFile, m.charSet) if err != nil { - common.AppLogger.Errorf("failed to extract table schema (%s) : %s", m.SchemaFile, err.Error()) + log.L().Error("failed to extract table schema", + zap.String("path", m.SchemaFile), + log.ShortError(err), + ) return "" } return string(schema) @@ -92,7 +92,6 @@ func NewMyDumpLoader(cfg *config.Config) (*MDLoader, error) { } if err := setup.setup(mdl.dir); err != nil { - // common.AppLogger.Errorf("init mydumper loader failed : %s\n", err.Error()) return nil, errors.Trace(err) } @@ -150,18 +149,17 @@ func (s *mdLoaderSetup) setup(dir string) error { sql —— {db}.{table}.{part}.sql / {db}.{table}.sql */ if !common.IsDirExists(dir) { - return errors.Annotatef(errDirNotExists, "dir %s", dir) + return errors.Errorf("%s: mydumper dir does not exist", dir) } if err := s.listFiles(dir); err != nil { - common.AppLogger.Errorf("list file failed : %s", err.Error()) - return errors.Trace(err) + return errors.Annotate(err, "list file failed") } if !s.loader.noSchema { // setup database schema if len(s.dbSchemas) == 0 { - return errors.Annotatef(errMissingFile, "missing {schema}-schema-create.sql") + return errors.New("missing {schema}-schema-create.sql") } for _, fileInfo := range s.dbSchemas { if _, dbExists := s.insertDB(fileInfo.tableName.Schema, fileInfo.path); dbExists { @@ -222,6 +220,7 @@ func (s *mdLoaderSetup) listFiles(dir string) error { lowerFName := strings.ToLower(fname) info := fileInfo{path: path, size: f.Size()} + logger := log.With(zap.String("path", path)) var ( ftype fileType @@ -242,7 +241,7 @@ func (s *mdLoaderSetup) listFiles(dir string) error { case strings.HasSuffix(lowerFName, "-schema-view.sql"), strings.HasSuffix(lowerFName, "-schema-trigger.sql"), strings.HasSuffix(lowerFName, "-schema-post.sql"): - common.AppLogger.Warn("[loader] ignore unsupport view/trigger:", path) + 
logger.Warn("[loader] ignore unsupport view/trigger") return nil case strings.HasSuffix(lowerFName, ".sql"), strings.HasSuffix(lowerFName, ".csv"): ftype = fileTypeTableData @@ -253,14 +252,14 @@ func (s *mdLoaderSetup) listFiles(dir string) error { matchRes := tableNameRegexp.FindStringSubmatch(qualifiedName) if len(matchRes) != 3 { - common.AppLogger.Debugf("[loader] ignore almost %s file: %s", ftype, path) + logger.Debug("[loader] ignore almost " + ftype.String() + " file") return nil } info.tableName.Schema = matchRes[1] info.tableName.Name = matchRes[2] if s.loader.shouldSkip(&info.tableName) { - common.AppLogger.Infof("[filter] ignoring table file %s", path) + logger.Debug("[filter] ignoring table file") return nil } diff --git a/lightning/mydump/parser.rl b/lightning/mydump/parser.rl index f431fba82..72e1bb95d 100644 --- a/lightning/mydump/parser.rl +++ b/lightning/mydump/parser.rl @@ -23,8 +23,9 @@ package mydump import ( "io" - "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/errors" + "go.uber.org/zap" ) %%{ @@ -156,7 +157,10 @@ func (parser *ChunkParser) lex() (token, []byte, error) { %% write exec; if cs == %%{ write error; }%% { - common.AppLogger.Errorf("Syntax error near byte %d, content is «%s»", parser.pos, string(data)) + log.L().Error("syntax error", + zap.Int64("pos", parser.pos), + zap.ByteString("content", data), + ) return tokNil, nil, errors.New("Syntax error") } diff --git a/lightning/mydump/parser_generated.go b/lightning/mydump/parser_generated.go index 230b479c0..cb99c839f 100644 --- a/lightning/mydump/parser_generated.go +++ b/lightning/mydump/parser_generated.go @@ -26,16 +26,17 @@ package mydump import ( "io" - "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/errors" + "go.uber.org/zap" ) -//.... lightning/mydump/parser.rl:139 +//.... lightning/mydump/parser.rl:140 -//.... tmp_parser.go:38 +//.... tmp_parser.go:39 const chunk_parser_start int = 21 const chunk_parser_first_final int = 21 const chunk_parser_error int = 0 @@ -43,12 +44,12 @@ const chunk_parser_error int = 0 const chunk_parser_en_main int = 21 -//.... lightning/mydump/parser.rl:142 +//.... lightning/mydump/parser.rl:143 func (parser *ChunkParser) lex() (token, []byte, error) { var cs, ts, te, act, p int -//.... tmp_parser.go:51 +//.... tmp_parser.go:52 { cs = chunk_parser_start ts = 0 @@ -56,7 +57,7 @@ func (parser *ChunkParser) lex() (token, []byte, error) { act = 0 } -//.... lightning/mydump/parser.rl:146 +//.... lightning/mydump/parser.rl:147 for { data := parser.buf @@ -68,7 +69,7 @@ func (parser *ChunkParser) lex() (token, []byte, error) { } -//.... tmp_parser.go:71 +//.... tmp_parser.go:72 { var _widec int16 if p == pe { @@ -283,7 +284,7 @@ tr4: goto st21 tr8: -//.... lightning/mydump/parser.rl:132 +//.... lightning/mydump/parser.rl:133 p = (te) - 1 { consumedToken = tokUnquoted @@ -291,12 +292,12 @@ p = (te) - 1 } goto st21 tr10: -//.... lightning/mydump/parser.rl:70 +//.... lightning/mydump/parser.rl:71 te = p+1 goto st21 tr13: -//.... lightning/mydump/parser.rl:112 +//.... lightning/mydump/parser.rl:113 te = p+1 { consumedToken = tokBinString @@ -304,7 +305,7 @@ te = p+1 } goto st21 tr22: -//.... lightning/mydump/parser.rl:107 +//.... lightning/mydump/parser.rl:108 te = p+1 { consumedToken = tokHexString @@ -312,7 +313,7 @@ te = p+1 } goto st21 tr29: -//.... lightning/mydump/parser.rl:72 +//.... 
lightning/mydump/parser.rl:73 te = p+1 { consumedToken = tokRowBegin @@ -320,7 +321,7 @@ te = p+1 } goto st21 tr30: -//.... lightning/mydump/parser.rl:77 +//.... lightning/mydump/parser.rl:78 te = p+1 { consumedToken = tokRowEnd @@ -328,7 +329,7 @@ te = p+1 } goto st21 tr44: -//.... lightning/mydump/parser.rl:122 +//.... lightning/mydump/parser.rl:123 te = p p-- { @@ -337,7 +338,7 @@ p-- } goto st21 tr45: -//.... lightning/mydump/parser.rl:117 +//.... lightning/mydump/parser.rl:118 te = p p-- { @@ -346,7 +347,7 @@ p-- } goto st21 tr46: -//.... lightning/mydump/parser.rl:132 +//.... lightning/mydump/parser.rl:133 te = p p-- { @@ -355,7 +356,7 @@ p-- } goto st21 tr48: -//.... lightning/mydump/parser.rl:102 +//.... lightning/mydump/parser.rl:103 te = p p-- { @@ -364,7 +365,7 @@ p-- } goto st21 tr84: -//.... lightning/mydump/parser.rl:127 +//.... lightning/mydump/parser.rl:128 te = p p-- { @@ -386,7 +387,7 @@ act = 0 //.... NONE:1 ts = p -//.... tmp_parser.go:389 +//.... tmp_parser.go:390 switch data[p] { case 32: goto tr10 @@ -460,42 +461,42 @@ tr26: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:132 +//.... lightning/mydump/parser.rl:133 act = 14; goto st22 tr51: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:70 +//.... lightning/mydump/parser.rl:71 act = 1; goto st22 tr65: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:97 +//.... lightning/mydump/parser.rl:98 act = 7; goto st22 tr70: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:87 +//.... lightning/mydump/parser.rl:88 act = 5; goto st22 tr73: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:92 +//.... lightning/mydump/parser.rl:93 act = 6; goto st22 tr83: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:82 +//.... lightning/mydump/parser.rl:83 act = 4; goto st22 st22: @@ -503,7 +504,7 @@ act = 4; goto _test_eof22 } st_case_22: -//.... tmp_parser.go:506 +//.... tmp_parser.go:507 switch data[p] { case 32: goto tr4 @@ -587,7 +588,7 @@ tr1: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:122 +//.... lightning/mydump/parser.rl:123 act = 12; goto st23 st23: @@ -595,7 +596,7 @@ act = 12; goto _test_eof23 } st_case_23: -//.... tmp_parser.go:598 +//.... tmp_parser.go:599 if data[p] == 34 { goto st2 } @@ -693,7 +694,7 @@ tr6: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:117 +//.... lightning/mydump/parser.rl:118 act = 11; goto st24 st24: @@ -701,7 +702,7 @@ act = 11; goto _test_eof24 } st_case_24: -//.... tmp_parser.go:704 +//.... tmp_parser.go:705 if data[p] == 39 { goto st5 } @@ -775,7 +776,7 @@ te = p+1 goto _test_eof26 } st_case_26: -//.... tmp_parser.go:778 +//.... tmp_parser.go:779 switch data[p] { case 10: goto tr10 @@ -876,7 +877,7 @@ te = p+1 goto _test_eof29 } st_case_29: -//.... tmp_parser.go:879 +//.... tmp_parser.go:880 switch data[p] { case 32: goto st8 @@ -931,7 +932,7 @@ te = p+1 goto _test_eof30 } st_case_30: -//.... tmp_parser.go:934 +//.... tmp_parser.go:935 switch data[p] { case 32: goto st8 @@ -995,14 +996,14 @@ tr52: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:132 +//.... lightning/mydump/parser.rl:133 act = 14; goto st32 tr54: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:112 +//.... lightning/mydump/parser.rl:113 act = 10; goto st32 st32: @@ -1010,7 +1011,7 @@ act = 10; goto _test_eof32 } st_case_32: -//.... tmp_parser.go:1013 +//.... tmp_parser.go:1014 switch data[p] { case 32: goto tr4 @@ -1040,14 +1041,14 @@ tr53: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:132 +//.... 
lightning/mydump/parser.rl:133 act = 14; goto st33 tr55: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:107 +//.... lightning/mydump/parser.rl:108 act = 9; goto st33 st33: @@ -1055,7 +1056,7 @@ act = 9; goto _test_eof33 } st_case_33: -//.... tmp_parser.go:1058 +//.... tmp_parser.go:1059 switch data[p] { case 32: goto tr4 @@ -1101,7 +1102,7 @@ te = p+1 goto _test_eof34 } st_case_34: -//.... tmp_parser.go:1104 +//.... tmp_parser.go:1105 switch data[p] { case 32: goto tr46 @@ -1865,7 +1866,7 @@ te = p+1 goto _test_eof59 } st_case_59: -//.... tmp_parser.go:1868 +//.... tmp_parser.go:1869 switch data[p] { case 32: goto st11 @@ -2134,7 +2135,7 @@ te = p+1 goto _test_eof65 } st_case_65: -//.... tmp_parser.go:2137 +//.... tmp_parser.go:2138 switch data[p] { case 32: goto tr46 @@ -2192,7 +2193,7 @@ tr25: //.... NONE:1 te = p+1 -//.... lightning/mydump/parser.rl:127 +//.... lightning/mydump/parser.rl:128 act = 13; goto st66 st66: @@ -2200,7 +2201,7 @@ act = 13; goto _test_eof66 } st_case_66: -//.... tmp_parser.go:2203 +//.... tmp_parser.go:2204 if data[p] == 96 { goto st20 } @@ -2408,10 +2409,13 @@ act = 13; _out: {} } -//.... lightning/mydump/parser.rl:157 +//.... lightning/mydump/parser.rl:158 if cs == 0 { - common.AppLogger.Errorf("Syntax error near byte %d, content is «%s»", parser.pos, string(data)) + log.L().Error("syntax error", + zap.Int64("pos", parser.pos), + zap.ByteString("content", data), + ) return tokNil, nil, errors.New("Syntax error") } diff --git a/lightning/mydump/reader.go b/lightning/mydump/reader.go index 9b3c03e42..a30e42461 100644 --- a/lightning/mydump/reader.go +++ b/lightning/mydump/reader.go @@ -22,7 +22,8 @@ import ( "unicode/utf8" "github.com/pingcap/errors" - "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" + "go.uber.org/zap" "golang.org/x/text/encoding/simplifiedchinese" ) @@ -103,7 +104,10 @@ func ExportStatement(sqlFile string, characterSet string) ([]byte, error) { data, err = decodeCharacterSet(data, characterSet) if err != nil { - common.AppLogger.Errorf("cannot decode input file as %s encoding, please convert it manually: %s", characterSet, sqlFile) + log.L().Error("cannot decode input file, please convert to target encoding manually", + zap.String("encoding", characterSet), + zap.String("path", sqlFile), + ) return nil, errors.Annotatef(err, "failed to decode %s as %s", sqlFile, characterSet) } return data, nil diff --git a/lightning/restore/checkpoints.go b/lightning/restore/checkpoints.go index c4aadf65b..55b2e2372 100644 --- a/lightning/restore/checkpoints.go +++ b/lightning/restore/checkpoints.go @@ -28,8 +28,10 @@ import ( "github.com/cznic/mathutil" "github.com/joho/sqltocsv" "github.com/pingcap/errors" + "go.uber.org/zap" "github.com/pingcap/tidb-lightning/lightning/common" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/tidb-lightning/lightning/mydump" verify "github.com/pingcap/tidb-lightning/lightning/verification" ) @@ -298,14 +300,19 @@ func NewMySQLCheckpointsDB(ctx context.Context, db *sql.DB, schemaName string) ( common.WriteMySQLIdentifier(&escapedSchemaName, schemaName) schema := escapedSchemaName.String() - err := common.ExecWithRetry(ctx, db, "(create checkpoints database)", fmt.Sprintf(` + sql := common.SQLWithRetry{ + DB: db, + Logger: log.With(zap.String("schema", schemaName)), + HideQueryLog: true, + } + err := sql.Exec(ctx, "create checkpoints database", fmt.Sprintf(` CREATE DATABASE IF NOT EXISTS %s; `, schema)) if err != nil { return nil, 
errors.Trace(err) } - err = common.ExecWithRetry(ctx, db, "(create table checkpoints table)", fmt.Sprintf(` + err = sql.Exec(ctx, "create table checkpoints table", fmt.Sprintf(` CREATE TABLE IF NOT EXISTS %s.%s ( node_id int unsigned NOT NULL, session bigint unsigned NOT NULL, @@ -322,7 +329,7 @@ func NewMySQLCheckpointsDB(ctx context.Context, db *sql.DB, schemaName string) ( return nil, errors.Trace(err) } - err = common.ExecWithRetry(ctx, db, "(create engine checkpoints table)", fmt.Sprintf(` + err = sql.Exec(ctx, "create engine checkpoints table", fmt.Sprintf(` CREATE TABLE IF NOT EXISTS %s.%s ( table_name varchar(261) NOT NULL, engine_id int NOT NULL, @@ -336,7 +343,7 @@ func NewMySQLCheckpointsDB(ctx context.Context, db *sql.DB, schemaName string) ( return nil, errors.Trace(err) } - err = common.ExecWithRetry(ctx, db, "(create chunks checkpoints table)", fmt.Sprintf(` + err = sql.Exec(ctx, "create chunks checkpoints table", fmt.Sprintf(` CREATE TABLE IF NOT EXISTS %s.%s ( table_name varchar(261) NOT NULL, engine_id int unsigned NOT NULL, @@ -374,7 +381,8 @@ func (cpdb *MySQLCheckpointsDB) Initialize(ctx context.Context, dbInfo map[strin // We can have at most 65535 placeholders https://stackoverflow.com/q/4922345/ // Since this step is not performance critical, we just insert the rows one-by-one. - err := common.TransactWithRetry(ctx, cpdb.db, "(insert checkpoints)", func(c context.Context, tx *sql.Tx) error { + s := common.SQLWithRetry{DB: cpdb.db, Logger: log.L()} + err := s.Transact(ctx, "insert checkpoints", func(c context.Context, tx *sql.Tx) error { // If `node_id` is not the same but the `table_name` duplicates, // the CASE expression will return NULL, which can be used to violate // the NOT NULL requirement of `session` column, and caused this INSERT @@ -421,8 +429,11 @@ func (cpdb *MySQLCheckpointsDB) Get(ctx context.Context, tableName string) (*Tab Engines: map[int32]*EngineCheckpoint{}, } - purpose := "(read checkpoint " + tableName + ")" - err := common.TransactWithRetry(ctx, cpdb.db, purpose, func(c context.Context, tx *sql.Tx) error { + s := common.SQLWithRetry{ + DB: cpdb.db, + Logger: log.With(zap.String("table", tableName)), + } + err := s.Transact(ctx, "read checkpoint", func(c context.Context, tx *sql.Tx) error { // 1. Populate the engines. engineQuery := fmt.Sprintf(` @@ -512,7 +523,11 @@ func (cpdb *MySQLCheckpointsDB) Get(ctx context.Context, tableName string) (*Tab } func (cpdb *MySQLCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tableName string, checkpoints map[int32]*EngineCheckpoint) error { - err := common.TransactWithRetry(ctx, cpdb.db, "(update engine checkpoints for "+tableName+")", func(c context.Context, tx *sql.Tx) error { + s := common.SQLWithRetry{ + DB: cpdb.db, + Logger: log.With(zap.String("table", tableName)), + } + err := s.Transact(ctx, "update engine checkpoints", func(c context.Context, tx *sql.Tx) error { engineStmt, err := tx.PrepareContext(c, fmt.Sprintf(` REPLACE INTO %s.%s (table_name, engine_id, status) VALUES (?, ?, ?); `, cpdb.schema, checkpointTableNameEngine)) @@ -580,7 +595,8 @@ func (cpdb *MySQLCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpoi UPDATE %s.%s SET status = ? 
WHERE (table_name, engine_id) = (?, ?); `, cpdb.schema, checkpointTableNameEngine) - err := common.TransactWithRetry(context.Background(), cpdb.db, "(update checkpoints)", func(c context.Context, tx *sql.Tx) error { + s := common.SQLWithRetry{DB: cpdb.db, Logger: log.L()} + err := s.Transact(context.Background(), "update checkpoints", func(c context.Context, tx *sql.Tx) error { chunkStmt, e := tx.PrepareContext(c, chunkQuery) if e != nil { return errors.Trace(e) @@ -634,7 +650,7 @@ func (cpdb *MySQLCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpoi return nil }) if err != nil { - common.AppLogger.Errorf("failed to save checkpoint: %v", err) + log.L().Error("save checkpoint failed", zap.Error(err)) } } @@ -656,7 +672,10 @@ func NewFileCheckpointsDB(path string) *FileCheckpointsDB { if err == nil { cpdb.checkpoints.Unmarshal(content) } else { - common.AppLogger.Warnf("failed to open checkpoint file %s, going to create a new one: %v", path, err) + log.L().Warn("open checkpoint file failed, going to create a new one", + zap.String("path", path), + log.ShortError(err), + ) } return cpdb } @@ -813,7 +832,7 @@ func (cpdb *FileCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpoin } if err := cpdb.save(); err != nil { - common.AppLogger.Errorf("failed to save checkpoint: %v", err) + log.L().Error("save checkpoint failed", zap.Error(err)) } } @@ -863,7 +882,11 @@ func (cpdb *MySQLCheckpointsDB) RemoveCheckpoint(ctx context.Context, tableName deleteChunkQuery := fmt.Sprintf(deleteChunkFmt, cpdb.schema, checkpointTableNameChunk, checkpointTableNameTable) deleteEngineQuery := fmt.Sprintf(deleteEngineFmt, cpdb.schema, checkpointTableNameEngine, checkpointTableNameTable) deleteTableQuery := fmt.Sprintf(deleteTableFmt, cpdb.schema, checkpointTableNameTable) - err := common.TransactWithRetry(ctx, cpdb.db, fmt.Sprintf("(remove checkpoints of %s)", tableName), func(c context.Context, tx *sql.Tx) error { + s := common.SQLWithRetry{ + DB: cpdb.db, + Logger: log.With(zap.String("table", tableName)), + } + err := s.Transact(ctx, "remove checkpoints", func(c context.Context, tx *sql.Tx) error { if _, e := tx.ExecContext(c, deleteChunkQuery, arg); e != nil { return errors.Trace(e) } @@ -895,7 +918,11 @@ func (cpdb *MySQLCheckpointsDB) IgnoreErrorCheckpoint(ctx context.Context, table UPDATE %s.%s SET status = %d WHERE %s = ? 
AND status <= %d; `, cpdb.schema, checkpointTableNameTable, CheckpointStatusLoaded, colName, CheckpointStatusMaxInvalid) - err := common.TransactWithRetry(ctx, cpdb.db, fmt.Sprintf("(ignore error checkpoints for %s)", tableName), func(c context.Context, tx *sql.Tx) error { + s := common.SQLWithRetry{ + DB: cpdb.db, + Logger: log.With(zap.String("table", tableName)), + } + err := s.Transact(ctx, "ignore error checkpoints", func(c context.Context, tx *sql.Tx) error { if _, e := tx.ExecContext(c, engineQuery, arg); e != nil { return errors.Trace(e) } @@ -942,7 +969,11 @@ func (cpdb *MySQLCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tabl var targetTables []DestroyedTableCheckpoint - err := common.TransactWithRetry(ctx, cpdb.db, fmt.Sprintf("(destroy error checkpoints for %s)", tableName), func(c context.Context, tx *sql.Tx) error { + s := common.SQLWithRetry{ + DB: cpdb.db, + Logger: log.With(zap.String("table", tableName)), + } + err := s.Transact(ctx, "destroy error checkpoints", func(c context.Context, tx *sql.Tx) error { // Obtain the list of tables targetTables = nil rows, e := tx.QueryContext(c, selectQuery, arg) diff --git a/lightning/restore/restore.go b/lightning/restore/restore.go index 4d390410c..2207fa177 100644 --- a/lightning/restore/restore.go +++ b/lightning/restore/restore.go @@ -38,10 +38,12 @@ import ( "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/kvencoder" + "go.uber.org/zap" "github.com/pingcap/tidb-lightning/lightning/common" "github.com/pingcap/tidb-lightning/lightning/config" "github.com/pingcap/tidb-lightning/lightning/kv" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/tidb-lightning/lightning/metric" "github.com/pingcap/tidb-lightning/lightning/mydump" verify "github.com/pingcap/tidb-lightning/lightning/verification" @@ -87,6 +89,7 @@ type errorSummary struct { status CheckpointStatus err error } + type errorSummaries struct { sync.Mutex summary map[string]errorSummary @@ -95,13 +98,17 @@ type errorSummaries struct { func (es *errorSummaries) emitLog() { es.Lock() defer es.Unlock() + + logger := log.L() if errorCount := len(es.summary); errorCount > 0 { - var msg strings.Builder - fmt.Fprintf(&msg, "Totally **%d** tables failed to be imported.\n", errorCount) + logger.Error("tables failed to be imported", zap.Int("count", errorCount)) for tableName, errorSummary := range es.summary { - fmt.Fprintf(&msg, "- [%s] [%s] %s\n", tableName, errorSummary.status.MetricName(), errorSummary.err.Error()) + logger.Error("-", + zap.String("table", tableName), + zap.String("status", errorSummary.status.MetricName()), + log.ShortError(errorSummary.err), + ) } - common.AppLogger.Error(msg.String()) } } @@ -209,7 +216,6 @@ func (rc *RestoreController) Close() { } func (rc *RestoreController) Run(ctx context.Context) error { - timer := time.Now() opts := []func(context.Context) error{ rc.checkRequirements, rc.restoreSchema, @@ -219,25 +225,28 @@ func (rc *RestoreController) Run(ctx context.Context) error { rc.cleanCheckpoints, } + task := log.L().Begin(zap.InfoLevel, "the whole procedure") + var err error outside: for i, process := range opts { err = process(ctx) + logger := task.With(zap.Int("step", i), log.ShortError(err)) + switch { case err == nil: - case !common.ShouldLogError(err): - common.AppLogger.Infof("[step %d] user terminated : %v", i, err) + case log.IsContextCanceledError(err): + logger.Info("user terminated") err = nil break outside default: - 
common.AppLogger.Errorf("[step %d] run cause error : %v", i, err) + logger.Error("run failed") fmt.Fprintf(os.Stderr, "Error: %s\n", err) break outside // ps : not continue } } - common.AppLogger.Infof("the whole procedure takes %v", time.Since(timer)) - + task.End(zap.ErrorLevel, err) rc.errorSummaries.emitLog() return errors.Trace(err) @@ -254,17 +263,18 @@ func (rc *RestoreController) restoreSchema(ctx context.Context) error { tidbMgr.db.ExecContext(ctx, "SET SQL_MODE = ?", rc.cfg.TiDB.StrSQLMode) for _, dbMeta := range rc.dbMetas { - timer := time.Now() - common.AppLogger.Infof("restore table schema for `%s`", dbMeta.Name) + task := log.With(zap.String("db", dbMeta.Name)).Begin(zap.InfoLevel, "restore table schema") + tablesSchema := make(map[string]string) for _, tblMeta := range dbMeta.Tables { tablesSchema[tblMeta.Name] = tblMeta.GetSchema() } err = tidbMgr.InitSchema(ctx, dbMeta.Name, tablesSchema) + + task.End(zap.ErrorLevel, err) if err != nil { - return errors.Errorf("db schema failed to init : %v", err) + return errors.Annotatef(err, "restore table schema %s failed", dbMeta.Name) } - common.AppLogger.Infof("restore table schema for `%s` takes %v", dbMeta.Name, time.Since(timer)) } } dbInfos, err := tidbMgr.LoadSchemaInfo(ctx, rc.dbMetas) @@ -413,10 +423,10 @@ func (rc *RestoreController) runPeriodicActions(ctx context.Context, stop <-chan for { select { case <-ctx.Done(): - common.AppLogger.Warnf("Stopping periodic actions due to %v", ctx.Err()) + log.L().Warn("stopping periodic actions", log.ShortError(ctx.Err())) return case <-stop: - common.AppLogger.Info("Everything imported, stopping periodic actions") + log.L().Info("everything imported, stopping periodic actions") return case <-switchModeTicker.C: @@ -432,20 +442,23 @@ func (rc *RestoreController) runPeriodicActions(ctx context.Context, stop <-chan completedTables := metric.ReadCounter(metric.TableCounter.WithLabelValues(metric.TableStateCompleted, metric.TableResultSuccess)) bytesRead := metric.ReadHistogramSum(metric.RowReadBytesHistogram) - var remaining string + var state string + var remaining zap.Field if finished >= estimated { - remaining = ", post-processing" + state = "post-processing" + remaining = zap.Skip() } else if finished > 0 { remainNanoseconds := (estimated/finished - 1) * nanoseconds - remaining = fmt.Sprintf(", remaining %s", time.Duration(remainNanoseconds).Round(time.Second)) + state = "writing" + remaining = zap.Duration("remaining", time.Duration(remainNanoseconds).Round(time.Second)) } // Note: a speed of 28 MiB/s roughly corresponds to 100 GiB/hour. 
- common.AppLogger.Infof( - "progress: %.0f/%.0f chunks (%.1f%%), %.0f/%.0f tables (%.1f%%), speed %.2f MiB/s%s", - finished, estimated, finished/estimated*100, - completedTables, totalTables, completedTables/totalTables*100, - bytesRead/(1048576e-9*nanoseconds), + log.L().Info("progress", + zap.String("files", fmt.Sprintf("%.0f/%.0f (%.1f%%)", finished, estimated, finished/estimated*100)), + zap.String("tables", fmt.Sprintf("%.0f/%.0f (%.1f%%)", completedTables, totalTables, completedTables/totalTables*100)), + zap.Float64("speed(MiB/s)", bytesRead/(1048576e-9*nanoseconds)), + zap.String("state", state), remaining, ) } @@ -453,7 +466,8 @@ func (rc *RestoreController) runPeriodicActions(ctx context.Context, stop <-chan } func (rc *RestoreController) restoreTables(ctx context.Context) error { - timer := time.Now() + logTask := log.L().Begin(zap.InfoLevel, "restore all tables data") + var wg sync.WaitGroup var restoreErr common.OnceError @@ -470,9 +484,11 @@ func (rc *RestoreController) restoreTables(ctx context.Context) error { for i := 0; i < rc.cfg.App.IndexConcurrency; i++ { go func() { for task := range taskCh { + tableLogTask := task.tr.logger.Begin(zap.InfoLevel, "restore table") err := task.tr.restoreTable(ctx, rc, task.cp) + tableLogTask.End(zap.ErrorLevel, err) metric.RecordTableCount("completed", err) - restoreErr.Set(task.tr.tableName, err) + restoreErr.Set(err) wg.Done() } }() @@ -481,19 +497,12 @@ func (rc *RestoreController) restoreTables(ctx context.Context) error { for _, dbMeta := range rc.dbMetas { dbInfo, ok := rc.dbInfos[dbMeta.Name] if !ok { - common.AppLogger.Errorf("database %s not found in rc.dbInfos", dbMeta.Name) - continue + return errors.Errorf("database %s not found in rc.dbInfos", dbMeta.Name) } for _, tableMeta := range dbMeta.Tables { tableInfo, ok := dbInfo.Tables[tableMeta.Name] if !ok { - return errors.Errorf("table info %s not found", tableMeta.Name) - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: + return errors.Errorf("table info %s.%s not found", dbMeta.Name, tableMeta.Name) } tableName := common.UniqueTable(dbInfo.Name, tableInfo.Name) @@ -510,15 +519,20 @@ func (rc *RestoreController) restoreTables(ctx context.Context) error { } wg.Add(1) - taskCh <- task{tr: tr, cp: cp} + select { + case taskCh <- task{tr: tr, cp: cp}: + case <-ctx.Done(): + return ctx.Err() + } } } wg.Wait() stopPeriodicActions <- struct{}{} - common.AppLogger.Infof("restore all tables data takes %v", time.Since(timer)) - return errors.Trace(restoreErr.Get()) + err := restoreErr.Get() + logTask.End(zap.ErrorLevel, err) + return err } func (t *TableRestore) restoreTable( @@ -530,7 +544,10 @@ func (t *TableRestore) restoreTable( // no need to do anything if the chunks are already populated if len(cp.Engines) > 0 { - common.AppLogger.Infof("[%s] reusing %d engines and %d chunks from checkpoint", t.tableName, len(cp.Engines), cp.CountChunks()) + t.logger.Info("reusing engines and files info from checkpoint", + zap.Int("enginesCnt", len(cp.Engines)), + zap.Int("filesCnt", cp.CountChunks()), + ) } else if cp.Status < CheckpointStatusAllWritten { if err := t.populateChunks(rc.cfg, cp); err != nil { return errors.Trace(err) @@ -595,7 +612,7 @@ func (t *TableRestore) restoreEngines(ctx context.Context, rc *RestoreController cp.Status, indexEngineCp.Status) } - timer := time.Now() + logTask := t.logger.Begin(zap.InfoLevel, "import whole table") var wg sync.WaitGroup var engineErr common.OnceError @@ -623,26 +640,27 @@ func (t *TableRestore) restoreEngines(ctx 
context.Context, rc *RestoreController go func(w *worker.Worker, eid int32, ecp *EngineCheckpoint) { defer wg.Done() - tag := fmt.Sprintf("%s:%d", t.tableName, eid) - + // tag := fmt.Sprintf("%s:%d", t.tableName, eid) + engineLogTask := t.logger.With(zap.Int32("engineNumber", eid)).Begin(zap.InfoLevel, "restore engine") dataClosedEngine, dataWorker, err := t.restoreEngine(ctx, rc, indexEngine, eid, ecp) + engineLogTask.End(zap.ErrorLevel, err) rc.tableWorkers.Recycle(w) if err != nil { - engineErr.Set(tag, err) + engineErr.Set(err) return } defer rc.closedEngineLimit.Recycle(dataWorker) if err := t.importEngine(ctx, dataClosedEngine, rc, eid, ecp); err != nil { - engineErr.Set(tag, err) + engineErr.Set(err) } }(restoreWorker, engineID, engine) } wg.Wait() - common.AppLogger.Infof("[%s] import whole table takes %v", t.tableName, time.Since(timer)) err = engineErr.Get() + logTask.End(zap.ErrorLevel, err) if err != nil { return errors.Trace(err) } @@ -656,7 +674,6 @@ func (t *TableRestore) restoreEngines(ctx context.Context, rc *RestoreController rc.saveStatusCheckpoint(t.tableName, indexEngineID, err, CheckpointStatusClosed) } if err != nil { - common.AppLogger.Errorf("[%s] [kv-deliver] index engine closed error: %s", t.tableName, errors.ErrorStack(err)) return errors.Trace(err) } } @@ -676,7 +693,6 @@ func (t *TableRestore) restoreEngines(ctx context.Context, rc *RestoreController rc.saveStatusCheckpoint(t.tableName, wholeTableEngineID, err, CheckpointStatusIndexImported) if err != nil { - common.AppLogger.Errorf("[%[1]s] failed to import index engine: %v", t.tableName, err.Error()) return errors.Trace(err) } } @@ -701,7 +717,7 @@ func (t *TableRestore) restoreEngine( return closedEngine, w, nil } - timer := time.Now() + logTask := t.logger.With(zap.Int32("engineNumber", engineID)).Begin(zap.InfoLevel, "encode kv data and write") dataEngine, err := rc.importer.OpenEngine(ctx, t.tableName, engineID) if err != nil { @@ -755,13 +771,11 @@ func (t *TableRestore) restoreEngine( return } metric.ChunkCounter.WithLabelValues(metric.ChunkStateFailed).Inc() - tag := fmt.Sprintf("%s:%d] [%s", t.tableName, engineID, &cr.chunk.Key) - chunkErr.Set(tag, err) + chunkErr.Set(err) }(restoreWorker, cr) } wg.Wait() - dur := time.Since(timer) // Report some statistics into the log for debugging. 
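// Editor's aside, not part of the patch: the totals gathered below are attached to
// the task logger via End, following the Begin/End pattern this patch introduces in
// lightning/log. A minimal usage sketch, with semantics inferred from the call
// sites (doWork, n and the "items" field are placeholders, not real API):
//
//	task := logger.Begin(zap.InfoLevel, "some long step") // logs the start, records the time
//	err := doWork()
//	task.End(zap.ErrorLevel, err, zap.Int("items", n))    // logs outcome plus elapsed time
//
// End appears to log at the Begin level on success and at the given level (here
// ErrorLevel) when err is non-nil, replacing the old timer-plus-Infof(time.Since) idiom.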
totalKVSize := uint64(0) @@ -771,8 +785,11 @@ func (t *TableRestore) restoreEngine( totalSQLSize += chunk.Chunk.EndOffset } - common.AppLogger.Infof("[%s:%d] encode kv data and write takes %v (read %d, written %d)", t.tableName, engineID, dur, totalSQLSize, totalKVSize) err = chunkErr.Get() + logTask.End(zap.ErrorLevel, err, + zap.Int64("read", totalSQLSize), + zap.Uint64("written", totalKVSize), + ) rc.saveStatusCheckpoint(t.tableName, engineID, err, CheckpointStatusAllWritten) if err != nil { return nil, nil, errors.Trace(err) @@ -782,7 +799,6 @@ func (t *TableRestore) restoreEngine( closedDataEngine, err := dataEngine.Close(ctx) rc.saveStatusCheckpoint(t.tableName, engineID, err, CheckpointStatusClosed) if err != nil { - common.AppLogger.Errorf("[kv-deliver] flush stage with error (step = close) : %s", errors.ErrorStack(err)) // If any error occurred, recycle worker immediately rc.closedEngineLimit.Recycle(dataWorker) return nil, nil, errors.Trace(err) @@ -817,11 +833,7 @@ func (t *TableRestore) importEngine( if rc.cfg.PostRestore.Level1Compact && atomic.CompareAndSwapInt32(&rc.compactState, compactStateIdle, compactStateDoing) { go func() { - err := rc.doCompact(ctx, Level1Compact) - if err != nil { - // log it and continue - common.AppLogger.Warnf("compact %d failed %v", Level1Compact, err) - } + rc.doCompact(ctx, Level1Compact) atomic.StoreInt32(&rc.compactState, compactStateIdle) }() } @@ -839,11 +851,7 @@ func (t *TableRestore) postProcess(ctx context.Context, rc *RestoreController, c rc.alterTableLock.Unlock() rc.saveStatusCheckpoint(t.tableName, wholeTableEngineID, err, CheckpointStatusAlteredAutoInc) if err != nil { - common.AppLogger.Errorf( - "[%[1]s] failed to AUTO TABLE %[1]s SET AUTO_INCREMENT=%[2]d : %[3]v", - t.tableName, t.alloc.Base()+1, err.Error(), - ) - return errors.Trace(err) + return err } } @@ -854,17 +862,16 @@ func (t *TableRestore) postProcess(ctx context.Context, rc *RestoreController, c localChecksum.Add(&chunk.Checksum) } } - common.AppLogger.Infof("[%s] local checksum [sum:%d, kvs:%d, size:%v]", - t.tableName, localChecksum.Sum(), localChecksum.SumKVS(), localChecksum.SumSize()) + + t.logger.Info("local checksum", zap.Object("checksum", &localChecksum)) if cp.Status < CheckpointStatusChecksummed { if !rc.cfg.PostRestore.Checksum { - common.AppLogger.Infof("[%s] Skip checksum.", t.tableName) + t.logger.Info("skip checksum") rc.saveStatusCheckpoint(t.tableName, wholeTableEngineID, nil, CheckpointStatusChecksumSkipped) } else { err := t.compareChecksum(ctx, rc.tidbMgr.db, localChecksum) rc.saveStatusCheckpoint(t.tableName, wholeTableEngineID, err, CheckpointStatusChecksummed) if err != nil { - common.AppLogger.Errorf("[%s] checksum failed: %v", t.tableName, err.Error()) return errors.Trace(err) } } @@ -873,13 +880,12 @@ func (t *TableRestore) postProcess(ctx context.Context, rc *RestoreController, c // 5. 
do table analyze if cp.Status < CheckpointStatusAnalyzed { if !rc.cfg.PostRestore.Analyze { - common.AppLogger.Infof("[%s] Skip analyze.", t.tableName) + t.logger.Info("skip analyze") rc.saveStatusCheckpoint(t.tableName, wholeTableEngineID, nil, CheckpointStatusAnalyzeSkipped) } else { err := t.analyzeTable(ctx, rc.tidbMgr.db) rc.saveStatusCheckpoint(t.tableName, wholeTableEngineID, err, CheckpointStatusAnalyzed) if err != nil { - common.AppLogger.Errorf("[%s] analyze failed: %v", t.tableName, err.Error()) return errors.Trace(err) } } @@ -891,17 +897,16 @@ func (t *TableRestore) postProcess(ctx context.Context, rc *RestoreController, c // do full compaction for the whole data. func (rc *RestoreController) fullCompact(ctx context.Context) error { if !rc.cfg.PostRestore.Compact { - common.AppLogger.Info("Skip full compaction.") + log.L().Info("skip full compaction") return nil } // wait until any existing level-1 compact to complete first. - common.AppLogger.Info("Wait for existing level 1 compaction to finish") - start := time.Now() + task := log.L().Begin(zap.InfoLevel, "wait for completion of existing level 1 compaction") for !atomic.CompareAndSwapInt32(&rc.compactState, compactStateIdle, compactStateDoing) { time.Sleep(100 * time.Millisecond) } - common.AppLogger.Infof("Wait for existing level 1 compaction to finish takes %v", time.Since(start)) + task.End(zap.ErrorLevel, nil) return errors.Trace(rc.doCompact(ctx, FullLevelCompact)) } @@ -920,9 +925,7 @@ func (rc *RestoreController) switchToNormalMode(ctx context.Context) error { } func (rc *RestoreController) switchTiKVMode(ctx context.Context, mode sstpb.SwitchMode) { - if err := rc.importer.SwitchMode(ctx, mode); err != nil { - common.AppLogger.Warnf("cannot switch to %s mode: %v", mode.String(), err) - } + rc.importer.SwitchMode(ctx, mode) } func (rc *RestoreController) checkRequirements(_ context.Context) error { @@ -1044,13 +1047,12 @@ func checkVersion(component string, expected, actual semver.Version) error { func (rc *RestoreController) cleanCheckpoints(ctx context.Context) error { if !rc.cfg.Checkpoint.Enable || rc.cfg.Checkpoint.KeepAfterSuccess { - common.AppLogger.Info("Skip clean checkpoints.") - + log.L().Info("skip clean checkpoints") return nil } - timer := time.Now() + task := log.L().Begin(zap.InfoLevel, "clean checkpoints") err := rc.checkpointsDB.RemoveCheckpoint(ctx, "all") - common.AppLogger.Infof("clean checkpoints takes %v", time.Since(timer)) + task.End(zap.ErrorLevel, err) return errors.Trace(err) } @@ -1103,6 +1105,7 @@ type TableRestore struct { tableMeta *mydump.MDTableMeta encTable table.Table alloc autoid.Allocator + logger log.Logger } func NewTableRestore( @@ -1125,46 +1128,45 @@ func NewTableRestore( tableMeta: tableMeta, encTable: tbl, alloc: idAlloc, + logger: log.With(zap.String("table", tableName)), }, nil } func (tr *TableRestore) Close() { tr.encTable = nil - common.AppLogger.Infof("[%s] restore done", tr.tableName) + tr.logger.Info("restore done") } func (t *TableRestore) populateChunks(cfg *config.Config, cp *TableCheckpoint) error { - common.AppLogger.Infof("[%s] load chunks", t.tableName) - timer := time.Now() - + task := t.logger.Begin(zap.InfoLevel, "load engines and files") chunks, err := mydump.MakeTableRegions(t.tableMeta, t.tableInfo.Columns, cfg.Mydumper.BatchSize, cfg.Mydumper.BatchImportRatio, cfg.App.TableConcurrency) - if err != nil { - return errors.Trace(err) - } - - for _, chunk := range chunks { - engine, found := cp.Engines[chunk.EngineID] - if !found { - engine = 
&EngineCheckpoint{ - Status: CheckpointStatusLoaded, + if err == nil { + for _, chunk := range chunks { + engine, found := cp.Engines[chunk.EngineID] + if !found { + engine = &EngineCheckpoint{ + Status: CheckpointStatusLoaded, + } + cp.Engines[chunk.EngineID] = engine } - cp.Engines[chunk.EngineID] = engine + engine.Chunks = append(engine.Chunks, &ChunkCheckpoint{ + Key: ChunkCheckpointKey{ + Path: chunk.File, + Offset: chunk.Chunk.Offset, + }, + ColumnPermutation: nil, + Chunk: chunk.Chunk, + }) } - engine.Chunks = append(engine.Chunks, &ChunkCheckpoint{ - Key: ChunkCheckpointKey{ - Path: chunk.File, - Offset: chunk.Chunk.Offset, - }, - ColumnPermutation: nil, - Chunk: chunk.Chunk, - }) - } - - // Add index engine checkpoint - cp.Engines[indexEngineID] = &EngineCheckpoint{Status: CheckpointStatusLoaded} - common.AppLogger.Infof("[%s] load %d engines and %d chunks takes %v", t.tableName, len(cp.Engines), len(chunks), time.Since(timer)) - return nil + // Add index engine checkpoint + cp.Engines[indexEngineID] = &EngineCheckpoint{Status: CheckpointStatusLoaded} + } + task.End(zap.ErrorLevel, err, + zap.Int("enginesCnt", len(cp.Engines)), + zap.Int("filesCnt", len(chunks)), + ) + return err } // initializeColumns computes the "column permutation" for an INSERT INTO @@ -1213,31 +1215,18 @@ func (t *TableRestore) initializeColumns(columns []string, ccp *ChunkCheckpoint) } func (tr *TableRestore) restoreTableMeta(ctx context.Context, db *sql.DB) error { - timer := time.Now() - - err := AlterAutoIncrement(ctx, db, tr.tableMeta.DB, tr.tableMeta.Name, tr.alloc.Base()+1) - if err != nil { - return errors.Trace(err) - } - common.AppLogger.Infof("[%s] alter table set auto_id takes %v", common.UniqueTable(tr.tableMeta.DB, tr.tableMeta.Name), time.Since(timer)) - return nil + return AlterAutoIncrement(ctx, db, tr.tableName, tr.alloc.Base()+1) } func (tr *TableRestore) importKV(ctx context.Context, closedEngine *kv.ClosedEngine) error { - common.AppLogger.Infof("[%s] flush kv deliver ...", closedEngine.Tag()) start := time.Now() - if err := closedEngine.Import(ctx); err != nil { - if common.ShouldLogError(err) { - common.AppLogger.Errorf("[%s] failed to flush kvs : %s", closedEngine.Tag(), err.Error()) - } return errors.Trace(err) } closedEngine.Cleanup(ctx) dur := time.Since(start) metric.ImportSecondsHistogram.Observe(dur.Seconds()) - common.AppLogger.Infof("[%s] kv deliver all flushed, takes %v", closedEngine.Tag(), dur) // gofail: var SlowDownImport struct{} @@ -1246,10 +1235,7 @@ func (tr *TableRestore) importKV(ctx context.Context, closedEngine *kv.ClosedEng // do checksum for each table. 
func (tr *TableRestore) compareChecksum(ctx context.Context, db *sql.DB, localChecksum verify.KVChecksum) error {
-	start := time.Now()
 	remoteChecksum, err := DoChecksum(ctx, db, tr.tableName)
-	dur := time.Since(start)
-	metric.ChecksumSecondsHistogram.Observe(dur.Seconds())
 	if err != nil {
 		return errors.Trace(err)
 	}
@@ -1264,20 +1250,16 @@ func (tr *TableRestore) compareChecksum(ctx context.Context, db *sql.DB, localCh
 		)
 	}

-	common.AppLogger.Infof("[%s] checksum pass, %+v takes %v", tr.tableName, localChecksum, dur)
+	tr.logger.Info("checksum pass", zap.Object("local", &localChecksum))
 	return nil
 }

 func (tr *TableRestore) analyzeTable(ctx context.Context, db *sql.DB) error {
-	timer := time.Now()
-	common.AppLogger.Infof("[%s] analyze", tr.tableName)
-	query := fmt.Sprintf("ANALYZE TABLE %s", tr.tableName)
-	err := common.ExecWithRetry(ctx, db, query, query)
-	if err != nil {
-		return errors.Trace(err)
-	}
-	common.AppLogger.Infof("[%s] analyze takes %v", tr.tableName, time.Since(timer))
-	return nil
+	task := tr.logger.Begin(zap.InfoLevel, "analyze")
+	err := common.SQLWithRetry{DB: db, Logger: tr.logger}.
+		Exec(ctx, "analyze table", "ANALYZE TABLE "+tr.tableName)
+	task.End(zap.ErrorLevel, err)
+	return err
 }

 // RemoteChecksum represents a checksum result got from tidb.
@@ -1289,38 +1271,26 @@ type RemoteChecksum struct {
 	TotalBytes uint64
 }

-func (c *RemoteChecksum) String() string {
-	return fmt.Sprintf("[%s] remote_checksum=%d, total_kvs=%d, total_bytes=%d", common.UniqueTable(c.Schema, c.Table), c.Checksum, c.TotalKVs, c.TotalBytes)
-}
-
 func setSessionConcurrencyVars(ctx context.Context, db *sql.DB, dsn config.DBStore) {
-	err := common.ExecWithRetry(ctx, db, "(set session concurrency variables)", `SET
+	common.SQLWithRetry{DB: db, Logger: log.L()}.Exec(ctx, "set session concurrency variables", `SET
 		SESSION tidb_build_stats_concurrency = ?,
 		SESSION tidb_distsql_scan_concurrency = ?,
 		SESSION tidb_index_serial_scan_concurrency = ?,
 		SESSION tidb_checksum_table_concurrency = ?;
 	`, dsn.BuildStatsConcurrency, dsn.DistSQLScanConcurrency, dsn.IndexSerialScanConcurrency, dsn.ChecksumTableConcurrency)
-	if err != nil {
-		common.AppLogger.Warnf("failed to set session concurrency variables: %s", err.Error())
-	}
 }

 // DoChecksum do checksum for tables.
 // table should be in <db>.<tbl>, format. e.g. foo.bar
 func DoChecksum(ctx context.Context, db *sql.DB, table string) (*RemoteChecksum, error) {
-	timer := time.Now()
-
 	ori, err := increaseGCLifeTime(ctx, db)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
 	// set it back finally
-	defer func() {
-		err = UpdateGCLifeTime(ctx, db, ori)
-		if err != nil && common.ShouldLogError(err) {
-			common.AppLogger.Errorf("[%s] update tikv_gc_life_time error %v", table, errors.ErrorStack(err))
-		}
-	}()
+	defer UpdateGCLifeTime(ctx, db, ori)
+
+	task := log.With(zap.String("table", table)).Begin(zap.InfoLevel, "remote checksum")

 	// ADMIN CHECKSUM TABLE <table>,<table>
example. // mysql> admin checksum table test.t; @@ -1331,14 +1301,14 @@ func DoChecksum(ctx context.Context, db *sql.DB, table string) (*RemoteChecksum, // +---------+------------+---------------------+-----------+-------------+ cs := RemoteChecksum{} - common.AppLogger.Infof("[%s] doing remote checksum", table) - query := fmt.Sprintf("ADMIN CHECKSUM TABLE %s", table) - err = common.QueryRowWithRetry(ctx, db, query, &cs.Schema, &cs.Table, &cs.Checksum, &cs.TotalKVs, &cs.TotalBytes) + err = common.SQLWithRetry{DB: db, Logger: task.Logger}.QueryRow(ctx, "compute remote checksum", + "ADMIN CHECKSUM TABLE "+table, &cs.Schema, &cs.Table, &cs.Checksum, &cs.TotalKVs, &cs.TotalBytes, + ) + dur := task.End(zap.ErrorLevel, err) + metric.ChecksumSecondsHistogram.Observe(dur.Seconds()) if err != nil { return nil, errors.Trace(err) } - common.AppLogger.Infof("[%s] do checksum takes %v", table, time.Since(timer)) - return &cs, nil } @@ -1552,7 +1522,6 @@ func (cr *chunkRestore) restore( kvEncoder = nil }() - timer := time.Now() readTotalDur := time.Duration(0) encodeTotalDur := time.Duration(0) @@ -1566,6 +1535,12 @@ func (cr *chunkRestore) restore( defer close(kvsCh) + logTask := t.logger.With( + zap.Int32("engineNumber", engineID), + zap.Int("fileIndex", cr.index), + zap.Stringer("path", &cr.chunk.Key), + ).Begin(zap.InfoLevel, "restore file") + initializedColumns := false outside: for { @@ -1597,7 +1572,6 @@ outside: metric.RowReadBytesHistogram.Observe(float64(newOffset - offset)) // sql -> kv - start = time.Now() lastRow := cr.parser.LastRow() kvs, err := kvEncoder.Encode(lastRow.Row, lastRow.RowID, cr.chunk.ColumnPermutation) encodeDur := time.Since(start) @@ -1605,7 +1579,7 @@ outside: metric.RowEncodeSecondsHistogram.Observe(encodeDur.Seconds()) if err != nil { - common.AppLogger.Errorf("kv encode failed = %s\n", err.Error()) + logTask.Error("kv encode failed", log.ShortError(err)) return errors.Trace(err) } @@ -1639,14 +1613,12 @@ outside: select { case deliverResult := <-deliverCompleteCh: - if deliverResult.err == nil { - common.AppLogger.Infof( - "[%s:%d] restore chunk #%d (%s) takes %v (read: %v, encode: %v, deliver: %v, size: %d, kvs: %d)", - t.tableName, engineID, cr.index, &cr.chunk.Key, time.Since(timer), - readTotalDur, encodeTotalDur, deliverResult.totalDur, - cr.chunk.Checksum.SumSize(), cr.chunk.Checksum.SumKVS(), - ) - } + logTask.End(zap.ErrorLevel, deliverResult.err, + zap.Duration("readDur", readTotalDur), + zap.Duration("encodeDur", encodeTotalDur), + zap.Duration("deliverDur", deliverResult.totalDur), + zap.Object("checksum", &cr.chunk.Checksum), + ) return errors.Trace(deliverResult.err) case <-ctx.Done(): return ctx.Err() diff --git a/lightning/restore/tidb.go b/lightning/restore/tidb.go index 31472ba1b..3443f77d4 100644 --- a/lightning/restore/tidb.go +++ b/lightning/restore/tidb.go @@ -20,14 +20,15 @@ import ( "net/http" "net/url" "regexp" - "time" "github.com/pingcap/errors" "github.com/pingcap/parser/model" "github.com/pingcap/tidb-lightning/lightning/common" "github.com/pingcap/tidb-lightning/lightning/config" + "github.com/pingcap/tidb-lightning/lightning/log" "github.com/pingcap/tidb-lightning/lightning/metric" "github.com/pingcap/tidb-lightning/lightning/mydump" + "go.uber.org/zap" ) type TiDBManager struct { @@ -73,26 +74,40 @@ func (timgr *TiDBManager) Close() { } func (timgr *TiDBManager) InitSchema(ctx context.Context, database string, tablesSchema map[string]string) error { + sql := common.SQLWithRetry{ + DB: timgr.db, + Logger: log.With(zap.String("db", 
database)), + } + createDatabase := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", database) - err := common.ExecWithRetry(ctx, timgr.db, createDatabase, createDatabase) + err := sql.Exec(ctx, "create database", createDatabase) if err != nil { return errors.Trace(err) } useDB := fmt.Sprintf("USE `%s`", database) - err = common.ExecWithRetry(ctx, timgr.db, useDB, useDB) + err = sql.Exec(ctx, "use database", useDB) if err != nil { return errors.Trace(err) } - for _, sqlCreateTable := range tablesSchema { - timer := time.Now() - if err = safeCreateTable(ctx, timgr.db, sqlCreateTable); err != nil { - return errors.Trace(err) + task := sql.Logger.Begin(zap.InfoLevel, "create tables") + for tbl, sqlCreateTable := range tablesSchema { + task.Debug("create table", zap.String("schema", sqlCreateTable)) + + safeCreateTable := createTableIfNotExistsStmt(sqlCreateTable) + sql2 := common.SQLWithRetry{ + DB: timgr.db, + Logger: sql.Logger.With(zap.String("table", common.UniqueTable(database, tbl))), + HideQueryLog: true, + } + err = sql2.Exec(ctx, "create table", safeCreateTable) + if err != nil { + break } - common.AppLogger.Infof("%s takes %v", sqlCreateTable, time.Since(timer)) } + task.End(zap.ErrorLevel, err) - return nil + return errors.Trace(err) } var createTableRegexp = regexp.MustCompile(`(?i)CREATE TABLE( IF NOT EXISTS)?`) @@ -108,12 +123,6 @@ func createTableIfNotExistsStmt(createTable string) string { return createTable } -func safeCreateTable(ctx context.Context, db *sql.DB, createTable string) error { - createTable = createTableIfNotExistsStmt(createTable) - err := common.ExecWithRetry(ctx, db, createTable, createTable) - return errors.Trace(err) -} - func (timgr *TiDBManager) getTables(schema string) ([]*model.TableInfo, error) { baseURL := *timgr.baseURL baseURL.Path = fmt.Sprintf("schema/%s", schema) @@ -121,14 +130,17 @@ func (timgr *TiDBManager) getTables(schema string) ([]*model.TableInfo, error) { var tables []*model.TableInfo err := common.GetJSON(timgr.client, baseURL.String(), &tables) if err != nil { - return nil, errors.Annotatef(errors.Trace(err), "get tables for schema %s", schema) + return nil, errors.Annotatef(err, "get tables for schema %s", schema) } return tables, nil } func (timgr *TiDBManager) DropTable(ctx context.Context, tableName string) error { - query := "DROP TABLE " + tableName - return errors.Trace(common.ExecWithRetry(ctx, timgr.db, query, query)) + sql := common.SQLWithRetry{ + DB: timgr.db, + Logger: log.With(zap.String("table", tableName)), + } + return sql.Exec(ctx, "drop table", "DROP TABLE "+tableName) } func (timgr *TiDBManager) LoadSchemaInfo(ctx context.Context, schemas []*mydump.MDDatabaseMeta) (map[string]*TidbDBInfo, error) { @@ -173,31 +185,50 @@ func (timgr *TiDBManager) LoadSchemaInfo(ctx context.Context, schemas []*mydump. 
} func (timgr *TiDBManager) getCreateTableStmt(ctx context.Context, schema, table string) (string, error) { - query := fmt.Sprintf("SHOW CREATE TABLE %s", common.UniqueTable(schema, table)) + tableName := common.UniqueTable(schema, table) + sql := common.SQLWithRetry{ + DB: timgr.db, + Logger: log.With(zap.String("table", tableName)), + } var tbl, createTable string - err := common.QueryRowWithRetry(ctx, timgr.db, query, &tbl, &createTable) - return createTable, errors.Annotatef(err, "%s", query) + err := sql.QueryRow(ctx, "show create table", + "SHOW CREATE TABLE "+tableName, + &tbl, &createTable, + ) + return createTable, err } -func ObtainGCLifeTime(ctx context.Context, db *sql.DB) (gcLifeTime string, err error) { - query := "SELECT VARIABLE_VALUE FROM mysql.tidb WHERE VARIABLE_NAME = 'tikv_gc_life_time'" - err = common.QueryRowWithRetry(ctx, db, query, &gcLifeTime) - return gcLifeTime, errors.Annotatef(err, "%s", query) +func ObtainGCLifeTime(ctx context.Context, db *sql.DB) (string, error) { + var gcLifeTime string + err := common.SQLWithRetry{DB: db, Logger: log.L()}.QueryRow(ctx, "obtain GC lifetime", + "SELECT VARIABLE_VALUE FROM mysql.tidb WHERE VARIABLE_NAME = 'tikv_gc_life_time'", + &gcLifeTime, + ) + return gcLifeTime, err } func UpdateGCLifeTime(ctx context.Context, db *sql.DB, gcLifeTime string) error { - query := "UPDATE mysql.tidb SET VARIABLE_VALUE = ? WHERE VARIABLE_NAME = 'tikv_gc_life_time'" - err := common.ExecWithRetry(ctx, db, query, query, gcLifeTime) - return errors.Annotatef(err, "%s -- ? = %s", query, gcLifeTime) + sql := common.SQLWithRetry{ + DB: db, + Logger: log.With(zap.String("gcLifeTime", gcLifeTime)), + } + return sql.Exec(ctx, "update GC lifetime", + "UPDATE mysql.tidb SET VARIABLE_VALUE = ? WHERE VARIABLE_NAME = 'tikv_gc_life_time'", + gcLifeTime, + ) } -func AlterAutoIncrement(ctx context.Context, db *sql.DB, schema string, table string, incr int64) error { - tableName := common.UniqueTable(schema, table) +func AlterAutoIncrement(ctx context.Context, db *sql.DB, tableName string, incr int64) error { + sql := common.SQLWithRetry{ + DB: db, + Logger: log.With(zap.String("table", tableName), zap.Int64("auto_increment", incr)), + } query := fmt.Sprintf("ALTER TABLE %s AUTO_INCREMENT=%d", tableName, incr) - common.AppLogger.Infof("[%s.%s] %s", schema, table, query) - err := common.ExecWithRetry(ctx, db, query, query) + task := sql.Logger.Begin(zap.InfoLevel, "alter table auto_increment") + err := sql.Exec(ctx, "alter table auto_increment", query) + task.End(zap.ErrorLevel, err) if err != nil { - common.AppLogger.Errorf("query failed %v, you should do it manually, err %v", query, err) + task.Error("alter table auto_increment failed, please perform the query manually", zap.String("query", query)) } return errors.Annotatef(err, "%s", query) } diff --git a/lightning/verification/checksum.go b/lightning/verification/checksum.go index a73097f32..a44fc2290 100644 --- a/lightning/verification/checksum.go +++ b/lightning/verification/checksum.go @@ -17,6 +17,7 @@ import ( "hash/crc64" kvec "github.com/pingcap/tidb/util/kvencoder" + "go.uber.org/zap/zapcore" ) var ecmaTable = crc64.MakeTable(crc64.ECMA) @@ -88,3 +89,10 @@ func (c *KVChecksum) SumSize() uint64 { func (c *KVChecksum) SumKVS() uint64 { return c.kvs } + +func (c *KVChecksum) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + encoder.AddUint64("cksum", c.checksum) + encoder.AddUint64("size", c.bytes) + encoder.AddUint64("kvs", c.kvs) + return nil +} diff --git a/tests/error_summary/run.sh 
b/tests/error_summary/run.sh index fecd8c7e6..19de9a754 100755 --- a/tests/error_summary/run.sh +++ b/tests/error_summary/run.sh @@ -40,7 +40,7 @@ check_contains 'sum(k): 32' # Verify the log contains the expected messages at the last few lines tail -20 "$TEST_DIR/lightning-error-summary.log" > "$TEST_DIR/lightning-error-summary.tail" -grep -Fq '[error] Totally **2** tables failed to be imported.' "$TEST_DIR/lightning-error-summary.tail" -grep -Fq '[`error_summary`.`a`] [checksum] checksum mismatched' "$TEST_DIR/lightning-error-summary.tail" -grep -Fq '[`error_summary`.`c`] [checksum] checksum mismatched' "$TEST_DIR/lightning-error-summary.tail" -! grep -Fq '[`error_summary`.`b`] [checksum] checksum mismatched' "$TEST_DIR/lightning-error-summary.tail" +grep -Fq '["tables failed to be imported"] [count=2]' "$TEST_DIR/lightning-error-summary.tail" +grep -Fq '[-] [table=`error_summary`.`a`] [status=checksum] [error="checksum mismatched' "$TEST_DIR/lightning-error-summary.tail" +grep -Fq '[-] [table=`error_summary`.`c`] [status=checksum] [error="checksum mismatched' "$TEST_DIR/lightning-error-summary.tail" +! grep -Fq '[-] [table=`error_summary`.`b`] [status=checksum] [error="checksum mismatched' "$TEST_DIR/lightning-error-summary.tail"
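For readers adapting call sites to the revamped helpers, a self-contained sketch of how SQLWithRetry and the task logger fit together. The field and method names (DB, Logger, Exec, QueryRow, Begin, End) are taken from the diff above; the DSN, the `test`.`users` table, and the MySQL driver import are invented assumptions about the caller's setup:

	package main

	import (
		"context"
		"database/sql"
		"os"

		_ "github.com/go-sql-driver/mysql"
		"github.com/pingcap/tidb-lightning/lightning/common"
		"github.com/pingcap/tidb-lightning/lightning/log"
		"go.uber.org/zap"
	)

	func main() {
		db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
		if err != nil {
			log.L().Error("open database failed", zap.Error(err))
			os.Exit(1)
		}
		defer db.Close()

		// The retry helper now carries its own logger, so every retry and
		// failure is reported with the caller's context fields attached,
		// replacing the old hand-formatted tag strings.
		s := common.SQLWithRetry{
			DB:     db,
			Logger: log.With(zap.String("table", "`test`.`users`")),
		}

		// Begin/End bracket the operation and log its duration and outcome,
		// mirroring the AlterAutoIncrement hunk above.
		task := s.Logger.Begin(zap.InfoLevel, "count rows")
		var count uint64
		// QueryRow(ctx, purpose, query, dests...): the purpose string names
		// the operation in log output, like "compute remote checksum" above.
		err = s.QueryRow(context.Background(), "count rows",
			"SELECT COUNT(*) FROM `test`.`users`", &count)
		task.End(zap.ErrorLevel, err, zap.Uint64("rows", count))
	}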