diff --git a/.bazelversion b/.bazelversion index 84197c89467dd..09b254e90c61e 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -5.3.2 +6.0.0 diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml deleted file mode 100644 index 94b68e9c95510..0000000000000 --- a/.github/workflows/misc.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: misc - -on: - workflow_dispatch: - pull_request: - branches: - - "master" - - "main" - - "release-**" - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - check: - permissions: - contents: read # to fetch code (actions/checkout) - pull-requests: write # to comment on pull-requests - - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Check File Permission - run: make check-file-perm - - name: Check License Header - uses: apache/skywalking-eyes/header@v0.4.0 - with: - log: info - token: ${{ secrets.GITHUB_TOKEN }} - config: .github/licenserc.yml diff --git a/DEPS.bzl b/DEPS.bzl index a1ff508ca83c7..f9a9cfe05aad5 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -147,6 +147,13 @@ def go_deps(): sum = "h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=", version = "v0.1.1", ) + go_repository( + name = "com_github_apache_skywalking_eyes", + build_file_proto_mode = "disable", + importpath = "github.com/apache/skywalking-eyes", + sum = "h1:O13kdRU6FCEZevfD01mdhTgCZLLfPZIQ0GXZrLl7FpQ=", + version = "v0.4.0", + ) go_repository( name = "com_github_apache_thrift", @@ -337,6 +344,14 @@ def go_deps(): sum = "h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=", version = "v0.8.0", ) + go_repository( + name = "com_github_bmatcuk_doublestar_v2", + build_file_proto_mode = "disable", + importpath = "github.com/bmatcuk/doublestar/v2", + sum = "h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI=", + version = "v2.0.4", + ) + go_repository( name = "com_github_bombsimon_wsl_v3", build_file_proto_mode = "disable", @@ -1090,8 +1105,8 @@ def go_deps(): name = "com_github_frankban_quicktest", build_file_proto_mode = "disable_global", importpath = "github.com/frankban/quicktest", - sum = "h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=", - version = "v1.11.3", + sum = "h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=", + version = "v1.14.3", ) go_repository( name = "com_github_fsnotify_fsnotify", @@ -1667,12 +1682,20 @@ def go_deps(): sum = "h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=", version = "v0.5.9", ) + go_repository( + name = "com_github_google_go_github_v33", + build_file_proto_mode = "disable", + importpath = "github.com/google/go-github/v33", + sum = "h1:qAf9yP0qc54ufQxzwv+u9H0tiVOnPJxo0lI/JXqw3ZM=", + version = "v33.0.0", + ) + go_repository( name = "com_github_google_go_querystring", build_file_proto_mode = "disable_global", importpath = "github.com/google/go-querystring", - sum = "h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=", - version = "v1.0.0", + sum = "h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=", + version = "v1.1.0", ) go_repository( name = "com_github_google_gofuzz", @@ -1681,6 +1704,14 @@ def go_deps(): sum = "h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=", version = "v1.1.0", ) + go_repository( + name = "com_github_google_licensecheck", + build_file_proto_mode = "disable", + importpath = "github.com/google/licensecheck", + sum = "h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs=", + version = "v0.3.1", + ) + go_repository( name = "com_github_google_martian", build_file_proto_mode = "disable_global", @@ -2051,6 +2082,14 
@@ def go_deps(): sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=", version = "v1.0.0", ) + go_repository( + name = "com_github_huandu_xstrings", + build_file_proto_mode = "disable", + importpath = "github.com/huandu/xstrings", + sum = "h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=", + version = "v1.3.1", + ) + go_repository( name = "com_github_hydrogen18_memlistener", build_file_proto_mode = "disable_global", @@ -2072,6 +2111,14 @@ def go_deps(): sum = "h1:uGg2frlt3IcT7kbV6LEp5ONv4vmoO2FW4qSO+my/aoM=", version = "v0.0.0-20210905161508-09a460cdf81d", ) + go_repository( + name = "com_github_imdario_mergo", + build_file_proto_mode = "disable", + importpath = "github.com/imdario/mergo", + sum = "h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=", + version = "v0.3.11", + ) + go_repository( name = "com_github_imkira_go_interpol", build_file_proto_mode = "disable_global", @@ -2667,6 +2714,14 @@ def go_deps(): sum = "h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q=", version = "v1.1.0", ) + go_repository( + name = "com_github_masterminds_goutils", + build_file_proto_mode = "disable", + importpath = "github.com/Masterminds/goutils", + sum = "h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=", + version = "v1.1.1", + ) + go_repository( name = "com_github_masterminds_semver", build_file_proto_mode = "disable", @@ -2674,6 +2729,21 @@ def go_deps(): sum = "h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=", version = "v1.5.0", ) + go_repository( + name = "com_github_masterminds_semver_v3", + build_file_proto_mode = "disable", + importpath = "github.com/Masterminds/semver/v3", + sum = "h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=", + version = "v3.1.1", + ) + go_repository( + name = "com_github_masterminds_sprig_v3", + build_file_proto_mode = "disable", + importpath = "github.com/Masterminds/sprig/v3", + sum = "h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=", + version = "v3.2.2", + ) + go_repository( name = "com_github_matoous_godox", build_file_proto_mode = "disable", @@ -2807,6 +2877,14 @@ def go_deps(): sum = "h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=", version = "v1.0.0", ) + go_repository( + name = "com_github_mitchellh_copystructure", + build_file_proto_mode = "disable", + importpath = "github.com/mitchellh/copystructure", + sum = "h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=", + version = "v1.0.0", + ) + go_repository( name = "com_github_mitchellh_go_homedir", build_file_proto_mode = "disable_global", @@ -3466,10 +3544,6 @@ def go_deps(): name = "com_github_rivo_uniseg", build_file_proto_mode = "disable_global", importpath = "github.com/rivo/uniseg", - patch_args = ["-p1"], - patches = [ - "//build/patches:com_github_rivo_uniseg.patch", - ], sum = "h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=", version = "v0.4.2", ) @@ -3668,8 +3742,8 @@ def go_deps(): name = "com_github_shopspring_decimal", build_file_proto_mode = "disable", importpath = "github.com/shopspring/decimal", - sum = "h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=", - version = "v0.0.0-20180709203117-cd690d0c9e24", + sum = "h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=", + version = "v1.2.0", ) go_repository( @@ -3822,6 +3896,14 @@ def go_deps(): sum = "h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=", version = "v1.12.0", ) + go_repository( + name = "com_github_spkg_bom", + build_file_proto_mode = "disable", + importpath = "github.com/spkg/bom", + sum = "h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64=", + version = "v1.0.0", + ) + go_repository( name = "com_github_ssgreg_nlreturn_v2", build_file_proto_mode = 
"disable", @@ -5832,8 +5914,8 @@ def go_deps(): name = "org_golang_x_tools", build_file_proto_mode = "disable_global", importpath = "golang.org/x/tools", - sum = "h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=", - version = "v0.2.0", + sum = "h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=", + version = "v0.5.0", ) go_repository( name = "org_golang_x_xerrors", diff --git a/Makefile b/Makefile index b138bfbbd0f04..6afde9f6420ec 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ dev: checklist check explaintest gogenerate br_unit_test test_part_parser_dev ut # Install the check tools. check-setup:tools/bin/revive -check: parser_yacc check-parallel lint tidy testSuite errdoc check-bazel-prepare +check: parser_yacc check-parallel lint tidy testSuite errdoc license check-bazel-prepare fmt: @echo "gofmt (simplify)" @@ -56,6 +56,12 @@ lint:tools/bin/revive @echo "linting" @tools/bin/revive -formatter friendly -config tools/check/revive.toml $(FILES_TIDB_TESTS) +license: + bazel $(BAZEL_GLOBAL_CONFIG) run $(BAZEL_CMD_CONFIG) \ + --run_under="cd $(CURDIR) && " \ + @com_github_apache_skywalking_eyes//cmd/license-eye:license-eye --run_under="cd $(CURDIR) && " -- -c ./.github/licenserc.yml header check + + tidy: @echo "go mod tidy" ./tools/check/check-tidy.sh diff --git a/WORKSPACE b/WORKSPACE index 627c7dd5c5575..6fce0d77d8da1 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -2,19 +2,19 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "io_bazel_rules_go", - sha256 = "56d8c5a5c91e1af73eca71a6fab2ced959b67c86d12ba37feedb0a2dfea441a6", + sha256 = "dd926a88a564a9246713a9c00b35315f54cbd46b31a26d5d8fb264c07045f05d", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.37.0/rules_go-v0.37.0.zip", - "https://github.com/bazelbuild/rules_go/releases/download/v0.37.0/rules_go-v0.37.0.zip", + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip", ], ) http_archive( name = "bazel_gazelle", - sha256 = "501deb3d5695ab658e82f6f6f549ba681ea3ca2a5fb7911154b5aa45596183fa", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.26.0/bazel-gazelle-v0.26.0.tar.gz", - "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.26.0/bazel-gazelle-v0.26.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", ], ) diff --git a/br/pkg/backup/schema.go b/br/pkg/backup/schema.go index f42976272ea54..19910af1c9b9b 100644 --- a/br/pkg/backup/schema.go +++ b/br/pkg/backup/schema.go @@ -112,7 +112,7 @@ func (ss *Schemas) BackupSchemas( var checksum *checkpoint.ChecksumItem var exists bool = false - if ss.checkpointChecksum != nil { + if ss.checkpointChecksum != nil && schema.tableInfo != nil { checksum, exists = ss.checkpointChecksum[schema.tableInfo.ID] } workerPool.ApplyOnErrorGroup(errg, func() error { diff --git a/br/pkg/lightning/common/BUILD.bazel b/br/pkg/lightning/common/BUILD.bazel index bc04df82904a9..3bd871276d733 100644 --- a/br/pkg/lightning/common/BUILD.bazel +++ b/br/pkg/lightning/common/BUILD.bazel @@ -35,9 +35,49 @@ go_library( "@org_golang_google_grpc//credentials", 
"@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//status", - "@org_golang_x_sys//unix", "@org_uber_go_zap//:zap", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:aix": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:android": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:illumos": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:js": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "@org_golang_x_sys//unix", + ], + "//conditions:default": [], + }), ) go_test( diff --git a/br/pkg/lightning/common/util.go b/br/pkg/lightning/common/util.go index b9bdf564403de..fbf275a99bfe1 100644 --- a/br/pkg/lightning/common/util.go +++ b/br/pkg/lightning/common/util.go @@ -44,6 +44,8 @@ const ( retryTimeout = 3 * time.Second defaultMaxRetry = 3 + + dbTimeout = 30 * time.Second ) // MySQLConnectParam records the parameters needed to connect to a MySQL database. @@ -74,6 +76,8 @@ func (param *MySQLConnectParam) ToDriverConfig() *mysql.Config { cfg.Params["charset"] = "utf8mb4" cfg.Params["sql_mode"] = fmt.Sprintf("'%s'", param.SQLMode) cfg.MaxAllowedPacket = int(param.MaxAllowedPacket) + cfg.ReadTimeout = dbTimeout + cfg.WriteTimeout = dbTimeout cfg.TLS = param.TLSConfig cfg.AllowFallbackToPlaintext = param.AllowFallbackToPlaintext diff --git a/br/pkg/lightning/config/BUILD.bazel b/br/pkg/lightning/config/BUILD.bazel index b69d2fca0d310..b035b506aebf2 100644 --- a/br/pkg/lightning/config/BUILD.bazel +++ b/br/pkg/lightning/config/BUILD.bazel @@ -25,6 +25,8 @@ go_library( "@com_github_docker_go_units//:go-units", "@com_github_go_sql_driver_mysql//:mysql", "@com_github_pingcap_errors//:errors", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//keepalive", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], diff --git a/br/pkg/lightning/config/config_test.go b/br/pkg/lightning/config/config_test.go index ea0cff40a04c7..16db98845e80c 100644 --- a/br/pkg/lightning/config/config_test.go +++ b/br/pkg/lightning/config/config_test.go @@ -643,7 +643,7 @@ func TestLoadConfig(t *testing.T) { err = taskCfg.Adjust(context.Background()) require.NoError(t, err) equivalentDSN := taskCfg.Checkpoint.MySQLParam.ToDriverConfig().FormatDSN() - expectedDSN := "guest:12345@tcp(172.16.30.11:4001)/?maxAllowedPacket=67108864&charset=utf8mb4&sql_mode=%27ONLY_FULL_GROUP_BY%2CSTRICT_TRANS_TABLES%2CNO_ZERO_IN_DATE%2CNO_ZERO_DATE%2CERROR_FOR_DIVISION_BY_ZERO%2CNO_AUTO_CREATE_USER%2CNO_ENGINE_SUBSTITUTION%27" + expectedDSN := "guest:12345@tcp(172.16.30.11:4001)/?readTimeout=30s&writeTimeout=30s&maxAllowedPacket=67108864&charset=utf8mb4&sql_mode=%27ONLY_FULL_GROUP_BY%2CSTRICT_TRANS_TABLES%2CNO_ZERO_IN_DATE%2CNO_ZERO_DATE%2CERROR_FOR_DIVISION_BY_ZERO%2CNO_AUTO_CREATE_USER%2CNO_ENGINE_SUBSTITUTION%27" require.Equal(t, expectedDSN, equivalentDSN) result := 
taskCfg.String() diff --git a/br/pkg/lightning/config/const.go b/br/pkg/lightning/config/const.go index 23a38ac41117d..e114eafd8ea88 100644 --- a/br/pkg/lightning/config/const.go +++ b/br/pkg/lightning/config/const.go @@ -15,7 +15,11 @@ package config import ( + "time" + "github.com/docker/go-units" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) const ( @@ -34,3 +38,11 @@ const ( DefaultBatchSize ByteSize = 100 * units.GiB ) + +var ( + DefaultGrpcKeepaliveParams = grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 10 * time.Second, + Timeout: 20 * time.Second, + PermitWithoutStream: false, + }) +) diff --git a/br/pkg/lightning/mydump/BUILD.bazel b/br/pkg/lightning/mydump/BUILD.bazel index d265cad78bce6..a4aa1626afc46 100644 --- a/br/pkg/lightning/mydump/BUILD.bazel +++ b/br/pkg/lightning/mydump/BUILD.bazel @@ -32,6 +32,7 @@ go_library( "//util/slice", "//util/table-filter", "@com_github_pingcap_errors//:errors", + "@com_github_spkg_bom//:bom", "@com_github_xitongsys_parquet_go//parquet", "@com_github_xitongsys_parquet_go//reader", "@com_github_xitongsys_parquet_go//source", diff --git a/br/pkg/lightning/mydump/parser.go b/br/pkg/lightning/mydump/parser.go index 512c3789cfa7f..0ac82ce189d71 100644 --- a/br/pkg/lightning/mydump/parser.go +++ b/br/pkg/lightning/mydump/parser.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/worker" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/types" + "github.com/spkg/bom" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -285,7 +286,13 @@ func (parser *blockParser) readBlock() error { parser.remainBuf.Write(parser.buf) parser.appendBuf.Reset() parser.appendBuf.Write(parser.remainBuf.Bytes()) - parser.appendBuf.Write(parser.blockBuf[:n]) + blockData := parser.blockBuf[:n] + if parser.pos == 0 { + bomCleanedData := bom.Clean(blockData) + parser.pos += int64(n - len(bomCleanedData)) + blockData = bomCleanedData + } + parser.appendBuf.Write(blockData) parser.buf = parser.appendBuf.Bytes() if parser.metrics != nil { parser.metrics.ChunkParserReadBlockSecondsHistogram.Observe(time.Since(startTime).Seconds()) diff --git a/br/pkg/lightning/mydump/reader.go b/br/pkg/lightning/mydump/reader.go index 4837b35aceab2..3735e97cb48ee 100644 --- a/br/pkg/lightning/mydump/reader.go +++ b/br/pkg/lightning/mydump/reader.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/worker" "github.com/pingcap/tidb/br/pkg/storage" + "github.com/spkg/bom" "go.uber.org/zap" "golang.org/x/text/encoding/simplifiedchinese" ) @@ -83,7 +84,7 @@ func ExportStatement(ctx context.Context, store storage.ExternalStorage, sqlFile } defer fd.Close() - br := bufio.NewReader(fd) + br := bufio.NewReader(bom.NewReader(fd)) data := make([]byte, 0, sqlFile.FileMeta.FileSize+1) buffer := make([]byte, 0, sqlFile.FileMeta.FileSize+1) diff --git a/br/pkg/lightning/restore/BUILD.bazel b/br/pkg/lightning/restore/BUILD.bazel index ef5aeb106585b..06e503e0519db 100644 --- a/br/pkg/lightning/restore/BUILD.bazel +++ b/br/pkg/lightning/restore/BUILD.bazel @@ -80,7 +80,6 @@ go_library( "@com_github_tikv_pd_client//:client", "@io_etcd_go_etcd_client_v3//:client", "@org_golang_google_grpc//:grpc", - "@org_golang_google_grpc//keepalive", "@org_golang_x_exp//maps", "@org_golang_x_exp//slices", "@org_golang_x_sync//errgroup", diff --git a/br/pkg/lightning/restore/precheck_impl.go b/br/pkg/lightning/restore/precheck_impl.go index f412b101ff08b..8d5142a8b5fd4 100644 --- 
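// NOTE: an editor's illustrative sketch, not part of the patch. The parser.go
// and reader.go hunks above delegate UTF-8 BOM handling to
// github.com/spkg/bom: bom.Clean strips a leading BOM from a byte slice (the
// parser then advances parser.pos by the number of bytes dropped, keeping
// file offsets consistent), and bom.NewReader does the same transparently for
// a stream. A minimal, self-contained demonstration of both helpers:
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/spkg/bom"
)

func main() {
	// A CSV payload prefixed with the 3-byte UTF-8 BOM (EF BB BF).
	raw := "\xEF\xBB\xBFid,val1\n1,aaa01\n"

	// Slice form: bom.Clean returns the data without the BOM.
	cleaned := bom.Clean([]byte(raw))
	fmt.Printf("dropped %d bytes\n", len(raw)-len(cleaned)) // dropped 3 bytes

	// Stream form: bom.NewReader skips the BOM before the first read.
	out, _ := io.ReadAll(bom.NewReader(strings.NewReader(raw)))
	fmt.Printf("%q\n", out[:7]) // "id,val1"
}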
a/br/pkg/lightning/restore/precheck_impl.go +++ b/br/pkg/lightning/restore/precheck_impl.go @@ -48,7 +48,6 @@ import ( "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" ) type clusterResourceCheckItem struct { @@ -733,11 +732,7 @@ func dialEtcdWithCfg(ctx context.Context, cfg *config.Config) (*clientv3.Client, AutoSyncInterval: 30 * time.Second, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{ - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 10 * time.Second, - Timeout: 3 * time.Second, - PermitWithoutStream: false, - }), + config.DefaultGrpcKeepaliveParams, grpc.WithBlock(), grpc.WithReturnConnectionError(), }, diff --git a/br/pkg/lightning/tikv/BUILD.bazel b/br/pkg/lightning/tikv/BUILD.bazel index 596aa52075758..48758bfedaacf 100644 --- a/br/pkg/lightning/tikv/BUILD.bazel +++ b/br/pkg/lightning/tikv/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//br/pkg/lightning/common", + "//br/pkg/lightning/config", "//br/pkg/lightning/log", "//br/pkg/pdutil", "//br/pkg/version", diff --git a/br/pkg/lightning/tikv/tikv.go b/br/pkg/lightning/tikv/tikv.go index 8d2d797d322d1..53c06cc6102f6 100644 --- a/br/pkg/lightning/tikv/tikv.go +++ b/br/pkg/lightning/tikv/tikv.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/kvproto/pkg/debugpb" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/tidb/br/pkg/lightning/common" + "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/version" @@ -88,7 +89,7 @@ func withTiKVConnection(ctx context.Context, tls *common.TLS, tikvAddr string, a // Connect to the ImportSST service on the given TiKV node. // The connection is needed for executing `action` and will be tear down // when this function exits. - conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption()) + conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption(), config.DefaultGrpcKeepaliveParams) if err != nil { return errors.Trace(err) } @@ -172,7 +173,8 @@ var fetchModeRegexp = regexp.MustCompile(`\btikv_config_rocksdb\{cf="default",na // FetchMode obtains the import mode status of the TiKV node. 
func FetchMode(ctx context.Context, tls *common.TLS, tikvAddr string) (import_sstpb.SwitchMode, error) { - conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption()) + conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption(), + config.DefaultGrpcKeepaliveParams) if err != nil { return 0, err } diff --git a/br/pkg/lightning/web/BUILD.bazel b/br/pkg/lightning/web/BUILD.bazel index 842eb48fb3dd3..93cb28cdaf0ce 100644 --- a/br/pkg/lightning/web/BUILD.bazel +++ b/br/pkg/lightning/web/BUILD.bazel @@ -4,7 +4,7 @@ go_library( name = "web", srcs = [ "progress.go", - "res.go", + "res.go", #keep "res_vfsdata.go", ], importpath = "github.com/pingcap/tidb/br/pkg/lightning/web", diff --git a/br/pkg/restore/split/BUILD.bazel b/br/pkg/restore/split/BUILD.bazel index 1726817092ba8..5ddd7b7671822 100644 --- a/br/pkg/restore/split/BUILD.bazel +++ b/br/pkg/restore/split/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//br/pkg/conn/util", "//br/pkg/errors", "//br/pkg/httputil", + "//br/pkg/lightning/config", "//br/pkg/logutil", "//br/pkg/redact", "//br/pkg/utils", diff --git a/br/pkg/restore/split/client.go b/br/pkg/restore/split/client.go index 5f6788d6ee470..72482a94e87dc 100644 --- a/br/pkg/restore/split/client.go +++ b/br/pkg/restore/split/client.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/httputil" + "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/store/pdtypes" pd "github.com/tikv/pd/client" @@ -201,7 +202,9 @@ func (c *pdClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key if err != nil { return nil, errors.Trace(err) } - conn, err := grpc.Dial(store.GetAddress(), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.Dial(store.GetAddress(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + config.DefaultGrpcKeepaliveParams) if err != nil { return nil, errors.Trace(err) } @@ -341,7 +344,8 @@ func sendSplitRegionRequest(ctx context.Context, c *pdClient, regionInfo *Region if c.tlsConf != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(c.tlsConf)) } - conn, err := grpc.Dial(store.GetAddress(), opt) + conn, err := grpc.Dial(store.GetAddress(), opt, + config.DefaultGrpcKeepaliveParams) if err != nil { return false, nil, err } diff --git a/br/tests/lightning_bom_file/config.toml b/br/tests/lightning_bom_file/config.toml new file mode 100644 index 0000000000000..291d1b166103a --- /dev/null +++ b/br/tests/lightning_bom_file/config.toml @@ -0,0 +1,2 @@ +[mydumper.csv] +header = true diff --git a/br/tests/lightning_bom_file/data/mytest.testtbl-schema.sql b/br/tests/lightning_bom_file/data/mytest.testtbl-schema.sql new file mode 100644 index 0000000000000..4232788898790 --- /dev/null +++ b/br/tests/lightning_bom_file/data/mytest.testtbl-schema.sql @@ -0,0 +1,5 @@ +CREATE TABLE testtbl ( + id INTEGER, + val1 VARCHAR(40) NOT NULL, + INDEX `idx_val1` (`val1`) +); diff --git a/br/tests/lightning_bom_file/data/mytest.testtbl.csv b/br/tests/lightning_bom_file/data/mytest.testtbl.csv new file mode 100644 index 0000000000000..e0931cce2a480 --- /dev/null +++ b/br/tests/lightning_bom_file/data/mytest.testtbl.csv @@ -0,0 +1,6 @@ +id,val1 +1,"aaa01" +2,"aaa01" +3,"aaa02" +4,"aaa02" +5,"aaa05" diff --git a/br/tests/lightning_bom_file/original_data/mytest.testtbl-schema.sql b/br/tests/lightning_bom_file/original_data/mytest.testtbl-schema.sql new file mode 100644 index 
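// NOTE: an editor's illustrative sketch, not part of the patch. The dial
// sites changed above all attach config.DefaultGrpcKeepaliveParams, defined
// earlier in this diff in br/pkg/lightning/config/const.go: the client pings
// a connection after 10s of inactivity and closes it if the ping is not
// acknowledged within 20s, so dead TiKV/PD links fail fast instead of
// hanging. The inline equivalent, using a hypothetical address:
package main

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.DialContext(context.Background(),
		"127.0.0.1:20160", // hypothetical TiKV address, for illustration only
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                10 * time.Second, // ping after 10s of inactivity
			Timeout:             20 * time.Second, // drop the link if no ack within 20s
			PermitWithoutStream: false,            // only ping while RPCs are active
		}))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}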
0000000000000..dc1e032a16618 --- /dev/null +++ b/br/tests/lightning_bom_file/original_data/mytest.testtbl-schema.sql @@ -0,0 +1,5 @@ +CREATE TABLE testtbl ( + id INTEGER, + val1 VARCHAR(40) NOT NULL, + INDEX `idx_val1` (`val1`) +); diff --git a/br/tests/lightning_bom_file/original_data/mytest.testtbl.csv b/br/tests/lightning_bom_file/original_data/mytest.testtbl.csv new file mode 100644 index 0000000000000..270c410cd79fd --- /dev/null +++ b/br/tests/lightning_bom_file/original_data/mytest.testtbl.csv @@ -0,0 +1,6 @@ +id,val1 +1,"aaa01" +2,"aaa01" +3,"aaa02" +4,"aaa02" +5,"aaa05" diff --git a/br/tests/lightning_bom_file/run.sh b/br/tests/lightning_bom_file/run.sh new file mode 100755 index 0000000000000..88eada54c74a9 --- /dev/null +++ b/br/tests/lightning_bom_file/run.sh @@ -0,0 +1,56 @@ +#!/bin/sh +# +# Copyright 2023 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux + +mydir=$(dirname "${BASH_SOURCE[0]}") + +original_schema_file="${mydir}/original_data/mytest.testtbl-schema.sql" +original_data_file="${mydir}/original_data/mytest.testtbl.csv" +schema_file="${original_schema_file/original_data/data}" +data_file="${original_data_file/original_data/data}" + +# add the BOM header +printf '\xEF\xBB\xBF' | cat - <( sed '1s/^\xEF\xBB\xBF//' "${original_schema_file}" ) > "${schema_file}" +printf '\xEF\xBB\xBF' | cat - <( sed '1s/^\xEF\xBB\xBF//' "${original_data_file}" ) > "${data_file}" + +# verify the BOM header +if ! grep -q $'^\xEF\xBB\xBF' "${schema_file}"; then + echo "schema file doesn't contain the BOM header" >&2 + exit 1 +fi + +if ! 
grep -q $'^\xEF\xBB\xBF' "${data_file}"; then + echo "data file doesn't contain the BOM header" >&2 + exit 1 +fi + +row_count=$( sed '1d' "${data_file}" | wc -l | xargs echo ) + +run_lightning --backend tidb + +# Check that everything is correctly imported +run_sql 'SELECT count(*) FROM mytest.testtbl' +check_contains "count(*): ${row_count}" + +check_cluster_version 4 0 0 'local backend' || exit 0 +run_sql "DROP TABLE mytest.testtbl" + +run_lightning --backend local + +# Check that everything is correctly imported +run_sql 'SELECT count(*) FROM mytest.testtbl' +check_contains "count(*): ${row_count}" diff --git a/build/BUILD.bazel b/build/BUILD.bazel index 23cf263d525e3..3c2a569dd80d8 100644 --- a/build/BUILD.bazel +++ b/build/BUILD.bazel @@ -55,6 +55,7 @@ STATICHECK_ANALYZERS = [ "S1039", "S1040", "SA1019", + "SA1029", "SA2000", "SA2001", "SA2003", diff --git a/build/linter/BUILD.bazel b/build/linter/BUILD.bazel new file mode 100644 index 0000000000000..e5407284430fa --- /dev/null +++ b/build/linter/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "linter", + srcs = ["linter.go"], + importpath = "github.com/pingcap/tidb/build/linter", + visibility = ["//visibility:public"], + deps = ["@com_github_apache_skywalking_eyes//pkg/config"], +) diff --git a/build/linter/linter.go b/build/linter/linter.go new file mode 100644 index 0000000000000..794a7c37039ef --- /dev/null +++ b/build/linter/linter.go @@ -0,0 +1,20 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linter + +import ( + // This blank import is necessary to keep skywalking-eyes as a dependency in go.mod.
+ _ "github.com/apache/skywalking-eyes/pkg/config" +) diff --git a/build/nogo_config.json b/build/nogo_config.json index 0aa1bd67a2217..90a5b7cae853f 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -1,7 +1,7 @@ { "all_revive": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/rules_go_work-*": "ignore generated code", ".*_/testmain\\.go$": "ignore code" @@ -9,32 +9,32 @@ }, "asciicheck": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "br/pkg/lightning/web/res_vfsdata.go": "ignore code" } }, "asmdecl": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "assign": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "atomic": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "atomicalign": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, @@ -46,31 +46,31 @@ }, "bools": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "buildtag": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "printf": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "unreachable": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "composites": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "br/pkg/glue/console_glue_test.go": "ignore code", "br/pkg/restore/db_test.go": "ignore code", @@ -79,45 +79,45 @@ }, "copylocks": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo code" } }, "ctrlflow": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "deadcode": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "deepequalerrors": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "durationcheck": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", "/rules_go_work-*": "ignore generated code", ".*_generated\\.go$": "ignore 
generated code" } }, "errorsas": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "errcheck": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", ".*_test\\.go$": "ignore generated code", "util/logutil": "ignore util/logutil code", @@ -131,20 +131,20 @@ }, "exportloopref": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "filepermission": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", ".*_/testmain\\.go$": "ignore code" } }, "fieldalignment": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", ".*_/testmain\\.go$": "ignore code", ".*_test\\.go$": "ignore test code" @@ -188,13 +188,13 @@ }, "findcall": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "forcetypeassert": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" }, "only_files": { @@ -219,7 +219,7 @@ }, "gofmt": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo code", "/rules_go_work-*": "ignore generated code", @@ -230,7 +230,7 @@ "gci": { "exclude_files": { "external/": "no need to vet third party code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo code", ".*\\.pb\\.go$": "generated code", @@ -260,37 +260,37 @@ }, "httpresponse": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "ifaceassert": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "ineffassign": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "inspect": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "loopclosure": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "lostcancel": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, @@ -298,33 +298,33 @@ "exclude_files": { "/cgo/": "ignore cgo code", ".*_test\\.go$": "ignore generated code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", 
".*_generated\\.go$": "ignore generated code" } }, "misspell": { "exclude_files": { "/cgo/": "ignore cgo code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "nilfunc": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "nilness": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo" } }, "noloopclosure": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" }, "only_files": { @@ -339,13 +339,13 @@ }, "pkgfact": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "revive": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", "GOROOT/": "ignore code", "/cgo/": "ignore cgo", "tools/": "ignore tool code", @@ -411,49 +411,49 @@ }, "shift": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "sortslice": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "stdmethods": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "stringintconv": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "structtag": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "testinggoroutine": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "tests": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "unconvert": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*\\.pb\\.go$": "generated code", "parser/parser.go": "generated code", "/cgo/": "no need to vet third party code for cgo", @@ -465,27 +465,27 @@ }, "unmarshal": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "unsafeptr": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/digester.go": "ignore code" } }, "unusedresult": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/digester_test.go": "ignore code" } }, "rowserrcheck": { 
"exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "server/tidb_test.go": "ignore test code", "server/tidb_serial_test.go": "ignore test code", @@ -497,250 +497,250 @@ }, "S1000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1002": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1003": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1004": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1005": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1006": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1007": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1008": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1009": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1010": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1011": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1012": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1013": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1014": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1015": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1016": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1017": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated 
code" } }, "S1018": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1019": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/parser.go": "ignore code" } }, "S1020": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1021": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "tools/check/ut.go": "ignore code" } }, "S1022": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1023": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/parser.go": "ignore code" } }, "S1024": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1025": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1026": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1027": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1028": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1029": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1030": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1031": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1032": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1033": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1034": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1035": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1036": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore 
generated code" } }, "S1037": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1038": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1039": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1040": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/parser.go": "ignore generated code" } @@ -748,7 +748,7 @@ "SA1019": { "exclude_files": { "/build/": "no need to linter code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", ".*_test\\.go$": "ignore test code", "br/pkg/restore/split/client.go": "github.com/golang/protobuf deprecated", @@ -773,135 +773,142 @@ "meta": "meta code" } }, - "SA2000": { + "SA1029": { "exclude_files": { "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + ".*_test\\.go$": "ignore test code" + } + }, + "SA2000": { + "exclude_files": { + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA2001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA2003": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA3000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA3001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA4009": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5002": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5003": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5004": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5005": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5007": { 
"exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5008": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5009": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5010": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5011": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5012": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA6000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA6001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA6005": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "prealloc": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/yy_parser.go": "ignore generated code", "/cgo/": "no need to vet third party code for cgo" @@ -909,7 +916,7 @@ }, "predeclared": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/yy_parser.go": "ignore generated code", "parser/parser.go": "ignore generated code", @@ -918,7 +925,7 @@ }, "U1000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } } diff --git a/build/patches/com_github_rivo_uniseg.patch b/build/patches/com_github_rivo_uniseg.patch deleted file mode 100644 index 43c2c40933b11..0000000000000 --- a/build/patches/com_github_rivo_uniseg.patch +++ /dev/null @@ -1,542 +0,0 @@ -From 1492043a155839cb863210d4f564be3fa640c0d9 Mon Sep 17 00:00:00 2001 -From: Weizhen Wang -Date: Sat, 8 Oct 2022 11:41:06 +0800 -Subject: [PATCH] update - -Signed-off-by: Weizhen Wang ---- - BUILD.bazel | 27 +++++ - WORKSPACE | 2 + - gen_breaktest.go | 213 -------------------------------------- - gen_properties.go | 256 ---------------------------------------------- - 4 files changed, 29 insertions(+), 469 deletions(-) - create mode 100644 BUILD.bazel - create mode 100644 WORKSPACE - delete mode 100644 gen_breaktest.go - delete mode 100644 gen_properties.go - -diff --git a/BUILD.bazel b/BUILD.bazel -new file mode 100644 -index 0000000..a1e5c89 ---- /dev/null -+++ b/BUILD.bazel -@@ -0,0 +1,27 @@ -+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") -+ -+go_library( -+ name = "uniseg", -+ srcs = [ -+ "doc.go", -+ "eastasianwidth.go", -+ "emojipresentation.go", -+ 
"grapheme.go", -+ "graphemeproperties.go", -+ "graphemerules.go", -+ "line.go", -+ "lineproperties.go", -+ "linerules.go", -+ "properties.go", -+ "sentence.go", -+ "sentenceproperties.go", -+ "sentencerules.go", -+ "step.go", -+ "width.go", -+ "word.go", -+ "wordproperties.go", -+ "wordrules.go", -+ ], -+ importpath = "github.com/rivo/uniseg", -+ visibility = ["//visibility:public"], -+) -diff --git a/WORKSPACE b/WORKSPACE -new file mode 100644 -index 0000000..d596273 ---- /dev/null -+++ b/WORKSPACE -@@ -0,0 +1,2 @@ -+# DO NOT EDIT: automatically generated WORKSPACE file for go_repository rule -+workspace(name = "com_github_rivo_uniseg") -diff --git a/gen_breaktest.go b/gen_breaktest.go -deleted file mode 100644 -index e613c4c..0000000 ---- a/gen_breaktest.go -+++ /dev/null -@@ -1,213 +0,0 @@ --//go:build generate -- --// This program generates a Go containing a slice of test cases based on the --// Unicode Character Database auxiliary data files. The command line arguments --// are as follows: --// --// 1. The name of the Unicode data file (just the filename, without extension). --// 2. The name of the locally generated Go file. --// 3. The name of the slice containing the test cases. --// 4. The name of the generator, for logging purposes. --// --//go:generate go run gen_breaktest.go GraphemeBreakTest graphemebreak_test.go graphemeBreakTestCases graphemes --//go:generate go run gen_breaktest.go WordBreakTest wordbreak_test.go wordBreakTestCases words --//go:generate go run gen_breaktest.go SentenceBreakTest sentencebreak_test.go sentenceBreakTestCases sentences --//go:generate go run gen_breaktest.go LineBreakTest linebreak_test.go lineBreakTestCases lines -- --package main -- --import ( -- "bufio" -- "bytes" -- "errors" -- "fmt" -- "go/format" -- "io/ioutil" -- "log" -- "net/http" -- "os" -- "time" --) -- --// We want to test against a specific version rather than the latest. When the --// package is upgraded to a new version, change these to generate new tests. --const ( -- testCaseURL = `https://www.unicode.org/Public/14.0.0/ucd/auxiliary/%s.txt` --) -- --func main() { -- if len(os.Args) < 5 { -- fmt.Println("Not enough arguments, see code for details") -- os.Exit(1) -- } -- -- log.SetPrefix("gen_breaktest (" + os.Args[4] + "): ") -- log.SetFlags(0) -- -- // Read text of testcases and parse into Go source code. -- src, err := parse(fmt.Sprintf(testCaseURL, os.Args[1])) -- if err != nil { -- log.Fatal(err) -- } -- -- // Format the Go code. -- formatted, err := format.Source(src) -- if err != nil { -- log.Fatalln("gofmt:", err) -- } -- -- // Write it out. -- log.Print("Writing to ", os.Args[2]) -- if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil { -- log.Fatal(err) -- } --} -- --// parse reads a break text file, either from a local file or from a URL. It --// parses the file data into Go source code representing the test cases. --func parse(url string) ([]byte, error) { -- log.Printf("Parsing %s", url) -- res, err := http.Get(url) -- if err != nil { -- return nil, err -- } -- body := res.Body -- defer body.Close() -- -- buf := new(bytes.Buffer) -- buf.Grow(120 << 10) -- buf.WriteString(`package uniseg -- --// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. -- --// ` + os.Args[3] + ` are Grapheme testcases taken from --// ` + url + ` --// on ` + time.Now().Format("January 2, 2006") + `. See --// https://www.unicode.org/license.html for the Unicode license agreement. 
--var ` + os.Args[3] + ` = []testCase { --`) -- -- sc := bufio.NewScanner(body) -- num := 1 -- var line []byte -- original := make([]byte, 0, 64) -- expected := make([]byte, 0, 64) -- for sc.Scan() { -- num++ -- line = sc.Bytes() -- if len(line) == 0 || line[0] == '#' { -- continue -- } -- var comment []byte -- if i := bytes.IndexByte(line, '#'); i >= 0 { -- comment = bytes.TrimSpace(line[i+1:]) -- line = bytes.TrimSpace(line[:i]) -- } -- original, expected, err := parseRuneSequence(line, original[:0], expected[:0]) -- if err != nil { -- return nil, fmt.Errorf(`line %d: %v: %q`, num, err, line) -- } -- fmt.Fprintf(buf, "\t{original: \"%s\", expected: %s}, // %s\n", original, expected, comment) -- } -- if err := sc.Err(); err != nil { -- return nil, err -- } -- -- // Check for final "# EOF", useful check if we're streaming via HTTP -- if !bytes.Equal(line, []byte("# EOF")) { -- return nil, fmt.Errorf(`line %d: exected "# EOF" as final line, got %q`, num, line) -- } -- buf.WriteString("}\n") -- return buf.Bytes(), nil --} -- --// Used by parseRuneSequence to match input via bytes.HasPrefix. --var ( -- prefixBreak = []byte("÷ ") -- prefixDontBreak = []byte("× ") -- breakOk = []byte("÷") -- breakNo = []byte("×") --) -- --// parseRuneSequence parses a rune + breaking opportunity sequence from b --// and appends the Go code for testcase.original to orig --// and appends the Go code for testcase.expected to exp. --// It retuns the new orig and exp slices. --// --// E.g. for the input b="÷ 0020 × 0308 ÷ 1F1E6 ÷" --// it will append --// "\u0020\u0308\U0001F1E6" --// and "[][]rune{{0x0020,0x0308},{0x1F1E6},}" --// to orig and exp respectively. --// --// The formatting of exp is expected to be cleaned up by gofmt or format.Source. --// Note we explicitly require the sequence to start with ÷ and we implicitly --// require it to end with ÷. --func parseRuneSequence(b, orig, exp []byte) ([]byte, []byte, error) { -- // Check for and remove first ÷ or ×. -- if !bytes.HasPrefix(b, prefixBreak) && !bytes.HasPrefix(b, prefixDontBreak) { -- return nil, nil, errors.New("expected ÷ or × as first character") -- } -- if bytes.HasPrefix(b, prefixBreak) { -- b = b[len(prefixBreak):] -- } else { -- b = b[len(prefixDontBreak):] -- } -- -- boundary := true -- exp = append(exp, "[][]rune{"...) -- for len(b) > 0 { -- if boundary { -- exp = append(exp, '{') -- } -- exp = append(exp, "0x"...) -- // Find end of hex digits. -- var i int -- for i = 0; i < len(b) && b[i] != ' '; i++ { -- if d := b[i]; ('0' <= d || d <= '9') || -- ('A' <= d || d <= 'F') || -- ('a' <= d || d <= 'f') { -- continue -- } -- return nil, nil, errors.New("bad hex digit") -- } -- switch i { -- case 4: -- orig = append(orig, "\\u"...) -- case 5: -- orig = append(orig, "\\U000"...) -- default: -- return nil, nil, errors.New("unsupport code point hex length") -- } -- orig = append(orig, b[:i]...) -- exp = append(exp, b[:i]...) -- b = b[i:] -- -- // Check for space between hex and ÷ or ×. -- if len(b) < 1 || b[0] != ' ' { -- return nil, nil, errors.New("bad input") -- } -- b = b[1:] -- -- // Check for next boundary. 
-- switch { -- case bytes.HasPrefix(b, breakOk): -- boundary = true -- b = b[len(breakOk):] -- case bytes.HasPrefix(b, breakNo): -- boundary = false -- b = b[len(breakNo):] -- default: -- return nil, nil, errors.New("missing ÷ or ×") -- } -- if boundary { -- exp = append(exp, '}') -- } -- exp = append(exp, ',') -- if len(b) > 0 && b[0] == ' ' { -- b = b[1:] -- } -- } -- exp = append(exp, '}') -- return orig, exp, nil --} -diff --git a/gen_properties.go b/gen_properties.go -deleted file mode 100644 -index 999d5ef..0000000 ---- a/gen_properties.go -+++ /dev/null -@@ -1,256 +0,0 @@ --//go:build generate -- --// This program generates a property file in Go from Unicode Character --// Database auxiliary data files. The command line arguments are as follows: --// --// 1. The name of the Unicode data file (just the filename, without extension). --// Can be "-" (to skip) if the emoji flag is included. --// 2. The name of the locally generated Go file. --// 3. The name of the slice mapping code points to properties. --// 4. The name of the generator, for logging purposes. --// 5. (Optional) Flags, comma-separated. The following flags are available: --// - "emojis=": include the specified emoji properties (e.g. --// "Extended_Pictographic"). --// - "gencat": include general category properties. --// --//go:generate go run gen_properties.go auxiliary/GraphemeBreakProperty graphemeproperties.go graphemeCodePoints graphemes emojis=Extended_Pictographic --//go:generate go run gen_properties.go auxiliary/WordBreakProperty wordproperties.go workBreakCodePoints words emojis=Extended_Pictographic --//go:generate go run gen_properties.go auxiliary/SentenceBreakProperty sentenceproperties.go sentenceBreakCodePoints sentences --//go:generate go run gen_properties.go LineBreak lineproperties.go lineBreakCodePoints lines gencat --//go:generate go run gen_properties.go EastAsianWidth eastasianwidth.go eastAsianWidth eastasianwidth --//go:generate go run gen_properties.go - emojipresentation.go emojiPresentation emojipresentation emojis=Emoji_Presentation --package main -- --import ( -- "bufio" -- "bytes" -- "errors" -- "fmt" -- "go/format" -- "io/ioutil" -- "log" -- "net/http" -- "os" -- "regexp" -- "sort" -- "strconv" -- "strings" -- "time" --) -- --// We want to test against a specific version rather than the latest. When the --// package is upgraded to a new version, change these to generate new tests. --const ( -- propertyURL = `https://www.unicode.org/Public/14.0.0/ucd/%s.txt` -- emojiURL = `https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt` --) -- --// The regular expression for a line containing a code point range property. --var propertyPattern = regexp.MustCompile(`^([0-9A-F]{4,6})(\.\.([0-9A-F]{4,6}))?\s*;\s*([A-Za-z0-9_]+)\s*#\s(.+)$`) -- --func main() { -- if len(os.Args) < 5 { -- fmt.Println("Not enough arguments, see code for details") -- os.Exit(1) -- } -- -- log.SetPrefix("gen_properties (" + os.Args[4] + "): ") -- log.SetFlags(0) -- -- // Parse flags. -- flags := make(map[string]string) -- if len(os.Args) >= 6 { -- for _, flag := range strings.Split(os.Args[5], ",") { -- flagFields := strings.Split(flag, "=") -- if len(flagFields) == 1 { -- flags[flagFields[0]] = "yes" -- } else { -- flags[flagFields[0]] = flagFields[1] -- } -- } -- } -- -- // Parse the text file and generate Go source code from it.
-- _, includeGeneralCategory := flags["gencat"] -- var mainURL string -- if os.Args[1] != "-" { -- mainURL = fmt.Sprintf(propertyURL, os.Args[1]) -- } -- src, err := parse(mainURL, flags["emojis"], includeGeneralCategory) -- if err != nil { -- log.Fatal(err) -- } -- -- // Format the Go code. -- formatted, err := format.Source([]byte(src)) -- if err != nil { -- log.Fatal("gofmt:", err) -- } -- -- // Save it to the (local) target file. -- log.Print("Writing to ", os.Args[2]) -- if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil { -- log.Fatal(err) -- } --} -- --// parse parses the Unicode Properties text files located at the given URLs and --// returns their equivalent Go source code to be used in the uniseg package. If --// "emojiProperty" is not an empty string, emoji code points for that emoji --// property (e.g. "Extended_Pictographic") will be included. In those cases, you --// may pass an empty "propertyURL" to skip parsing the main properties file. If --// "includeGeneralCategory" is true, the Unicode General Category property will --// be extracted from the comments and included in the output. --func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (string, error) { -- if propertyURL == "" && emojiProperty == "" { -- return "", errors.New("no properties to parse") -- } -- -- // Temporary buffer to hold properties. -- var properties [][4]string -- -- // Open the first URL. -- if propertyURL != "" { -- log.Printf("Parsing %s", propertyURL) -- res, err := http.Get(propertyURL) -- if err != nil { -- return "", err -- } -- in1 := res.Body -- defer in1.Close() -- -- // Parse it. -- scanner := bufio.NewScanner(in1) -- num := 0 -- for scanner.Scan() { -- num++ -- line := strings.TrimSpace(scanner.Text()) -- -- // Skip comments and empty lines. -- if strings.HasPrefix(line, "#") || line == "" { -- continue -- } -- -- // Everything else must be a code point range, a property and a comment. -- from, to, property, comment, err := parseProperty(line) -- if err != nil { -- return "", fmt.Errorf("%s line %d: %v", os.Args[4], num, err) -- } -- properties = append(properties, [4]string{from, to, property, comment}) -- } -- if err := scanner.Err(); err != nil { -- return "", err -- } -- } -- -- // Open the second URL. -- if emojiProperty != "" { -- log.Printf("Parsing %s", emojiURL) -- res, err := http.Get(emojiURL) -- if err != nil { -- return "", err -- } -- in2 := res.Body -- defer in2.Close() -- -- // Parse it. -- scanner := bufio.NewScanner(in2) -- num := 0 -- for scanner.Scan() { -- num++ -- line := scanner.Text() -- -- // Skip comments, empty lines, and everything not containing -- // "Extended_Pictographic". -- if strings.HasPrefix(line, "#") || line == "" || !strings.Contains(line, emojiProperty) { -- continue -- } -- -- // Everything else must be a code point range, a property and a comment. -- from, to, property, comment, err := parseProperty(line) -- if err != nil { -- return "", fmt.Errorf("emojis line %d: %v", num, err) -- } -- properties = append(properties, [4]string{from, to, property, comment}) -- } -- if err := scanner.Err(); err != nil { -- return "", err -- } -- } -- -- // Sort properties. -- sort.Slice(properties, func(i, j int) bool { -- left, _ := strconv.ParseUint(properties[i][0], 16, 64) -- right, _ := strconv.ParseUint(properties[j][0], 16, 64) -- return left < right -- }) -- -- // Header. 
-- var ( -- buf bytes.Buffer -- emojiComment string -- ) -- columns := 3 -- if includeGeneralCategory { -- columns = 4 -- } -- if emojiURL != "" { -- emojiComment = ` --// and --// ` + emojiURL + ` --// ("Extended_Pictographic" only)` -- } -- buf.WriteString(`package uniseg -- --// Code generated via go generate from gen_properties.go. DO NOT EDIT. -- --// ` + os.Args[3] + ` are taken from --// ` + propertyURL + emojiComment + ` --// on ` + time.Now().Format("January 2, 2006") + `. See https://www.unicode.org/license.html for the Unicode --// license agreement. --var ` + os.Args[3] + ` = [][` + strconv.Itoa(columns) + `]int{ -- `) -- -- // Properties. -- for _, prop := range properties { -- if includeGeneralCategory { -- generalCategory := "gc" + prop[3][:2] -- if generalCategory == "gcL&" { -- generalCategory = "gcLC" -- } -- prop[3] = prop[3][3:] -- fmt.Fprintf(&buf, "{0x%s,0x%s,%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), generalCategory, prop[3]) -- } else { -- fmt.Fprintf(&buf, "{0x%s,0x%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), prop[3]) -- } -- } -- -- // Tail. -- buf.WriteString("}") -- -- return buf.String(), nil --} -- --// parseProperty parses a line of the Unicode properties text file containing a --// property for a code point range and returns it along with its comment. --func parseProperty(line string) (from, to, property, comment string, err error) { -- fields := propertyPattern.FindStringSubmatch(line) -- if fields == nil { -- err = errors.New("no property found") -- return -- } -- from = fields[1] -- to = fields[3] -- if to == "" { -- to = from -- } -- property = fields[4] -- comment = fields[5] -- return --} -- --// translateProperty translates a property name as used in the Unicode data file --// to a variable used in the Go code. --func translateProperty(prefix, property string) string { -- return prefix + strings.ReplaceAll(property, "_", "") --} --- -2.38.0 - diff --git a/cmd/pluginpkg/pluginpkg b/cmd/pluginpkg/pluginpkg deleted file mode 100755 index 9da90a4758831..0000000000000 Binary files a/cmd/pluginpkg/pluginpkg and /dev/null differ diff --git a/ddl/BUILD.bazel b/ddl/BUILD.bazel index 32387949c14c3..d3636a9d64b2c 100644 --- a/ddl/BUILD.bazel +++ b/ddl/BUILD.bazel @@ -149,7 +149,6 @@ go_test( srcs = [ "attributes_sql_test.go", "backfilling_test.go", - "callback_test.go", "cancel_test.go", "cluster_test.go", "column_change_test.go", @@ -176,7 +175,6 @@ go_test( "foreign_key_test.go", "index_change_test.go", "index_cop_test.go", - "index_merge_tmp_test.go", "index_modify_test.go", "integration_test.go", "job_table_test.go", @@ -210,7 +208,7 @@ go_test( deps = [ "//autoid_service", "//config", - "//ddl/ingest", + "//ddl/internal/callback", "//ddl/placement", "//ddl/resourcegroup", "//ddl/schematracker", diff --git a/ddl/backfilling.go b/ddl/backfilling.go index aae3a9b75790e..5036e56c096d8 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -489,7 +489,7 @@ func (w *backfillWorker) run(d *ddlCtx, bf backfiller, job *model.Job) { // splitTableRanges uses PD region's key ranges to split the backfilling table key range space, // to speed up backfilling data in table with disperse handle. // The `t` should be a non-partitioned table or a partition. 
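The hunks that follow thread a new limit argument through splitTableRanges, so one round of backfilling only fetches as many key ranges as its consumer can absorb (backfillTaskChanSize on the normal path, genTaskBatch in splitTableToBackfillJobs). Below is a minimal, self-contained sketch of that capping idea; all names in it are hypothetical stand-ins, not TiDB's actual kv/copr types.

package main

import "fmt"

// keyRange stands in for tidb's kv.KeyRange in this sketch.
type keyRange struct{ start, end string }

// splitWithLimit caps the ranges produced by a region split so that a single
// backfill round never buffers more work than the caller can schedule.
func splitWithLimit(all []keyRange, limit int) []keyRange {
	if limit >= 0 && len(all) > limit {
		return all[:limit]
	}
	return all
}

func main() {
	regions := []keyRange{{"a", "b"}, {"b", "c"}, {"c", "d"}}
	// Only two ranges are handed out this round; the rest wait for the next loop.
	fmt.Println(splitWithLimit(regions, 2))
}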
-func splitTableRanges(t table.PhysicalTable, store kv.Storage, startKey, endKey kv.Key) ([]kv.KeyRange, error) { +func splitTableRanges(t table.PhysicalTable, store kv.Storage, startKey, endKey kv.Key, limit int) ([]kv.KeyRange, error) { logutil.BgLogger().Info("[ddl] split table range from PD", zap.Int64("physicalTableID", t.GetPhysicalID()), zap.String("start key", hex.EncodeToString(startKey)), @@ -504,7 +504,7 @@ func splitTableRanges(t table.PhysicalTable, store kv.Storage, startKey, endKey maxSleep := 10000 // ms bo := backoff.NewBackofferWithVars(context.Background(), maxSleep, nil) rc := copr.NewRegionCache(s.GetRegionCache()) - ranges, err := rc.SplitRegionRanges(bo, []kv.KeyRange{kvRange}) + ranges, err := rc.SplitRegionRanges(bo, []kv.KeyRange{kvRange}, limit) if err != nil { return nil, errors.Trace(err) } @@ -981,7 +981,7 @@ func (dc *ddlCtx) writePhysicalTableRecord(sessPool *sessionPool, t table.Physic } for { - kvRanges, err := splitTableRanges(t, reorgInfo.d.store, startKey, endKey) + kvRanges, err := splitTableRanges(t, reorgInfo.d.store, startKey, endKey, backfillTaskChanSize) if err != nil { return errors.Trace(err) } @@ -1093,7 +1093,7 @@ func (*ddlCtx) splitTableToBackfillJobs(sess *session, reorgInfo *reorgInfo, pTb isFirstOps := true bJobs := make([]*BackfillJob, 0, genTaskBatch) for { - kvRanges, err := splitTableRanges(pTbl, reorgInfo.d.store, startKey, endKey) + kvRanges, err := splitTableRanges(pTbl, reorgInfo.d.store, startKey, endKey, genTaskBatch) if err != nil { return errors.Trace(err) } diff --git a/ddl/cancel_test.go b/ddl/cancel_test.go index 3a5c461ad8461..3f02029ffced7 100644 --- a/ddl/cancel_test.go +++ b/ddl/cancel_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" @@ -264,7 +265,7 @@ func TestCancel(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockBackfillSlow")) }() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} i := atomicutil.NewInt64(0) cancel := atomicutil.NewBool(false) cancelResult := atomicutil.NewBool(false) @@ -282,12 +283,12 @@ func TestCancel(t *testing.T) { } dom.DDL().SetHook(hook.Clone()) - restHook := func(h *ddl.TestDDLCallback) { + restHook := func(h *callback.TestDDLCallback) { h.OnJobRunBeforeExported = nil h.OnJobUpdatedExported.Store(nil) dom.DDL().SetHook(h.Clone()) } - registHook := func(h *ddl.TestDDLCallback, onJobRunBefore bool) { + registHook := func(h *callback.TestDDLCallback, onJobRunBefore bool) { if onJobRunBefore { h.OnJobRunBeforeExported = hookFunc } else { diff --git a/ddl/cluster_test.go b/ddl/cluster_test.go index e2a4302e044ce..55c780d55e536 100644 --- a/ddl/cluster_test.go +++ b/ddl/cluster_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/meta" @@ -84,7 +85,7 @@ func TestFlashbackCloseAndResetPDSchedule(t *testing.T) { defer resetGC() tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionFlashbackCluster, job.Type) if job.SchemaState == model.StateWriteReorganization { @@ -136,7 
+137,7 @@ func TestAddDDLDuringFlashback(t *testing.T) { defer resetGC() tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionFlashbackCluster, job.Type) if job.SchemaState == model.StateWriteOnly { @@ -175,7 +176,7 @@ func TestGlobalVariablesOnFlashback(t *testing.T) { defer resetGC() tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionFlashbackCluster, job.Type) if job.SchemaState == model.StateWriteReorganization { diff --git a/ddl/column_change_test.go b/ddl/column_change_test.go index be393dd488668..76a3b377a5abe 100644 --- a/ddl/column_change_test.go +++ b/ddl/column_change_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" @@ -48,7 +49,7 @@ func TestColumnAdd(t *testing.T) { tk.MustExec("insert t values (1, 2);") d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} ct := testNewContext(store) // set up hook @@ -149,7 +150,7 @@ func TestModifyAutoRandColumnWithMetaKeyChanged(t *testing.T) { tk.MustExec("create table t (a bigint primary key clustered AUTO_RANDOM(5));") d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} var errCount int32 = 3 var genAutoRandErr error @@ -457,7 +458,7 @@ func TestIssue40135(t *testing.T) { tk.MustExec("CREATE TABLE t40135 ( a tinyint DEFAULT NULL, b varchar(32) DEFAULT 'md') PARTITION BY HASH (a) PARTITIONS 2") one := true - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if one { diff --git a/ddl/column_modify_test.go b/ddl/column_modify_test.go index 658039c1092a4..7f3125511c587 100644 --- a/ddl/column_modify_test.go +++ b/ddl/column_modify_test.go @@ -24,7 +24,7 @@ import ( "time" "github.com/pingcap/errors" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" testddlutil "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -664,7 +664,7 @@ func TestTransactionWithWriteOnlyColumn(t *testing.T) { }, } - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -872,7 +872,7 @@ func TestAddGeneratedColumnAndInsert(t *testing.T) { tk1.MustExec("use test") d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} ctx := mock.NewContext() ctx.Store = store times := 0 @@ -916,7 +916,7 @@ func TestColumnTypeChangeGenUniqueChangingName(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} var checkErr error assertChangingColName := "_col$_c2_0" assertChangingIdxName := "_idx$_idx_0" diff --git a/ddl/column_test.go b/ddl/column_test.go index e6c48b1121595..d378c03e297b5 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/pingcap/errors" - 
"github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -672,7 +672,7 @@ func TestAddColumn(t *testing.T) { checkOK := false - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { if checkOK { return @@ -740,7 +740,7 @@ func TestAddColumns(t *testing.T) { err = txn.Commit(context.Background()) require.NoError(t, err) - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { mu.Lock() defer mu.Unlock() @@ -810,7 +810,7 @@ func TestDropColumnInColumnTest(t *testing.T) { var mu sync.Mutex d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { mu.Lock() defer mu.Unlock() @@ -872,7 +872,7 @@ func TestDropColumns(t *testing.T) { var mu sync.Mutex d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { mu.Lock() defer mu.Unlock() @@ -998,7 +998,7 @@ func TestWriteDataWriteOnlyMode(t *testing.T) { originalCallback := dom.DDL().GetHook() defer dom.DDL().SetHook(originalCallback) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { return @@ -1009,7 +1009,7 @@ func TestWriteDataWriteOnlyMode(t *testing.T) { dom.DDL().SetHook(hook) tk.MustExec("alter table t change column `col1` `col1` varchar(20)") - hook = &ddl.TestDDLCallback{Do: dom} + hook = &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { return diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go index 308a815773ce9..0aa303c984398 100644 --- a/ddl/column_type_change_test.go +++ b/ddl/column_type_change_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -121,7 +122,7 @@ func TestColumnTypeChangeStateBetweenInteger(t *testing.T) { require.Equal(t, 2, len(tbl.Cols())) require.NotNil(t, external.GetModifyColumn(t, tk, "test", "t", "c2", false)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -185,7 +186,7 @@ func TestRollbackColumnTypeChangeBetweenInteger(t *testing.T) { require.Equal(t, 2, len(tbl.Cols())) require.NotNil(t, external.GetModifyColumn(t, tk, "test", "t", "c2", false)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} // Mock roll back at model.StateNone. 
customizeHookRollbackAtState(hook, tbl, model.StateNone) dom.DDL().SetHook(hook) @@ -217,7 +218,7 @@ func TestRollbackColumnTypeChangeBetweenInteger(t *testing.T) { assertRollBackedColUnchanged(t, tk) } -func customizeHookRollbackAtState(hook *ddl.TestDDLCallback, tbl table.Table, state model.SchemaState) { +func customizeHookRollbackAtState(hook *callback.TestDDLCallback, tbl table.Table, state model.SchemaState) { hook.OnJobRunBeforeExported = func(job *model.Job) { if tbl.Meta().ID != job.TableID { return @@ -934,7 +935,7 @@ func TestColumnTypeChangeIgnoreDisplayLength(t *testing.T) { assertHasAlterWriteReorg := func(tbl table.Table) { // Restore assertResult to false. assertResult = false - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if tbl.Meta().ID != job.TableID { return @@ -1600,7 +1601,7 @@ func TestChangingColOriginDefaultValue(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") originalHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var ( once bool checkErr error @@ -1679,7 +1680,7 @@ func TestChangingColOriginDefaultValueAfterAddColAndCastSucc(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") originalHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var ( once bool checkErr error @@ -1764,7 +1765,7 @@ func TestChangingColOriginDefaultValueAfterAddColAndCastFail(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") originalHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -1893,7 +1894,7 @@ func TestDDLExitWhenCancelMeetPanic(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockExceedErrorLimit")) }() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var jobID int64 hook.OnJobRunBeforeExported = func(job *model.Job) { if jobID != 0 { @@ -1968,7 +1969,7 @@ func TestCancelCTCInReorgStateWillCauseGoroutineLeak(t *testing.T) { tk.MustExec("insert into ctc_goroutine_leak values(1),(2),(3)") tbl := external.GetTableByName(t, tk, "test", "ctc_goroutine_leak") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var jobID int64 hook.OnJobRunBeforeExported = func(job *model.Job) { if jobID != 0 { @@ -2210,7 +2211,7 @@ func TestCastDateToTimestampInReorgAttribute(t *testing.T) { var checkErr1 error var checkErr2 error - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr1 != nil || checkErr2 != nil || tbl.Meta().ID != job.TableID { return diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index da49688ccc608..2dc12c6cda9e8 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/executor" @@ -74,7 +75,7 @@ func TestShowCreateTable(t *testing.T) { "CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL,\n `c` varchar(1) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT 
CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"}, } prevState := model.StateNone - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} currTestCaseOffset := 0 onJobUpdatedExportedFunc := func(job *model.Job) { if job.SchemaState == prevState || checkErr != nil { @@ -143,7 +144,7 @@ func TestDropNotNullColumn(t *testing.T) { var checkErr error d := dom.DDL() originalCallback := d.GetHook() - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} sqlNum := 0 onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { @@ -222,7 +223,7 @@ func TestTwoStates(t *testing.T) { key(c1, c2))`) tk.MustExec("insert into t values(1, 'a', 'N', '2017-07-01')") - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} prevState := model.StateNone require.NoError(t, testInfo.parseSQLs(parser.New())) @@ -809,7 +810,7 @@ func runTestInSchemaState( // Make sure these SQLs use the plan of index scan. tk.MustExec("drop stats t") - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} prevState := model.StateNone var checkErr error se, err := session.CreateSession(store) @@ -872,7 +873,7 @@ func TestShowIndex(t *testing.T) { tk.MustExec("use test_db_state") tk.MustExec(`create table t(c1 int primary key nonclustered, c2 int)`) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} prevState := model.StateNone showIndexSQL := `show index from t` var checkErr error @@ -1325,7 +1326,7 @@ func TestParallelAlterAndDropSchema(t *testing.T) { } func prepareTestControlParallelExecSQL(t *testing.T, store kv.Storage, dom *domain.Domain) (*testkit.TestKit, *testkit.TestKit, chan struct{}, ddl.Callback) { - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} times := 0 callback.OnJobRunBeforeExported = func(job *model.Job) { if times != 0 { @@ -1433,7 +1434,7 @@ func dbChangeTestParallelExecSQL(t *testing.T, store kv.Storage, dom *domain.Dom var err2, err3 error var wg util.WaitGroupWrapper - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} once := sync.Once{} onJobUpdatedExportedFunc := func(job *model.Job) { // sleep a while, let other job enqueue. 
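A recurring pattern in the test hunks above and below: tests now build callback.TestDDLCallback from the new internal package instead of ddl.TestDDLCallback, and its exported function fields let each test hook into fixed points of the DDL job lifecycle. A simplified sketch of that shape follows; job and testCallback are stand-ins for model.Job and callback.TestDDLCallback, not the real types.

package main

import "fmt"

// job stands in for model.Job in this sketch.
type job struct{ schemaState string }

// testCallback mirrors the shape of callback.TestDDLCallback: exported
// function fields let a test inject behavior without subclassing the
// production callback.
type testCallback struct {
	OnJobRunBeforeExported func(*job)
}

// OnJobRunBefore is what the DDL worker would invoke before running a job.
func (c *testCallback) OnJobRunBefore(j *job) {
	if c.OnJobRunBeforeExported != nil {
		c.OnJobRunBeforeExported(j)
	}
}

func main() {
	hook := &testCallback{}
	hook.OnJobRunBeforeExported = func(j *job) {
		if j.schemaState == "write only" {
			fmt.Println("issue concurrent DML here, as the tests above do")
		}
	}
	// The real tests install the hook with dom.DDL().SetHook(hook).
	hook.OnJobRunBefore(&job{schemaState: "write only"})
}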
@@ -1531,7 +1532,7 @@ func TestParallelDDLBeforeRunDDLJob(t *testing.T) { tk2 := testkit.NewTestKit(t, store) tk2.MustExec("use test_db_state") - intercept := &ddl.TestInterceptor{} + intercept := &callback.TestInterceptor{} var sessionToStart sync.WaitGroup // sessionToStart is a waitgroup to wait for two session to get the same information schema sessionToStart.Add(2) @@ -1574,7 +1575,7 @@ func TestParallelDDLBeforeRunDDLJob(t *testing.T) { wg.Wait() - intercept = &ddl.TestInterceptor{} + intercept = &callback.TestInterceptor{} d.(ddl.DDLForTest).SetInterceptor(intercept) } @@ -1666,7 +1667,7 @@ func TestCreateExpressionIndex(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { return @@ -1741,7 +1742,7 @@ func TestCreateUniqueExpressionIndex(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { return @@ -1850,7 +1851,7 @@ func TestDropExpressionIndex(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { return @@ -1922,7 +1923,7 @@ func TestParallelRenameTable(t *testing.T) { d2 := dom.DDL() originalCallback := d2.GetHook() defer d2.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} callback.OnJobRunBeforeExported = func(job *model.Job) { switch job.SchemaState { case model.StateNone: @@ -2037,7 +2038,7 @@ func TestConcurrentSetDefaultValue(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} skip := false callback.OnJobRunBeforeExported = func(job *model.Job) { switch job.SchemaState { diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 1d482f8cecada..16d3d7a2478e1 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/errors" _ "github.com/pingcap/tidb/autoid_service" "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/schematracker" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -687,7 +687,7 @@ func TestUpdateMultipleTable(t *testing.T) { tk2.MustExec("use test") d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { if job.SchemaState == model.StateWriteOnly { tk2.MustExec("update t1, t2 set t1.c1 = 8, t2.c2 = 10 where t1.c2 = t2.c1") diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go index c6f50fcf71874..97cf16ed694bc 100644 --- a/ddl/db_partition_test.go +++ b/ddl/db_partition_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -2447,7 +2448,7 @@ func 
TestExchangePartitionHook(t *testing.T) { tk.MustExec(`insert into pt values (0), (4), (7)`) tk.MustExec("insert into nt values (1)") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} dom.DDL().SetHook(hook) hookFunc := func(job *model.Job) { @@ -3733,7 +3734,7 @@ func TestTruncatePartitionMultipleTimes(t *testing.T) { dom := domain.GetDomain(tk.Session()) originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} dom.DDL().SetHook(hook) injected := false hook.OnJobRunBeforeExported = func(job *model.Job) { @@ -4543,7 +4544,7 @@ func TestIssue40135Ver2(t *testing.T) { tk.MustExec("CREATE TABLE t40135 ( a int DEFAULT NULL, b varchar(32) DEFAULT 'md', index(a)) PARTITION BY HASH (a) PARTITIONS 6") tk.MustExec("insert into t40135 values (1, 'md'), (2, 'ma'), (3, 'md'), (4, 'ma'), (5, 'md'), (6, 'ma')") one := true - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error var wg sync.WaitGroup wg.Add(1) diff --git a/ddl/db_table_test.go b/ddl/db_table_test.go index 7725eaf981a13..f4952e2e1d483 100644 --- a/ddl/db_table_test.go +++ b/ddl/db_table_test.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" testddlutil "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -185,7 +186,7 @@ func TestTransactionOnAddDropColumn(t *testing.T) { originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -888,7 +889,7 @@ func TestAddColumn2(t *testing.T) { originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var writeOnlyTable table.Table hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState == model.StateWriteOnly { diff --git a/ddl/db_test.go b/ddl/db_test.go index 46cfe301ec4f4..4bfe194d5c626 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -269,7 +270,7 @@ func TestIssue22307(t *testing.T) { tk.MustExec("create table t (a int, b int)") tk.MustExec("insert into t values(1, 1);") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr1, checkErr2 error hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { @@ -570,7 +571,7 @@ func TestAddExpressionIndexRollback(t *testing.T) { tk1.MustExec("use test") d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var currJob *model.Job ctx := mock.NewContext() ctx.Store = store @@ -958,7 +959,7 @@ func TestDDLJobErrorCount(t *testing.T) { }() var jobID int64 - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { jobID = job.ID } @@ -1090,7 +1091,7 @@ func TestCancelJobWriteConflict(t *testing.T) { var cancelErr error var rs []sqlexec.RecordSet - hook := 
&ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} d := dom.DDL() originalHook := d.GetHook() d.SetHook(hook) @@ -1503,7 +1504,7 @@ func TestDDLBlockedCreateView(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t(a int)") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} first := true hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { @@ -1528,7 +1529,7 @@ func TestHashPartitionAddColumn(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t(a int, b int) partition by hash(a) partitions 4") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { return @@ -1551,7 +1552,7 @@ func TestSetInvalidDefaultValueAfterModifyColumn(t *testing.T) { var wg sync.WaitGroup var checkErr error one := false - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateDeleteOnly { return @@ -1588,7 +1589,7 @@ func TestMDLTruncateTable(t *testing.T) { var wg sync.WaitGroup - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} wg.Add(2) var timetk2 time.Time var timetk3 time.Time diff --git a/ddl/ddl.go b/ddl/ddl.go index 7019146661ab4..0d2e63fc7d6e1 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -21,7 +21,6 @@ package ddl import ( "context" "encoding/json" - "flag" "fmt" "runtime" "strconv" @@ -1249,12 +1248,6 @@ var ( RunInGoTest bool ) -func init() { - if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { - RunInGoTest = true - } -} - // GetDropOrTruncateTableInfoFromJobsByStore implements GetDropOrTruncateTableInfoFromJobs func GetDropOrTruncateTableInfoFromJobsByStore(jobs []*model.Job, gcSafePoint uint64, getTable func(uint64, int64, int64) (*model.TableInfo, error), fn func(*model.Job, *model.TableInfo) (bool, error)) (bool, error) { for _, job := range jobs { diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index e07d1661f7d99..0471740d41ddf 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -97,7 +98,7 @@ func TestParallelDDL(t *testing.T) { // set hook to execute jobs after all jobs are in queue. 
jobCnt := 11 - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} once := sync.Once{} var checkErr error tc.OnJobRunBeforeExported = func(job *model.Job) { diff --git a/ddl/fail_test.go b/ddl/fail_test.go index 39437b43a2b73..3c4ca0769bc1e 100644 --- a/ddl/fail_test.go +++ b/ddl/fail_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" @@ -38,7 +38,7 @@ func TestFailBeforeDecodeArgs(t *testing.T) { tableID = int64(tableIDi) d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} first := true stateCnt := 0 diff --git a/ddl/foreign_key_test.go b/ddl/foreign_key_test.go index 627c924b21871..032adfb296120 100644 --- a/ddl/foreign_key_test.go +++ b/ddl/foreign_key_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" @@ -125,7 +126,7 @@ func TestForeignKey(t *testing.T) { var mu sync.Mutex checkOK := false var hookErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if job.State != model.JobStateDone { return @@ -167,7 +168,7 @@ func TestForeignKey(t *testing.T) { checkOK = false mu.Unlock() // fix data race pr/#9491 - tc2 := &ddl.TestDDLCallback{} + tc2 := &callback.TestDDLCallback{} onJobUpdatedExportedFunc2 := func(job *model.Job) { if job.State != model.JobStateDone { return @@ -224,7 +225,7 @@ func TestTruncateOrDropTableWithForeignKeyReferred2(t *testing.T) { var wg sync.WaitGroup var truncateErr, dropErr error testTruncate := true - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateNone { return @@ -280,7 +281,7 @@ func TestDropIndexNeededInForeignKey2(t *testing.T) { var wg sync.WaitGroup var dropErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StatePublic || job.Type != model.ActionDropIndex { return @@ -319,7 +320,7 @@ func TestDropDatabaseWithForeignKeyReferred2(t *testing.T) { tk.MustExec("create database test2") var wg sync.WaitGroup var dropErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateNone { return @@ -360,7 +361,7 @@ func TestAddForeignKey2(t *testing.T) { tk.MustExec("create table t2 (id int key, b int, index(b));") var wg sync.WaitGroup var addErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StatePublic || job.Type != model.ActionDropIndex { return @@ -400,7 +401,7 @@ func TestAddForeignKey3(t *testing.T) { var insertErrs []error var deleteErrs []error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.Type != model.ActionAddForeignKey { return diff --git a/ddl/index.go b/ddl/index.go index 221005f58c211..cc3a3d3795242 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -791,6 +791,9 @@ func doReorgWorkForCreateIndexMultiSchema(w 
*worker, d *ddlCtx, t *meta.Meta, jo done, ver, err = doReorgWorkForCreateIndex(w, d, t, job, tbl, indexInfo) if done { job.MarkNonRevertible() + if err == nil { + ver, err = updateVersionAndTableInfo(d, t, job, tbl.Meta(), true) + } } // We need another round to wait for all the others sub-jobs to finish. return false, ver, err @@ -877,7 +880,6 @@ func doReorgWorkForCreateIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Jo return false, ver, err } indexInfo.BackfillState = model.BackfillStateInapplicable // Prevent double-write on this index. - ver, err = updateVersionAndTableInfo(d, t, job, tbl.Meta(), true) return true, ver, err default: return false, 0, dbterror.ErrInvalidDDLState.GenWithStackByArgs("backfill", indexInfo.BackfillState) diff --git a/ddl/index_change_test.go b/ddl/index_change_test.go index f9dcc99154dc5..dc1b98f205f08 100644 --- a/ddl/index_change_test.go +++ b/ddl/index_change_test.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -41,7 +42,7 @@ func TestIndexChange(t *testing.T) { tk.MustExec("insert t values (1, 1), (2, 2), (3, 3);") d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} // set up hook prevState := model.StateNone addIndexDone := false diff --git a/ddl/indexmergetest/BUILD.bazel b/ddl/indexmergetest/BUILD.bazel new file mode 100644 index 0000000000000..25dfef99ecb3f --- /dev/null +++ b/ddl/indexmergetest/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "indexmergetest_test", + timeout = "moderate", + srcs = [ + "main_test.go", + "merge_test.go", + ], + flaky = True, + shard_count = 4, + deps = [ + "//config", + "//ddl", + "//ddl/ingest", + "//ddl/internal/callback", + "//ddl/testutil", + "//domain", + "//kv", + "//meta/autoid", + "//parser/model", + "//tablecodec", + "//testkit", + "//testkit/testsetup", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//assert", + "@com_github_stretchr_testify//require", + "@com_github_tikv_client_go_v2//tikv", + "@org_uber_go_goleak//:goleak", + ], +) diff --git a/ddl/indexmergetest/main_test.go b/ddl/indexmergetest/main_test.go new file mode 100644 index 0000000000000..b4de8700ce167 --- /dev/null +++ b/ddl/indexmergetest/main_test.go @@ -0,0 +1,56 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package indexmergetest + +import ( + "testing" + "time" + + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/testkit/testsetup" + "github.com/tikv/client-go/v2/tikv" + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + testsetup.SetupForCommonTest() + tikv.EnableFailpoints() + + domain.SchemaOutOfDateRetryInterval.Store(50 * time.Millisecond) + domain.SchemaOutOfDateRetryTimes.Store(50) + + autoid.SetStep(5000) + ddl.RunInGoTest = true + + config.UpdateGlobal(func(conf *config.Config) { + conf.Instance.SlowThreshold = 10000 + conf.TiKVClient.AsyncCommit.SafeWindow = 0 + conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 + conf.Experimental.AllowsExpressionIndex = true + }) + + opts := []goleak.Option{ + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), + goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), + goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), + goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + } + + goleak.VerifyTestMain(m, opts...) +} diff --git a/ddl/index_merge_tmp_test.go b/ddl/indexmergetest/merge_test.go similarity index 94% rename from ddl/index_merge_tmp_test.go rename to ddl/indexmergetest/merge_test.go index b637a55d2925f..a31b3edcc23a4 100644 --- a/ddl/index_merge_tmp_test.go +++ b/ddl/indexmergetest/merge_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 PingCAP, Inc. +// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package ddl_test +package indexmergetest import ( "testing" @@ -21,15 +21,14 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/ddl/ingest" - "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/ddl/internal/callback" + "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/util/logutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) func TestAddIndexMergeProcess(t *testing.T) { @@ -47,12 +46,12 @@ func TestAddIndexMergeProcess(t *testing.T) { var checkErr error var runDML, backfillDone bool originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { if !runDML && job.Type == model.ActionAddIndex && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil || idx.BackfillState != model.BackfillStateRunning { return } @@ -94,13 +93,13 @@ func TestAddPrimaryKeyMergeProcess(t *testing.T) { var checkErr error var runDML, backfillDone bool originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: nil, // We'll reload the schema manually. 
} onJobUpdatedExportedFunc := func(job *model.Job) { if !runDML && job.Type == model.ActionAddPrimaryKey && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "primary") + idx := testutil.FindIdxInfo(dom, "test", "t", "primary") if idx == nil || idx.BackfillState != model.BackfillStateRunning || job.SnapshotVer == 0 { return } @@ -143,12 +142,12 @@ func TestAddIndexMergeVersionIndexValue(t *testing.T) { var runDML bool var tblID, idxID int64 originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { if !runDML && job.Type == model.ActionAddIndex && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil || idx.BackfillState != model.BackfillStateReadyToMerge { return } @@ -198,14 +197,14 @@ func TestAddIndexMergeIndexUntouchedValue(t *testing.T) { var runInsert bool var runUpdate bool originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { if job.Type != model.ActionAddIndex || job.SchemaState != model.StateWriteReorganization { return } - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil { return } @@ -243,15 +242,6 @@ func TestAddIndexMergeIndexUntouchedValue(t *testing.T) { tk.MustQuery("select * from t ignore index (idx);").Check(testkit.Rows("1 1 a a", "100 2 a a")) } -func findIdxInfo(dom *domain.Domain, dbName, tbName, idxName string) *model.IndexInfo { - tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) - if err != nil { - logutil.BgLogger().Warn("cannot find table", zap.String("dbName", dbName), zap.String("tbName", tbName)) - return nil - } - return tbl.Meta().FindIndexByName(idxName) -} - // TestCreateUniqueIndexKeyExist this case will test below things: // Create one unique index idx((a*b+1)); // insert (0, 6) and delete it; @@ -279,7 +269,7 @@ func TestCreateUniqueIndexKeyExist(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if t.Failed() { return @@ -346,7 +336,7 @@ func TestAddIndexMergeIndexUpdateOnDeleteOnly(t *testing.T) { var checkErrs []error originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedBefore := func(job *model.Job) { @@ -383,7 +373,7 @@ func TestAddIndexMergeDeleteUniqueOnWriteOnly(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if t.Failed() { return @@ -441,7 +431,7 @@ func TestAddIndexMergeDoubleDelete(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if t.Failed() { return @@ -485,7 +475,7 @@ func TestAddIndexMergeConflictWithPessimistic(t *testing.T) { tk.MustExec("set @@global.tidb_enable_metadata_lock = 0;") originHook := dom.DDL().GetHook() - callback := 
&ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} runPessimisticTxn := false callback.OnJobRunBeforeExported = func(job *model.Job) { @@ -498,7 +488,7 @@ func TestAddIndexMergeConflictWithPessimistic(t *testing.T) { assert.NoError(t, err) } if !runPessimisticTxn && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil { return } diff --git a/ddl/integration_test.go b/ddl/integration_test.go index 29e69aa855274..264e755889899 100644 --- a/ddl/integration_test.go +++ b/ddl/integration_test.go @@ -18,7 +18,7 @@ import ( "fmt" "testing" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" @@ -86,7 +86,7 @@ func TestDDLStatementsBackFill(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test;") needReorg := false - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { diff --git a/ddl/internal/callback/BUILD.bazel b/ddl/internal/callback/BUILD.bazel new file mode 100644 index 0000000000000..e0bb07c3b2c21 --- /dev/null +++ b/ddl/internal/callback/BUILD.bazel @@ -0,0 +1,51 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "callback", + srcs = ["callback.go"], + importpath = "github.com/pingcap/tidb/ddl/internal/callback", + visibility = ["//ddl:__subpackages__"], + deps = [ + "//ddl", + "//infoschema", + "//parser/model", + "//sessionctx", + "//util/logutil", + "@org_uber_go_zap//:zap", + ], +) + +go_test( + name = "callback_test", + srcs = ["callback_test.go"], + embed = [":callback"], + deps = [ + "//ddl", + "@com_github_stretchr_testify//require", + ], +) + +go_library( + name = "ddlcallback", + srcs = ["callback.go"], + importpath = "github.com/pingcap/tidb/ddl/internal/ddlcallback", + visibility = ["//ddl:__subpackages__"], + deps = [ + "//ddl", + "//infoschema", + "//parser/model", + "//sessionctx", + "//util/logutil", + "@org_uber_go_zap//:zap", + ], +) + +go_test( + name = "ddlcallback_test", + srcs = ["callback_test.go"], + embed = [":ddlcallback"], + deps = [ + "//ddl", + "@com_github_stretchr_testify//require", + ], +) diff --git a/ddl/callback_test.go b/ddl/internal/callback/callback.go similarity index 93% rename from ddl/callback_test.go rename to ddl/internal/callback/callback.go index 5a97e8212689e..a3c84d774dd5c 100644 --- a/ddl/callback_test.go +++ b/ddl/internal/callback/callback.go @@ -1,4 +1,4 @@ -// Copyright 2015 PingCAP, Inc. +// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,27 +12,28 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package ddl +package callback import ( "context" "sync/atomic" - "testing" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/logutil" - "github.com/stretchr/testify/require" "go.uber.org/zap" ) +// TestInterceptor is a test interceptor in the ddl type TestInterceptor struct { - *BaseInterceptor + *ddl.BaseInterceptor OnGetInfoSchemaExported func(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema } +// OnGetInfoSchema is to run when to call GetInfoSchema func (ti *TestInterceptor) OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema { if ti.OnGetInfoSchemaExported != nil { return ti.OnGetInfoSchemaExported(ctx, is) @@ -43,10 +44,10 @@ func (ti *TestInterceptor) OnGetInfoSchema(ctx sessionctx.Context, is infoschema // TestDDLCallback is used to customize user callback themselves. type TestDDLCallback struct { - *BaseCallback + *ddl.BaseCallback // We recommended to pass the domain parameter to the test ddl callback, it will ensure // domain to reload schema before your ddl stepping into the next state change. - Do DomainReloader + Do ddl.DomainReloader onJobRunBefore func(*model.Job) OnJobRunBeforeExported func(*model.Job) @@ -149,11 +150,3 @@ func (tc *TestDDLCallback) OnGetJobAfter(jobType string, job *model.Job) { func (tc *TestDDLCallback) Clone() *TestDDLCallback { return &*tc } - -func TestCallback(t *testing.T) { - cb := &BaseCallback{} - require.Nil(t, cb.OnChanged(nil)) - cb.OnJobRunBefore(nil) - cb.OnJobUpdated(nil) - cb.OnWatched(context.TODO()) -} diff --git a/ddl/internal/callback/callback_test.go b/ddl/internal/callback/callback_test.go new file mode 100644 index 0000000000000..f611394909e48 --- /dev/null +++ b/ddl/internal/callback/callback_test.go @@ -0,0 +1,31 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package callback + +import ( + "context" + "testing" + + "github.com/pingcap/tidb/ddl" + "github.com/stretchr/testify/require" +) + +func TestCallback(t *testing.T) { + cb := &ddl.BaseCallback{} + require.Nil(t, cb.OnChanged(nil)) + cb.OnJobRunBefore(nil) + cb.OnJobUpdated(nil) + cb.OnWatched(context.TODO()) +} diff --git a/ddl/job_table_test.go b/ddl/job_table_test.go index d869dcecc2c0e..9f1150241bbd1 100644 --- a/ddl/job_table_test.go +++ b/ddl/job_table_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -62,7 +63,7 @@ func TestDDLScheduling(t *testing.T) { "ALTER TABLE e EXCHANGE PARTITION p1 WITH TABLE e3;", } - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} var wg util.WaitGroupWrapper wg.Add(1) var once sync.Once diff --git a/ddl/modify_column_test.go b/ddl/modify_column_test.go index 6eb8e633be007..583c0a435b4ec 100644 --- a/ddl/modify_column_test.go +++ b/ddl/modify_column_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/ast" @@ -72,7 +73,7 @@ func TestModifyColumnReorgInfo(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t1") // Check insert null before job first update. - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error var currJob *model.Job var elements []*meta.Element @@ -198,7 +199,7 @@ func TestModifyColumnNullToNotNull(t *testing.T) { tbl := external.GetTableByName(t, tk1, "test", "t1") // Check insert null before job first update. - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} tk1.MustExec("delete from t1") once := sync.Once{} var checkErr error @@ -253,7 +254,7 @@ func TestModifyColumnNullToNotNullWithChangingVal(t *testing.T) { tbl := external.GetTableByName(t, tk1, "test", "t1") // Check insert null before job first update. 
- hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} tk1.MustExec("delete from t1") once := sync.Once{} var checkErr error diff --git a/ddl/multi_schema_change_test.go b/ddl/multi_schema_change_test.go index d9facec4642cf..1f6a52bcce244 100644 --- a/ddl/multi_schema_change_test.go +++ b/ddl/multi_schema_change_test.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" @@ -386,7 +387,7 @@ func TestMultiSchemaChangeRenameColumns(t *testing.T) { tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int default 1, b int default 2)") tk.MustExec("insert into t values ()") - hook1 := &ddl.TestDDLCallback{Do: dom} + hook1 := &callback.TestDDLCallback{Do: dom} hook1.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionMultiSchemaChange, job.Type) if job.MultiSchemaInfo.SubJobs[0].SchemaState == model.StateWriteReorganization { @@ -457,7 +458,7 @@ func TestMultiSchemaChangeAlterColumns(t *testing.T) { // Test dml stmts when do alter tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int default 1, b int default 2)") - hook1 := &ddl.TestDDLCallback{Do: dom} + hook1 := &callback.TestDDLCallback{Do: dom} hook1.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionMultiSchemaChange, job.Type) if job.MultiSchemaInfo.SubJobs[0].SchemaState == model.StateWriteOnly { @@ -972,7 +973,7 @@ func TestMultiSchemaChangeAlterIndex(t *testing.T) { tk.MustExec("insert into t values (1, 2);") originHook := dom.DDL().GetHook() var checked bool - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { assert.NotNil(t, job.MultiSchemaInfo) // "modify column a tinyint" in write-reorg. 
@@ -1042,7 +1043,7 @@ func TestMultiSchemaChangeAdminShowDDLJobs(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionMultiSchemaChange, job.Type) if job.MultiSchemaInfo.SubJobs[0].SchemaState == model.StateDeleteOnly { @@ -1129,7 +1130,7 @@ func TestMultiSchemaChangeWithExpressionIndex(t *testing.T) { tk.MustQuery("select * from t;").Check(testkit.Rows("1 2", "2 1")) originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -1191,7 +1192,7 @@ func TestMultiSchemaChangeSchemaVersion(t *testing.T) { schemaVerMap := map[int64]struct{}{} originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobSchemaStateChanged = func(schemaVer int64) { if schemaVer != 0 { // No same return schemaVer during multi-schema change @@ -1231,7 +1232,7 @@ func TestMultiSchemaChangeMixedWithUpdate(t *testing.T) { "'2020-01-01 10:00:00', 'wer', '10:00:00', 2.1, 12, 'qwer', 12, 'asdf');") originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -1266,7 +1267,7 @@ type cancelOnceHook struct { pred func(job *model.Job) bool s sessionctx.Context - ddl.TestDDLCallback + callback.TestDDLCallback } func (c *cancelOnceHook) OnJobUpdated(job *model.Job) { @@ -1299,7 +1300,7 @@ func newCancelJobHook(t *testing.T, store kv.Storage, dom *domain.Domain, return &cancelOnceHook{ store: store, pred: pred, - TestDDLCallback: ddl.TestDDLCallback{Do: dom}, + TestDDLCallback: callback.TestDDLCallback{Do: dom}, s: tk.Session(), } } diff --git a/ddl/mv_index_test.go b/ddl/mv_index_test.go index 964211ad76740..10fbe2971377a 100644 --- a/ddl/mv_index_test.go +++ b/ddl/mv_index_test.go @@ -19,7 +19,7 @@ import ( "strings" "testing" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" @@ -45,7 +45,7 @@ func TestMultiValuedIndexOnlineDDL(t *testing.T) { internalTK := testkit.NewTestKit(t, store) internalTK.MustExec("use test") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} n := 100 hook.OnJobRunBeforeExported = func(job *model.Job) { internalTK.MustExec(fmt.Sprintf("insert into t values (%d, '[%d, %d, %d]')", n, n, n+1, n+2)) diff --git a/ddl/placement_policy_test.go b/ddl/placement_policy_test.go index 559cc0ff59a46..327c7e02cf0b6 100644 --- a/ddl/placement_policy_test.go +++ b/ddl/placement_policy_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" @@ -135,7 +136,7 @@ func TestPlacementPolicy(t *testing.T) { tk.MustExec("use test") tk.MustExec("drop placement policy if exists x") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var policyID int64 onJobUpdatedExportedFunc := func(job *model.Job) { if policyID != 0 { diff --git 
a/ddl/repair_table_test.go b/ddl/repair_table_test.go index 6881c6ce5f019..8b16f9bfbc69d 100644 --- a/ddl/repair_table_test.go +++ b/ddl/repair_table_test.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -101,7 +101,7 @@ func TestRepairTable(t *testing.T) { // Repaired tableInfo has been filtered by `domain.InfoSchema()`, so get it in repairInfo. originTableInfo, _ := domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var repairErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type != model.ActionRepairTable { diff --git a/ddl/resource_group_test.go b/ddl/resource_group_test.go index 789e81f99f0fb..3bf33b04d9012 100644 --- a/ddl/resource_group_test.go +++ b/ddl/resource_group_test.go @@ -19,7 +19,7 @@ import ( "strconv" "testing" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/resourcegroup" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/domain/infosync" @@ -36,7 +36,7 @@ func TestResourceGroupBasic(t *testing.T) { tk.MustExec("use test") re := require.New(t) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var groupID int64 onJobUpdatedExportedFunc := func(job *model.Job) { // job.SchemaID will be assigned when the group is created. diff --git a/ddl/rollingback_test.go b/ddl/rollingback_test.go index f1850eb80dcbc..ee5894441175f 100644 --- a/ddl/rollingback_test.go +++ b/ddl/rollingback_test.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/external" @@ -50,7 +51,7 @@ func TestCancelAddIndexJobError(t *testing.T) { require.NotNil(t, tbl) d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var ( checkErr error jobID int64 diff --git a/ddl/serial_test.go b/ddl/serial_test.go index 970f60a95ff96..668b675a0b185 100644 --- a/ddl/serial_test.go +++ b/ddl/serial_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -445,7 +446,7 @@ func TestCancelAddIndexPanic(t *testing.T) { oldReorgWaitTimeout := ddl.ReorgWaitTimeout ddl.ReorgWaitTimeout = 50 * time.Millisecond defer func() { ddl.ReorgWaitTimeout = oldReorgWaitTimeout }() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type == model.ActionAddIndex && job.State == model.JobStateRunning && job.SchemaState == model.StateWriteReorganization && job.SnapshotVer != 0 { tkCancel.MustQuery(fmt.Sprintf("admin cancel ddl jobs %d", job.ID)) @@ -684,7 +685,7 @@ func TestRecoverTableByJobIDFail(t *testing.T) { tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) // set hook - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type == 
model.ActionRecoverTable { require.NoError(t, failpoint.Enable("tikvclient/mockCommitError", `return(true)`)) @@ -743,7 +744,7 @@ func TestRecoverTableByTableNameFail(t *testing.T) { tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) // set hook - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type == model.ActionRecoverTable { require.NoError(t, failpoint.Enable("tikvclient/mockCommitError", `return(true)`)) @@ -816,7 +817,7 @@ func TestCanceledJobTakeTime(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t_cjtt(a int)") - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} once := sync.Once{} hook.OnJobRunBeforeExported = func(job *model.Job) { once.Do(func() { diff --git a/ddl/table_modify_test.go b/ddl/table_modify_test.go index 590fea8ad973d..7f0b23e3fd894 100644 --- a/ddl/table_modify_test.go +++ b/ddl/table_modify_test.go @@ -20,6 +20,7 @@ import ( "time" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/infoschema" @@ -205,7 +206,7 @@ func TestConcurrentLockTables(t *testing.T) { } func testParallelExecSQL(t *testing.T, store kv.Storage, dom *domain.Domain, sql1, sql2 string, se1, se2 session.Session, f func(t *testing.T, err1, err2 error)) { - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} times := 0 callback.OnJobRunBeforeExported = func(job *model.Job) { if times != 0 { diff --git a/ddl/testutil/BUILD.bazel b/ddl/testutil/BUILD.bazel index 3562ca3b34571..052a747f427ba 100644 --- a/ddl/testutil/BUILD.bazel +++ b/ddl/testutil/BUILD.bazel @@ -14,6 +14,8 @@ go_library( "//table", "//table/tables", "//types", + "//util/logutil", "@com_github_pingcap_errors//:errors", + "@org_uber_go_zap//:zap", ], ) diff --git a/ddl/testutil/testutil.go b/ddl/testutil/testutil.go index 642579ba00ea7..52adf6b750f73 100644 --- a/ddl/testutil/testutil.go +++ b/ddl/testutil/testutil.go @@ -26,6 +26,8 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" ) // SessionExecInGoroutine export for testing. @@ -82,3 +84,13 @@ func ExtractAllTableHandles(se session.Session, dbName, tbName string) ([]int64, }) return allHandles, err } + +// FindIdxInfo gets the IndexInfo by index name; it returns nil if the table or the index cannot be found.
+func FindIdxInfo(dom *domain.Domain, dbName, tbName, idxName string) *model.IndexInfo { + tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) + if err != nil { + logutil.BgLogger().Warn("cannot find table", zap.String("dbName", dbName), zap.String("tbName", tbName)) + return nil + } + return tbl.Meta().FindIndexByName(idxName) +} diff --git a/ddl/tiflash_replica_test.go b/ddl/tiflash_replica_test.go index 874ab05359d3b..08a91c1086caa 100644 --- a/ddl/tiflash_replica_test.go +++ b/ddl/tiflash_replica_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" @@ -286,7 +286,7 @@ func TestCreateTableWithLike2(t *testing.T) { tbl1 := external.GetTableByName(t, tk, "test", "t1") doneCh := make(chan error, 2) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var onceChecker sync.Map hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type != model.ActionAddColumn && job.Type != model.ActionDropColumn && diff --git a/domain/BUILD.bazel b/domain/BUILD.bazel index 859943b6c6672..ccbf75dd48ee6 100644 --- a/domain/BUILD.bazel +++ b/domain/BUILD.bazel @@ -45,7 +45,6 @@ go_library( "//privilege/privileges", "//sessionctx", "//sessionctx/sessionstates", - "//sessionctx/stmtctx", "//sessionctx/variable", "//statistics/handle", "//telemetry", diff --git a/domain/plan_replayer.go b/domain/plan_replayer.go index 0f72a1ac8a575..8bbc26cf79ec2 100644 --- a/domain/plan_replayer.go +++ b/domain/plan_replayer.go @@ -34,8 +34,8 @@ import ( "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/replayer" @@ -164,7 +164,18 @@ func insertPlanReplayerSuccessStatusRecord(ctx context.Context, sctx sessionctx. 
record.SQLDigest, record.PlanDigest, record.OriginSQL, record.Token, instance)) if err != nil { logutil.BgLogger().Warn("insert mysql.plan_replayer_status record failed", + zap.String("sql", record.OriginSQL), zap.Error(err)) + // try to insert the record without the original SQL + _, err = exec.ExecuteInternal(ctx, fmt.Sprintf( + "insert into mysql.plan_replayer_status (sql_digest, plan_digest, token, instance) values ('%s','%s','%s','%s')", + record.SQLDigest, record.PlanDigest, record.Token, instance)) + if err != nil { + logutil.BgLogger().Warn("insert mysql.plan_replayer_status record failed", + zap.String("sqlDigest", record.SQLDigest), + zap.String("planDigest", record.PlanDigest), + zap.Error(err)) + } } } @@ -379,6 +390,7 @@ func (w *planReplayerTaskDumpWorker) handleTask(task *PlanReplayerDumpTask) { occupy := true handleTask := true defer func() { + util.Recover(metrics.LabelDomain, "PlanReplayerTaskDumpWorker", nil, false) logutil.BgLogger().Debug("[plan-replayer-capture] handle task", zap.String("sql-digest", sqlDigest), zap.String("plan-digest", planDigest), @@ -431,7 +443,6 @@ func (w *planReplayerTaskDumpWorker) HandleTask(task *PlanReplayerDumpTask) (suc } task.Zf = file task.FileName = fileName - task.EncodedPlan, _ = task.EncodePlan(task.SessionVars.StmtCtx, false) if task.InExecute && len(task.NormalizedSQL) > 0 { p := parser.New() stmts, _, err := p.ParseSQL(task.NormalizedSQL) @@ -538,7 +549,6 @@ type PlanReplayerDumpTask struct { replayer.PlanReplayerTaskKey // tmp variables stored during the query - EncodePlan func(*stmtctx.StatementContext, bool) (string, string) TblStats map[int64]interface{} InExecute bool NormalizedSQL string diff --git a/domain/plan_replayer_dump.go b/domain/plan_replayer_dump.go index 01ab473e16a90..5559dd3915b52 100644 --- a/domain/plan_replayer_dump.go +++ b/domain/plan_replayer_dump.go @@ -282,7 +282,14 @@ func DumpPlanReplayerInfo(ctx context.Context, sctx sessionctx.Context, // For capture task, we dump stats in storage only if EnableHistoricalStatsForCapture is disabled. // For manual plan replayer dump command, we directly dump stats in storage - if !variable.EnableHistoricalStatsForCapture.Load() || !task.IsCapture { + if task.IsCapture { + if !task.IsContinuesCapture && variable.EnableHistoricalStatsForCapture.Load() { + // Dump stats + if err = dumpStats(zw, pairs, do); err != nil { + return err + } + } + } else { // Dump stats if err = dumpStats(zw, pairs, do); err != nil { return err diff --git a/executor/adapter.go b/executor/adapter.go index 59ba22ce73809..145e1938216c2 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -1412,17 +1412,7 @@ func (a *ExecStmt) observePhaseDurations(internal bool, commitDetails *util.Comm // 4. update the `PrevStmt` in session variable. // 5. reset `DurationParse` in session variable.
func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults bool) { - se := a.Ctx - if !se.GetSessionVars().InRestrictedSQL && se.GetSessionVars().IsPlanReplayerCaptureEnabled() { - stmtNode := a.GetStmtNode() - if se.GetSessionVars().EnablePlanReplayedContinuesCapture { - if checkPlanReplayerContinuesCaptureValidStmt(stmtNode) { - checkPlanReplayerContinuesCapture(se, stmtNode, txnTS) - } - } else { - checkPlanReplayerCaptureTask(se, stmtNode, txnTS) - } - } + a.checkPlanReplayerCapture(txnTS) sessVars := a.Ctx.GetSessionVars() execDetail := sessVars.StmtCtx.GetExecDetails() @@ -1485,6 +1475,23 @@ func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults boo } } +func (a *ExecStmt) checkPlanReplayerCapture(txnTS uint64) { + if kv.GetInternalSourceType(a.GoCtx) == kv.InternalTxnStats { + return + } + se := a.Ctx + if !se.GetSessionVars().InRestrictedSQL && se.GetSessionVars().IsPlanReplayerCaptureEnabled() { + stmtNode := a.GetStmtNode() + if se.GetSessionVars().EnablePlanReplayedContinuesCapture { + if checkPlanReplayerContinuesCaptureValidStmt(stmtNode) { + checkPlanReplayerContinuesCapture(se, stmtNode, txnTS) + } + } else { + checkPlanReplayerCaptureTask(se, stmtNode, txnTS) + } + } +} + // CloseRecordSet will finish the execution of current statement and do some record work func (a *ExecStmt) CloseRecordSet(txnStartTS uint64, lastErr error) { a.FinishExecuteStmt(txnStartTS, lastErr, false) @@ -2112,7 +2119,6 @@ func sendPlanReplayerDumpTask(key replayer.PlanReplayerTaskKey, sctx sessionctx. dumpTask := &domain.PlanReplayerDumpTask{ PlanReplayerTaskKey: key, StartTS: startTS, - EncodePlan: GetEncodedPlan, TblStats: stmtCtx.TableStats, SessionBindings: handle.GetAllBindRecord(), SessionVars: sctx.GetSessionVars(), @@ -2121,6 +2127,7 @@ func sendPlanReplayerDumpTask(key replayer.PlanReplayerTaskKey, sctx sessionctx. 
IsCapture: true, IsContinuesCapture: isContinuesCapture, } + dumpTask.EncodedPlan, _ = GetEncodedPlan(stmtCtx, false) if _, ok := stmtNode.(*ast.ExecuteStmt); ok { nsql, _ := sctx.GetSessionVars().StmtCtx.SQLDigest() dumpTask.InExecute = true diff --git a/executor/executor_test.go b/executor/executor_test.go index 7e6a51799d778..85d0dad290bb0 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -204,6 +204,15 @@ func TestPlanReplayerCapture(t *testing.T) { func TestPlanReplayerContinuesCapture(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) + + tk.MustExec("set @@global.tidb_enable_historical_stats='OFF'") + _, err := tk.Exec("set @@global.tidb_enable_plan_replayer_continues_capture='ON'") + require.Error(t, err) + require.Equal(t, err.Error(), "tidb_enable_historical_stats should be enabled before enabling tidb_enable_plan_replayer_continues_capture") + + tk.MustExec("set @@global.tidb_enable_historical_stats='ON'") + tk.MustExec("set @@global.tidb_enable_plan_replayer_continues_capture='ON'") + prHandle := dom.GetPlanReplayerHandle() tk.MustExec("delete from mysql.plan_replayer_status;") tk.MustExec("use test") diff --git a/executor/fktest/foreign_key_test.go b/executor/fktest/foreign_key_test.go index fb29d391aaf09..6e2e1d83662f1 100644 --- a/executor/fktest/foreign_key_test.go +++ b/executor/fktest/foreign_key_test.go @@ -2839,3 +2839,19 @@ func TestForeignKeyAndMultiValuedIndex(t *testing.T) { tk.MustExec("admin check table t1") tk.MustExec("admin check table t2") } + +func TestForeignKeyAndSessionVariable(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("set @@foreign_key_checks=1") + tk.MustExec("use test") + tk.MustExec("create table t1 (t timestamp, index(t));") + tk.MustExec("create table t2 (t timestamp, foreign key (t) references t1(t) on delete cascade);") + tk.MustExec("set @@time_zone='+8:00';") + tk.MustExec("insert into t1 values ('2023-01-28 10:29:16');") + tk.MustExec("insert into t2 values ('2023-01-28 10:29:16');") + tk.MustExec("set @@time_zone='+6:00';") + tk.MustExec("delete from t1;") + tk.MustQuery("select * from t1").Check(testkit.Rows()) + tk.MustQuery("select * from t2").Check(testkit.Rows()) +} diff --git a/executor/infoschema_cluster_table_test.go b/executor/infoschema_cluster_table_test.go index be2f04cb5c6ac..b1a6d4c57f4f8 100644 --- a/executor/infoschema_cluster_table_test.go +++ b/executor/infoschema_cluster_table_test.go @@ -290,7 +290,7 @@ func TestTableStorageStats(t *testing.T) { "test 2", )) rows := tk.MustQuery("select TABLE_NAME from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql';").Rows() - result := 45 + result := 46 require.Len(t, rows, result) // More tests about the privileges. 
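Several of the executor diffs that follow replace stringly-typed context keys ("__mockMetricsPromData", and "signals" in slow_query.go) with dedicated key types (executor.MockMetricsPromDataKey{}, signalsKey{}). An empty struct key costs nothing to construct, cannot collide with context values set by other packages, and satisfies linters such as staticcheck's SA1029 that flag basic-type context keys. A minimal, self-contained sketch of the idiom; the names here are illustrative and not taken from the patch:

package main

import (
	"context"
	"fmt"
)

// mockDataKey is a zero-size key type. No other package can construct an
// identical key, so Value lookups cannot collide with foreign entries.
type mockDataKey struct{}

func withMockData(ctx context.Context, rows []string) context.Context {
	return context.WithValue(ctx, mockDataKey{}, rows)
}

func mockDataFrom(ctx context.Context) ([]string, bool) {
	rows, ok := ctx.Value(mockDataKey{}).([]string)
	return rows, ok
}

func main() {
	ctx := withMockData(context.Background(), []string{"row1", "row2"})
	if rows, ok := mockDataFrom(ctx); ok {
		fmt.Println(rows) // prints [row1 row2]
	}
}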
diff --git a/executor/memtable_reader_test.go b/executor/memtable_reader_test.go index 870a4193fb3b2..f6d98d4ec24fc 100644 --- a/executor/memtable_reader_test.go +++ b/executor/memtable_reader_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/fn" "github.com/pingcap/sysutil" + "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/util/pdapi" pmodel "github.com/prometheus/common/model" @@ -56,7 +57,7 @@ func TestMetricTableData(t *testing.T) { } matrix = append(matrix, &pmodel.SampleStream{Metric: metric, Values: []pmodel.SamplePair{v1}}) - ctx := context.WithValue(context.Background(), "__mockMetricsPromData", matrix) + ctx := context.WithValue(context.Background(), executor.MockMetricsPromDataKey{}, matrix) ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool { return fpname == fpName }) diff --git a/executor/metrics_reader.go b/executor/metrics_reader.go index 314616785d60f..d9e0bd39f1128 100644 --- a/executor/metrics_reader.go +++ b/executor/metrics_reader.go @@ -89,9 +89,12 @@ func (e *MetricRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) return totalRows, nil } +// MockMetricsPromDataKey is the context key type used to inject mock Prometheus data in tests. +type MockMetricsPromDataKey struct{} + func (e *MetricRetriever) queryMetric(ctx context.Context, sctx sessionctx.Context, queryRange promv1.Range, quantile float64) (result pmodel.Value, err error) { failpoint.InjectContext(ctx, "mockMetricsPromData", func() { - failpoint.Return(ctx.Value("__mockMetricsPromData").(pmodel.Matrix), nil) + failpoint.Return(ctx.Value(MockMetricsPromDataKey{}).(pmodel.Matrix), nil) }) // Add retry to avoid network error. diff --git a/executor/seqtest/prepared_test.go b/executor/seqtest/prepared_test.go index 12a99a7b90ca7..9da67370b9b09 100644 --- a/executor/seqtest/prepared_test.go +++ b/executor/seqtest/prepared_test.go @@ -334,7 +334,7 @@ func TestPrepareWithAggregation(t *testing.T) { tk.MustExec(fmt.Sprintf(`set @@tidb_enable_prepared_plan_cache=%v`, flag)) se, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk.SetSession(se) @@ -599,7 +599,7 @@ func TestPrepareDealloc(t *testing.T) { tk.MustExec(`set @@tidb_enable_prepared_plan_cache=true`) se, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: plannercore.NewLRUPlanCache(3, 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: plannercore.NewLRUPlanCache(3, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk.SetSession(se) diff --git a/executor/slow_query.go b/executor/slow_query.go index b83a480f85857..e8a2731a476e7 100644 --- a/executor/slow_query.go +++ b/executor/slow_query.go @@ -52,6 +52,8 @@ import ( "golang.org/x/exp/slices" ) +type signalsKey struct{} + // ParseSlowLogBatchSize is the batch size of slow-log lines for a worker to parse, exported for testing.
var ParseSlowLogBatchSize = 64 @@ -474,7 +476,7 @@ func (e *slowQueryRetriever) parseSlowLog(ctx context.Context, sctx sessionctx.C } failpoint.Inject("mockReadSlowLogSlow", func(val failpoint.Value) { if val.(bool) { - signals := ctx.Value("signals").([]chan int) + signals := ctx.Value(signalsKey{}).([]chan int) signals[0] <- 1 <-signals[1] } diff --git a/executor/slow_query_test.go b/executor/slow_query_test.go index d696afa3c945d..fe2a5b68a329a 100644 --- a/executor/slow_query_test.go +++ b/executor/slow_query_test.go @@ -666,7 +666,7 @@ select * from t;` retriever, err := newSlowQueryRetriever() require.NoError(t, err) var signal1, signal2 = make(chan int, 1), make(chan int, 1) - ctx := context.WithValue(context.Background(), "signals", []chan int{signal1, signal2}) + ctx := context.WithValue(context.Background(), signalsKey{}, []chan int{signal1, signal2}) ctx, cancel := context.WithCancel(ctx) err = failpoint.Enable("github.com/pingcap/tidb/executor/mockReadSlowLogSlow", "return(true)") require.NoError(t, err) diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go index dec5d06983679..bcb27a1233da7 100644 --- a/expression/builtin_compare.go +++ b/expression/builtin_compare.go @@ -1565,33 +1565,17 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express arg0Type, arg1Type := args[0].GetType(), args[1].GetType() arg0IsInt := arg0Type.EvalType() == types.ETInt arg1IsInt := arg1Type.EvalType() == types.ETInt - arg0IsString := arg0Type.EvalType() == types.ETString - arg1IsString := arg1Type.EvalType() == types.ETString arg0, arg0IsCon := args[0].(*Constant) arg1, arg1IsCon := args[1].(*Constant) isExceptional, finalArg0, finalArg1 := false, args[0], args[1] isPositiveInfinite, isNegativeInfinite := false, false - if MaybeOverOptimized4PlanCache(ctx, args) { - // To keep the result be compatible with MySQL, refine `int non-constant str constant` - // here and skip this refine operation in all other cases for safety. - if (arg0IsInt && !arg0IsCon && arg1IsString && arg1IsCon) || (arg1IsInt && !arg1IsCon && arg0IsString && arg0IsCon) { - var reason error - if arg1IsString { - reason = errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg1.String()) - } else { // arg0IsString - reason = errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg0.String()) - } - ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(reason) - RemoveMutableConst(ctx, args) - } else { - return args - } - } else if !ctx.GetSessionVars().StmtCtx.UseCache { - // We should remove the mutable constant for correctness, because its value may be changed. 
- RemoveMutableConst(ctx, args) - } // int non-constant [cmp] non-int constant if arg0IsInt && !arg0IsCon && !arg1IsInt && arg1IsCon { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg1}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg1.String())) + RemoveMutableConst(ctx, args) + } + arg1, isExceptional = RefineComparedConstant(ctx, *arg0Type, arg1, c.op) // Why check not null flag // eg: int_col > const_val(which is less than min_int32) @@ -1619,6 +1603,11 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express } // non-int constant [cmp] int non-constant if arg1IsInt && !arg1IsCon && !arg0IsInt && arg0IsCon { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg0}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg0.String())) + RemoveMutableConst(ctx, args) + } + arg0, isExceptional = RefineComparedConstant(ctx, *arg1Type, arg0, symmetricOp[c.op]) if !isExceptional || (isExceptional && mysql.HasNotNullFlag(arg1Type.GetFlag())) { finalArg0 = arg0 @@ -1636,6 +1625,11 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express } // int constant [cmp] year type if arg0IsCon && arg0IsInt && arg1Type.GetType() == mysql.TypeYear && !arg0.Value.IsNull() { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg0}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to YEAR", arg0.String())) + RemoveMutableConst(ctx, args) + } + adjusted, failed := types.AdjustYear(arg0.Value.GetInt64(), false) if failed == nil { arg0.Value.SetInt64(adjusted) @@ -1644,6 +1638,11 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express } // year type [cmp] int constant if arg1IsCon && arg1IsInt && arg0Type.GetType() == mysql.TypeYear && !arg1.Value.IsNull() { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg1}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to YEAR", arg1.String())) + RemoveMutableConst(ctx, args) + } + adjusted, failed := types.AdjustYear(arg1.Value.GetInt64(), false) if failed == nil { arg1.Value.SetInt64(adjusted) diff --git a/expression/integration_serial_test.go b/expression/integration_serial_test.go index b70b7be4a5070..c50aa687659a9 100644 --- a/expression/integration_serial_test.go +++ b/expression/integration_serial_test.go @@ -3790,7 +3790,7 @@ func TestPreparePlanCacheOnCachedTable(t *testing.T) { var err error se, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk.SetSession(se) diff --git a/go.mod b/go.mod index a11045833f165..88bc1f0fd6e53 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/Shopify/sarama v1.29.0 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 + github.com/apache/skywalking-eyes v0.4.0 github.com/ashanbrown/makezero v1.1.1 github.com/aws/aws-sdk-go v1.44.48 github.com/blacktear23/go-proxyprotocol v1.0.2 @@ -86,6 +87,7 @@ require ( github.com/soheilhy/cmux v0.1.5 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 + github.com/spkg/bom v1.0.0 github.com/stathat/consistent v1.0.0 github.com/stretchr/testify v1.8.1 
github.com/tdakkota/asciicheck v0.1.1 @@ -119,7 +121,7 @@ require ( golang.org/x/term v0.4.0 golang.org/x/text v0.6.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.2.0 + golang.org/x/tools v0.5.0 google.golang.org/api v0.103.0 google.golang.org/grpc v1.51.0 gopkg.in/yaml.v2 v2.4.0 @@ -137,12 +139,16 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1 // indirect github.com/DataDog/zstd v1.4.5 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.2 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bmatcuk/doublestar/v2 v2.0.4 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect @@ -169,6 +175,7 @@ require ( github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/licensecheck v0.3.1 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect @@ -179,6 +186,8 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/huandu/xstrings v1.3.1 // indirect + github.com/imdario/mergo v0.3.11 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect @@ -201,6 +210,8 @@ require ( github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect @@ -221,10 +232,12 @@ require ( github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/rivo/uniseg v0.4.2 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect + github.com/shopspring/decimal v1.2.0 // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect diff --git a/go.sum b/go.sum index 8bfacd2bddec7..d74e95aeb8783 100644 --- a/go.sum +++ b/go.sum @@ -409,8 +409,14 @@ github.com/Jeffail/gabs/v2 v2.5.1 h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk github.com/Jeffail/gabs/v2 v2.5.1/go.mod 
h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= @@ -436,6 +442,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 h1:Q/yk4z/cHUVZfgTqtD09qeYBxHwshQAjVRX73qs8UH0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/skywalking-eyes v0.4.0 h1:O13kdRU6FCEZevfD01mdhTgCZLLfPZIQ0GXZrLl7FpQ= +github.com/apache/skywalking-eyes v0.4.0/go.mod h1:WblDbBgOLsLN0FJEBa9xj6PhuUA/J6spKYVTG4/F8Ls= github.com/apache/thrift v0.0.0-20181112125854-24918abba929/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 h1:Jz3KVLYY5+JO7rDiX0sAuRGtuv2vG01r17Y9nLMWNUw= github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -463,6 +471,8 @@ github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blacktear23/go-proxyprotocol v1.0.2 h1:zR7PZeoU0wAkElcIXenFiy3R56WB6A+UEVi4c6RH8wo= github.com/blacktear23/go-proxyprotocol v1.0.2/go.mod h1:FSCbgnRZrQXazBLL5snfBbrcFSMtcmUDhSRb9OfFA1o= +github.com/bmatcuk/doublestar/v2 v2.0.4 h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI= +github.com/bmatcuk/doublestar/v2 v2.0.4/go.mod h1:QMmcs3H2AUQICWhfzLXz+IYln8lRQmTZRptLie8RgRw= github.com/carlmjohnson/flagext v0.21.0 h1:/c4uK3ie786Z7caXLcIMvePNSSiH3bQVGDvmGLMme60= github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac= github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= @@ -543,6 +553,7 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -618,8 +629,8 @@ github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -753,10 +764,14 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/licensecheck v0.3.1 h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs= +github.com/google/licensecheck v0.3.1/go.mod h1:ORkR35t/JjW+emNKtfJDII0zlciG9JgbT7SmsohlHmY= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -781,6 +796,7 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20211122183932-1daafda22083 h1:c8EUapQFi+kjzedr4c6WqbwMdmB95+oDBWZ5XFHFYxY= github.com/google/pprof v0.0.0-20211122183932-1daafda22083/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -866,12 +882,16 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= 
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= @@ -1023,6 +1043,8 @@ github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/le github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.10/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -1031,6 +1053,8 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -1210,6 +1234,8 @@ github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8 github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4c5eA= github.com/shirou/gopsutil/v3 v3.22.9/go.mod h1:bBYl1kjgEJpWpxeHmLI+dVHWtyAwfcmSBLDsp2TNT8A= 
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= @@ -1223,6 +1249,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1237,8 +1264,12 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -1247,6 +1278,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spkg/bom v1.0.0 h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64= +github.com/spkg/bom v1.0.0/go.mod h1:lAz2VbTuYNcvs7iaFF8WW0ufXrHShJ7ck1fYFFbVXJs= github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U= github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1423,6 +1456,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1430,6 +1464,7 @@ golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1539,6 +1574,7 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220517181318-183a9ca12b87/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1680,6 +1716,7 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1796,8 +1833,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0 
h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2080,6 +2117,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index e2f04758dda6e..f796345bbd8e7 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -56,8 +56,7 @@ func newTestKitWithRoot(t *testing.T, store kv.Storage) *testkit.TestKit { func newTestKitWithPlanCache(t *testing.T, store kv.Storage) *testkit.TestKit { tk := testkit.NewTestKit(t, store) - se, err := session.CreateSession4TestWithOpt(store, &session.Opt{PreparedPlanCache: plannercore.NewLRUPlanCache(100, - 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session())}) + se, err := session.CreateSession4TestWithOpt(store, &session.Opt{PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session())}) require.NoError(t, err) tk.SetSession(se) tk.RefreshConnectionID() diff --git a/kv/option.go b/kv/option.go index 80d0f7792f172..d779ff61ac215 100644 --- a/kv/option.go +++ b/kv/option.go @@ -15,6 +15,8 @@ package kv import ( + "context" + "github.com/tikv/client-go/v2/util" ) @@ -136,6 +138,15 @@ type RequestSource = util.RequestSource // WithInternalSourceType create context with internal source. var WithInternalSourceType = util.WithInternalSourceType +// GetInternalSourceType gets the internal source type from the context. +func GetInternalSourceType(ctx context.Context) string { + v := ctx.Value(util.RequestSourceKey) + if v == nil { + return "" + } + return v.(util.RequestSource).RequestSourceType +} + const ( // InternalTxnOthers is the type of requests that consume low resources. // This reduces the size of metrics.
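The kv/option.go hunk above adds GetInternalSourceType as the read-side counterpart of the existing WithInternalSourceType alias, and the adapter.go change earlier in this patch uses it to skip plan-replayer capture for internal statistics requests. A minimal usage sketch, assuming only the behavior visible in the hunk (the getter returns "" for an untagged context):

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/tidb/kv"
)

func main() {
	// An untagged context carries no request source, so the getter returns "".
	fmt.Printf("%q\n", kv.GetInternalSourceType(context.Background()))

	// Tag the context as an internal statistics request, then read it back.
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	if kv.GetInternalSourceType(ctx) == kv.InternalTxnStats {
		fmt.Println("internal stats request: plan-replayer capture is skipped")
	}
}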
diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index a2d1f242a0ff6..c7af8385d556c 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -1563,7 +1563,7 @@ func (er *expressionRewriter) inToExpression(lLen int, not bool, tp *types.Field continue // no need to refine it } er.sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to INT", c.String())) - expression.RemoveMutableConst(er.sctx, []expression.Expression{c}) + expression.RemoveMutableConst(er.sctx, args) } args[i], isExceptional = expression.RefineComparedConstant(er.sctx, *leftFt, c, opcode.EQ) if isExceptional { diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index c2d3e1a62d7bc..6c5d2c49fddf0 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -944,6 +944,9 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if canConvertPointGet && expression.MaybeOverOptimized4PlanCache(ds.ctx, path.AccessConds) { canConvertPointGet = ds.canConvertToPointGetForPlanCache(path) } + if canConvertPointGet && path.Index != nil && path.Index.MVIndex { + canConvertPointGet = false // cannot use PointGet upon MVIndex + } if canConvertPointGet && !path.IsIntHandlePath { // We simply do not build [batch] point get for prefix indexes. This can be optimized. diff --git a/planner/core/indexmerge_path.go b/planner/core/indexmerge_path.go index 8a71e5d5d4f5a..6f62f30eef239 100644 --- a/planner/core/indexmerge_path.go +++ b/planner/core/indexmerge_path.go @@ -15,7 +15,6 @@ package core import ( - "fmt" "math" "strings" @@ -37,6 +36,16 @@ import ( // generateIndexMergePath generates IndexMerge AccessPaths on this DataSource. func (ds *DataSource) generateIndexMergePath() error { + var warningMsg string + stmtCtx := ds.ctx.GetSessionVars().StmtCtx + defer func() { + if len(ds.indexMergeHints) > 0 && warningMsg != "" { + ds.indexMergeHints = nil + stmtCtx.AppendWarning(errors.Errorf(warningMsg)) + logutil.BgLogger().Debug(warningMsg) + } + }() + // Consider the IndexMergePath. Now, we just generate `IndexMergePath` in DNF case. // Use allConds instread of pushedDownConds, // because we want to use IndexMerge even if some expr cannot be pushed to TiKV. @@ -46,11 +55,26 @@ func (ds *DataSource) generateIndexMergePath() error { indexMergeConds = append(indexMergeConds, expression.PushDownNot(ds.ctx, expr)) } - stmtCtx := ds.ctx.GetSessionVars().StmtCtx isPossibleIdxMerge := len(indexMergeConds) > 0 && // have corresponding access conditions, and (len(ds.possibleAccessPaths) > 1 || // (have multiple index paths, or (len(ds.possibleAccessPaths) == 1 && isMVIndexPath(ds.possibleAccessPaths[0]))) // have a MVIndex) + + if !isPossibleIdxMerge { + warningMsg = "IndexMerge is inapplicable or disabled. No available filter or available index." + return nil + } + sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || len(ds.indexMergeHints) > 0) && !stmtCtx.NoIndexMergeHint + if !sessionAndStmtPermission { + warningMsg = "IndexMerge is inapplicable or disabled. Got no_index_merge hint or tidb_enable_index_merge is off." + return nil + } + + if ds.tableInfo.TempTableType == model.TempTableLocal { + warningMsg = "IndexMerge is inapplicable or disabled. Cannot use IndexMerge on temporary table." + return nil + } + // We current do not consider `IndexMergePath`: // 1. If there is an index path. // 2. 
TODO: If there exists exprs that cannot be pushed down. This is to avoid wrongly estRow of Selection added by rule_predicate_push_down. @@ -87,26 +111,37 @@ func (ds *DataSource) generateIndexMergePath() error { } } - if isPossibleIdxMerge && sessionAndStmtPermission && needConsiderIndexMerge && ds.tableInfo.TempTableType != model.TempTableLocal { - err := ds.generateAndPruneIndexMergePath(indexMergeConds, ds.indexMergeHints != nil) - if err != nil { - return err - } - } else if len(ds.indexMergeHints) > 0 { + if !needConsiderIndexMerge { + warningMsg = "IndexMerge is inapplicable or disabled. " + return nil + } + regularPathCount := len(ds.possibleAccessPaths) + if err := ds.generateAndPruneIndexMergePath(indexMergeConds); err != nil { + return err + } + + // If without hints, it means that `enableIndexMerge` is true + if len(ds.indexMergeHints) == 0 { + return nil + } + // If len(indexMergeHints) > 0, then add warnings if index-merge hints cannot work. + if regularPathCount == len(ds.possibleAccessPaths) { ds.indexMergeHints = nil - var msg string - if !isPossibleIdxMerge { - msg = "No available filter or available index." - } else if !sessionAndStmtPermission { - msg = "Got no_index_merge hint or tidb_enable_index_merge is off." - } else if ds.tableInfo.TempTableType == model.TempTableLocal { - msg = "Cannot use IndexMerge on temporary table." - } - msg = fmt.Sprintf("IndexMerge is inapplicable or disabled. %s", msg) - stmtCtx.AppendWarning(errors.Errorf(msg)) - logutil.BgLogger().Debug(msg) + ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable")) + return nil } + // If len(indexMergeHints) > 0 and some index-merge paths were added, then prune all other non-index-merge paths. + ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] + minRowCount := ds.possibleAccessPaths[0].CountAfterAccess + for _, path := range ds.possibleAccessPaths { + if minRowCount < path.CountAfterAccess { + minRowCount = path.CountAfterAccess + } + } + if ds.stats.RowCount > minRowCount { + ds.stats = ds.tableStats.ScaleByExpectCnt(minRowCount) + } return nil } @@ -441,7 +476,7 @@ func (ds *DataSource) generateIndexMergeAndPaths(normalPathCnt int) *util.Access return indexMergePath } -func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expression.Expression, needPrune bool) error { +func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expression.Expression) error { regularPathCount := len(ds.possibleAccessPaths) // 1. Generate possible IndexMerge paths for `OR`. err := ds.generateIndexMergeOrPaths(indexMergeConds) @@ -461,36 +496,6 @@ func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expressio if mvIndexMergePath != nil { ds.possibleAccessPaths = append(ds.possibleAccessPaths, mvIndexMergePath...) } - - // 4. If needed, append a warning if no IndexMerge is generated. - - // If without hints, it means that `enableIndexMerge` is true - if len(ds.indexMergeHints) == 0 { - return nil - } - // With hints and without generated IndexMerge paths - if regularPathCount == len(ds.possibleAccessPaths) { - ds.indexMergeHints = nil - ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable")) - return nil - } - - // 4. If needPrune is true, prune non-IndexMerge paths. - - // Do not need to consider the regular paths in find_best_task(). - // So we can use index merge's row count as DataSource's row count. 
- if needPrune { - ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] - minRowCount := ds.possibleAccessPaths[0].CountAfterAccess - for _, path := range ds.possibleAccessPaths { - if minRowCount < path.CountAfterAccess { - minRowCount = path.CountAfterAccess - } - } - if ds.stats.RowCount > minRowCount { - ds.stats = ds.tableStats.ScaleByExpectCnt(minRowCount) - } - } return nil }
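With the early-return structure introduced above, a use_index_merge hint that cannot be honored now surfaces exactly one warning naming the blocker. A testkit-style sketch of the temporary-table branch (this test is not in the diff; the schema and test name are illustrative, the warning text is the one assigned to warningMsg above):

func TestIndexMergeTempTableWarningSketch(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create temporary table tmp(a int, b int, index ia(a), index ib(b))")
	// The hint requests IndexMerge, the TempTableLocal branch rejects it, and
	// the deferred handler turns warningMsg into a user-visible warning.
	tk.MustQuery("select /*+ use_index_merge(tmp, ia, ib) */ * from tmp where a = 1 or b = 1")
	tk.MustQuery("show warnings").Check(testkit.Rows(
		"Warning 1105 IndexMerge is inapplicable or disabled. Cannot use IndexMerge on temporary table."))
}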
diff --git a/planner/core/indexmerge_path_test.go b/planner/core/indexmerge_path_test.go index 1119cfb5c666e..689893028e937 100644 --- a/planner/core/indexmerge_path_test.go +++ b/planner/core/indexmerge_path_test.go @@ -15,13 +15,53 @@ package core_test import ( + "strings" "testing" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/testdata" + "github.com/stretchr/testify/require" ) +func TestAnalyzeMVIndex(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table t(a int, b int, c int, j json, +index(a), index(b), +index idx(a, b, (cast(j as signed array)), c), +index idx2(a, b, (cast(j->'$.str' as char(10) array)), c))`) + + tk.MustExec("set tidb_analyze_version=2") + tk.MustExec("analyze table t") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) + tk.MustExec("analyze table t index idx") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t", + "Warning 1105 The version 2 would collect all statistics not only the selected indexes", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) + + tk.MustExec("set tidb_analyze_version=1") + tk.MustExec("analyze table t") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) + tk.MustExec("analyze table t index idx") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx")) + tk.MustExec("analyze table t index a") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows()) + tk.MustExec("analyze table t index a, idx, idx2") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) +} + func TestIndexMergeJSONMemberOf(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -149,3 +189,25 @@ func TestMVIndexIndexMergePlanCache(t *testing.T) { tk.MustExec("execute st") tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) } + +func TestMVIndexPointGet(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table t(j json, unique kj((cast(j as signed array))))`) + + for _, sql := range []string{ + "select j from t where j=1", + "select j from t where j=1 or j=2", + "select j from t where j in (1, 2)", + } { + plan := tk.MustQuery("explain " + sql).Rows() + hasPointGet := false + for _, line := range plan { + if strings.Contains(strings.ToLower(line[0].(string)), "point") { + hasPointGet = true + } + } + require.False(t, hasPointGet) // no point-get plan + } +} diff --git a/planner/core/plan_cache_lru.go b/planner/core/plan_cache_lru.go index 062ed6cc13735..ac3c90b269056 100644 --- a/planner/core/plan_cache_lru.go +++ b/planner/core/plan_cache_lru.go @@ -51,9 +51,6 @@ type LRUPlanCache struct { lruList *list.List // lock make cache thread safe lock sync.Mutex - - // pickFromBucket get one element from bucket. The LRUPlanCache can not work if it is nil - pickFromBucket func(map[*list.Element]struct{}, *planCacheMatchOpts) (*list.Element, bool) // onEvict will be called if any eviction happened, only for test use now onEvict func(kvcache.Key, kvcache.Value) @@ -67,21 +64,19 @@ type LRUPlanCache struct { // NewLRUPlanCache creates a PCLRUCache object, whose capacity is "capacity". // NOTE: "capacity" should be a positive value. -func NewLRUPlanCache(capacity uint, guard float64, quota uint64, - pickFromBucket func(map[*list.Element]struct{}, *planCacheMatchOpts) (*list.Element, bool), sctx sessionctx.Context) *LRUPlanCache { +func NewLRUPlanCache(capacity uint, guard float64, quota uint64, sctx sessionctx.Context) *LRUPlanCache { if capacity < 1 { capacity = 100 logutil.BgLogger().Info("capacity of LRU cache is less than 1, will use default value(100) init cache") } return &LRUPlanCache{ - capacity: capacity, - size: 0, - buckets: make(map[string]map[*list.Element]struct{}, 1), //Generally one query has one plan - lruList: list.New(), - pickFromBucket: pickFromBucket, - quota: quota, - guard: guard, - sctx: sctx, + capacity: capacity, + size: 0, + buckets: make(map[string]map[*list.Element]struct{}, 1), // Generally one query has one plan + lruList: list.New(), + quota: quota, + guard: guard, + sctx: sctx, } } @@ -260,7 +255,7 @@ func (l *LRUPlanCache) memoryControl() { } // PickPlanFromBucket pick one plan from bucket -func PickPlanFromBucket(bucket map[*list.Element]struct{}, matchOpts *planCacheMatchOpts) (*list.Element, bool) { +func (l *LRUPlanCache) pickFromBucket(bucket map[*list.Element]struct{}, matchOpts *planCacheMatchOpts) (*list.Element, bool) { for k := range bucket { plan := k.Value.(*planCacheEntry).PlanValue.(*PlanCacheValue) ok1 := plan.matchOpts.paramTypes.CheckTypesCompatibility4PC(matchOpts.paramTypes) diff --git a/planner/core/plan_cache_lru_test.go b/planner/core/plan_cache_lru_test.go index 72e4549b337a9..11b145ef4c372 100644 --- a/planner/core/plan_cache_lru_test.go +++ b/planner/core/plan_cache_lru_test.go @@ -47,11 +47,11 @@ func randomPlanCacheValue(types []*types.FieldType) *PlanCacheValue { func TestLRUPCPut(t *testing.T) { // test initialize - lruA := NewLRUPlanCache(0, 0, 0, PickPlanFromBucket, MockContext()) + lruA := NewLRUPlanCache(0, 0, 0, MockContext()) require.Equal(t, lruA.capacity, uint(100)) maxMemDroppedKv := make(map[kvcache.Key]kvcache.Value) - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, MockContext()) lru.onEvict = func(key kvcache.Key, value kvcache.Value) { maxMemDroppedKv[key] = value } @@ -131,7 +131,7 @@ func TestLRUPCPut(t *testing.T) { } func TestLRUPCGet(t *testing.T) { - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, MockContext()) keys := make([]*planCacheKey, 5) vals := make([]*PlanCacheValue, 5) @@ -185,7 +185,7 @@ func TestLRUPCGet(t *testing.T) { } func TestLRUPCDelete(t *testing.T) { - lru := 
NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, MockContext()) keys := make([]*planCacheKey, 3) vals := make([]*PlanCacheValue, 3) @@ -222,7 +222,7 @@ func TestLRUPCDelete(t *testing.T) { } func TestLRUPCDeleteAll(t *testing.T) { - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, MockContext()) keys := make([]*planCacheKey, 3) vals := make([]*PlanCacheValue, 3) @@ -253,7 +253,7 @@ func TestLRUPCDeleteAll(t *testing.T) { func TestLRUPCSetCapacity(t *testing.T) { maxMemDroppedKv := make(map[kvcache.Key]kvcache.Value) - lru := NewLRUPlanCache(5, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(5, 0, 0, MockContext()) lru.onEvict = func(key kvcache.Key, value kvcache.Value) { maxMemDroppedKv[key] = value } @@ -318,7 +318,7 @@ func TestLRUPCSetCapacity(t *testing.T) { } func TestIssue37914(t *testing.T) { - lru := NewLRUPlanCache(3, 0.1, 1, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0.1, 1, MockContext()) pTypes := []*types.FieldType{types.NewFieldType(mysql.TypeFloat), types.NewFieldType(mysql.TypeDouble)} key := &planCacheKey{database: strconv.FormatInt(int64(1), 10)} @@ -330,7 +330,7 @@ func TestIssue37914(t *testing.T) { } func TestIssue38244(t *testing.T) { - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, MockContext()) require.Equal(t, uint(3), lru.capacity) keys := make([]*planCacheKey, 5) @@ -357,7 +357,7 @@ func TestLRUPlanCacheMemoryUsage(t *testing.T) { pTypes := []*types.FieldType{types.NewFieldType(mysql.TypeFloat), types.NewFieldType(mysql.TypeDouble)} ctx := MockContext() ctx.GetSessionVars().EnablePreparedPlanCacheMemoryMonitor = true - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, ctx) + lru := NewLRUPlanCache(3, 0, 0, ctx) evict := make(map[kvcache.Key]kvcache.Value) lru.onEvict = func(key kvcache.Key, value kvcache.Value) { evict[key] = value diff --git a/planner/core/plan_cache_test.go b/planner/core/plan_cache_test.go index 8acc28b7b0062..9f8ff161fd658 100644 --- a/planner/core/plan_cache_test.go +++ b/planner/core/plan_cache_test.go @@ -78,7 +78,7 @@ func TestInitLRUWithSystemVar(t *testing.T) { tk.MustQuery("select @@session.tidb_prepared_plan_cache_size").Check(testkit.Rows("1")) sessionVar := tk.Session().GetSessionVars() - lru := plannercore.NewLRUPlanCache(uint(sessionVar.PreparedPlanCacheSize), 0, 0, plannercore.PickPlanFromBucket, tk.Session()) + lru := plannercore.NewLRUPlanCache(uint(sessionVar.PreparedPlanCacheSize), 0, 0, tk.Session()) require.NotNil(t, lru) } @@ -503,3 +503,22 @@ func TestPlanCacheWithLimit(t *testing.T) { tk.MustExec("execute stmt using @a") tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip plan-cache: limit count more than 10000")) } + +func TestIssue40679(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int, key(a));") + tk.MustExec("prepare st from 'select * from t use index(a) where a < ?'") + tk.MustExec("set @a1=1.1") + tk.MustExec("execute st using @a1") + + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.True(t, strings.Contains(rows[1][0].(string), "RangeScan")) // RangeScan not FullScan + + tk.MustExec("execute st using @a1") + 
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip plan-cache: '1.1' may be converted to INT")) +} diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 5af0bb004d6c9..d14a6bf51ea49 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -2320,12 +2320,16 @@ func getColOffsetForAnalyze(colsInfo []*model.ColumnInfo, colID int64) int { // in tblInfo.Indices, index.Columns[i].Offset is set according to tblInfo.Columns. Since we decode row samples according to colsInfo rather than tbl.Columns // in the execution phase of ANALYZE, we need to modify index.Columns[i].Offset according to colInfos. // TODO: find a better way to find indexed columns in ANALYZE rather than use IndexColumn.Offset -func getModifiedIndexesInfoForAnalyze(tblInfo *model.TableInfo, allColumns bool, colsInfo []*model.ColumnInfo) []*model.IndexInfo { +func getModifiedIndexesInfoForAnalyze(sctx sessionctx.Context, tblInfo *model.TableInfo, allColumns bool, colsInfo []*model.ColumnInfo) []*model.IndexInfo { idxsInfo := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) for _, originIdx := range tblInfo.Indices { if originIdx.State != model.StatePublic { continue } + if originIdx.MVIndex { + sctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", originIdx.Name.L)) + continue + } if allColumns { // If all the columns need to be analyzed, we don't need to modify IndexColumn.Offset. idxsInfo = append(idxsInfo, originIdx) @@ -2401,7 +2405,7 @@ func (b *PlanBuilder) buildAnalyzeFullSamplingTask( execColsInfo = colsInfo } allColumns := len(tbl.TableInfo.Columns) == len(execColsInfo) - indexes := getModifiedIndexesInfoForAnalyze(tbl.TableInfo, allColumns, execColsInfo) + indexes := getModifiedIndexesInfoForAnalyze(b.ctx, tbl.TableInfo, allColumns, execColsInfo) handleCols := BuildHandleColsForAnalyze(b.ctx, tbl.TableInfo, allColumns, execColsInfo) newTask := AnalyzeColumnsTask{ HandleCols: handleCols, @@ -2631,6 +2635,10 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A commonHandleInfo = idx continue } + if idx.MVIndex { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", idx.Name.L)) + continue + } for i, id := range physicalIDs { if id == tbl.TableInfo.ID { id = -1 @@ -2724,6 +2732,10 @@ func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.A if idx == nil || idx.State != model.StatePublic { return nil, ErrAnalyzeMissIndex.GenWithStackByArgs(idxName.O, tblInfo.Name.O) } + if idx.MVIndex { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", idx.Name.L)) + continue + } for i, id := range physicalIDs { if id == tblInfo.ID { id = -1 @@ -2766,6 +2778,11 @@ func (b *PlanBuilder) buildAnalyzeAllIndex(as *ast.AnalyzeTableStmt, opts map[as } for _, idx := range tblInfo.Indices { if idx.State == model.StatePublic { + if idx.MVIndex { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", idx.Name.L)) + continue + } + for i, id := range physicalIDs { if id == tblInfo.ID { id = -1 diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 0453d29c59842..1b285a83a1596 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -731,7 +731,7 @@ func newBatchPointGetPlan( } } for _, idxInfo := range tbl.Indices { - if 
!idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || + if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || idxInfo.MVIndex || !indexIsAvailableByHints(idxInfo, indexHints) { continue } @@ -1099,7 +1099,7 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt, check bool var err error for _, idxInfo := range tbl.Indices { - if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || + if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || idxInfo.MVIndex || !indexIsAvailableByHints(idxInfo, tblName.IndexHints) { continue } diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go index 656aed73ca189..c217cafbdb242 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -1339,7 +1339,7 @@ func TestPlanCacheSwitchDB(t *testing.T) { // DB is not specified se2, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: core.NewLRUPlanCache(100, 0.1, math.MaxUint64, core.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: core.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk2 := testkit.NewTestKitWithSession(t, store, se2) diff --git a/resourcemanager/pooltask/BUILD.bazel b/resourcemanager/pooltask/BUILD.bazel index 151a0ddfdec02..c4113b69dd141 100644 --- a/resourcemanager/pooltask/BUILD.bazel +++ b/resourcemanager/pooltask/BUILD.bazel @@ -5,6 +5,8 @@ go_library( srcs = [ "task.go", "task_manager.go", + "task_manager_iterator.go", + "task_manager_scheduler.go", ], importpath = "github.com/pingcap/tidb/resourcemanager/pooltask", visibility = ["//visibility:public"], diff --git a/resourcemanager/pooltask/task_manager.go b/resourcemanager/pooltask/task_manager.go index 25ce9e8ad1b4b..66d6451b163ba 100644 --- a/resourcemanager/pooltask/task_manager.go +++ b/resourcemanager/pooltask/task_manager.go @@ -32,29 +32,29 @@ type tContainer[T any, U any, C any, CT any, TF Context[CT]] struct { task *TaskBox[T, U, C, CT, TF] } -type meta struct { - stats *list.List - createTS time.Time - origin int32 - running int32 +type meta[T any, U any, C any, CT any, TF Context[CT]] struct { + stats *list.List + createTS time.Time + initialConcurrency int32 + running atomic.Int32 } -func newStats(concurrency int32) *meta { - s := &meta{ - createTS: time.Now(), - stats: list.New(), - origin: concurrency, +func newStats[T any, U any, C any, CT any, TF Context[CT]](concurrency int32) *meta[T, U, C, CT, TF] { + s := &meta[T, U, C, CT, TF]{ + createTS: time.Now(), + stats: list.New(), + initialConcurrency: concurrency, } return s } -func (m *meta) getOriginConcurrency() int32 { - return m.origin +func (m *meta[T, U, C, CT, TF]) getOriginConcurrency() int32 { + return m.initialConcurrency } // TaskStatusContainer is a container that can control or watch the pool. 
type TaskStatusContainer[T any, U any, C any, CT any, TF Context[CT]] struct { - stats map[uint64]*meta + stats map[uint64]*meta[T, U, C, CT, TF] rw sync.RWMutex } @@ -70,7 +70,7 @@ func NewTaskManager[T any, U any, C any, CT any, TF Context[CT]](c int32) TaskMa task := make([]TaskStatusContainer[T, U, C, CT, TF], shard) for i := 0; i < shard; i++ { task[i] = TaskStatusContainer[T, U, C, CT, TF]{ - stats: make(map[uint64]*meta), + stats: make(map[uint64]*meta[T, U, C, CT, TF]), } } return TaskManager[T, U, C, CT, TF]{ @@ -83,7 +83,7 @@ func NewTaskManager[T any, U any, C any, CT any, TF Context[CT]](c int32) TaskMa func (t *TaskManager[T, U, C, CT, TF]) RegisterTask(taskID uint64, concurrency int32) { id := getShardID(taskID) t.task[id].rw.Lock() - t.task[id].stats[taskID] = newStats(concurrency) + t.task[id].stats[taskID] = newStats[T, U, C, CT, TF](concurrency) t.task[id].rw.Unlock() } @@ -113,7 +113,7 @@ func (t *TaskManager[T, U, C, CT, TF]) AddSubTask(taskID uint64, task *TaskBox[T t.running.Inc() t.task[shardID].rw.Lock() t.task[shardID].stats[taskID].stats.PushBack(tc) - t.task[shardID].stats[taskID].running++ // running job in this task + t.task[shardID].stats[taskID].running.Inc() // running job in this task t.task[shardID].rw.Unlock() } @@ -122,7 +122,7 @@ func (t *TaskManager[T, U, C, CT, TF]) ExitSubTask(taskID uint64) { shardID := getShardID(taskID) t.running.Dec() // total running tasks t.task[shardID].rw.Lock() - t.task[shardID].stats[taskID].running-- // running job in this task + t.task[shardID].stats[taskID].running.Dec() // running job in this task t.task[shardID].rw.Unlock() } @@ -131,7 +131,7 @@ func (t *TaskManager[T, U, C, CT, TF]) Running(taskID uint64) int32 { shardID := getShardID(taskID) t.task[shardID].rw.Lock() defer t.task[shardID].rw.Unlock() - return t.task[shardID].stats[taskID].running + return t.task[shardID].stats[taskID].running.Load() } // StopTask is to stop a task by TaskID.
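The new task_manager_iterator.go below chooses which task to boost or pause via two predicates, canBoost and canPause. A self-contained toy model of those rules, not the PR's API (the struct and field names here are simplified stand-ins):

package main

import (
	"fmt"
	"time"
)

type taskMeta struct {
	createTS           time.Time
	initialConcurrency int32
	running            int32
}

// canBoost prefers a task whose running workers are below its initial
// concurrency (and stops the scan there); otherwise a more recently created
// task is kept as a fallback candidate.
func canBoost(m taskMeta, min time.Time) (ok, stop bool) {
	if m.running < m.initialConcurrency {
		return true, true
	}
	return m.createTS.After(min), false
}

// canPause prefers a task running above its initial concurrency, i.e. one
// that was boosted earlier (and stops the scan there); otherwise an older
// task is kept as a fallback candidate.
func canPause(m taskMeta, max time.Time) (ok, stop bool) {
	if m.initialConcurrency < m.running {
		return true, true
	}
	return m.createTS.Before(max), false
}

func main() {
	now := time.Now()
	boosted := taskMeta{createTS: now.Add(-time.Minute), initialConcurrency: 4, running: 5}
	steady := taskMeta{createTS: now, initialConcurrency: 4, running: 4}

	ok, stop := canPause(boosted, steady.createTS)
	fmt.Println(ok, stop) // true true: a task running above its quota is paused first

	ok, stop = canBoost(steady, boosted.createTS)
	fmt.Println(ok, stop) // true false: a newer task is only a fallback boost candidate
}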
diff --git a/resourcemanager/pooltask/task_manager_iterator.go b/resourcemanager/pooltask/task_manager_iterator.go new file mode 100644 index 0000000000000..ada5994599ff5 --- /dev/null +++ b/resourcemanager/pooltask/task_manager_iterator.go @@ -0,0 +1,131 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pooltask + +import ( + "container/list" + "time" +) + +func (t *TaskManager[T, U, C, CT, TF]) getBoostTask() (tid uint64, result *TaskBox[T, U, C, CT, TF]) { + // boost task: + // 1. the count of running workers is less than the task's concurrency + // 2. the less run time a task has, the more likely it is to be boosted + tid, element := t.iter(canBoost[T, U, C, CT, TF]) + if element != nil { + return tid, element.Value.(tContainer[T, U, C, CT, TF]).task + } + return 0, nil +} + +func (t *TaskManager[T, U, C, CT, TF]) pauseTask() { + // pause task: + // 1. the more run time a task has, the more likely it is to be paused + // 2. if a task has been boosted, pause it first. + tid, result := t.iter(canPause[T, U, C, CT, TF]) + if result != nil { + result.Value.(tContainer[T, U, C, CT, TF]).task.status.CompareAndSwap(RunningTask, StopTask) + // delete it from list + shardID := getShardID(tid) + t.task[shardID].rw.Lock() + defer t.task[shardID].rw.Unlock() + t.task[shardID].stats[tid].stats.Remove(result) + } +} + +func (t *TaskManager[T, U, C, CT, TF]) iter(fn func(m *meta[T, U, C, CT, TF], max time.Time) (*list.Element, bool)) (tid uint64, result *list.Element) { + var compareTS time.Time + for i := 0; i < shard; i++ { + breakFind := func(index int) (breakFind bool) { + t.task[i].rw.RLock() + defer t.task[i].rw.RUnlock() + for id, stats := range t.task[i].stats { + if result == nil { + result = findTask[T, U, C, CT, TF](stats, RunningTask) + tid = id + compareTS = stats.createTS + continue + } + newResult, pauseFind := fn(stats, compareTS) + if pauseFind { + result = newResult + tid = id + compareTS = stats.createTS + return true + } + if newResult != nil { + result = newResult + tid = id + compareTS = stats.createTS + } + } + return false + }(shard) + if breakFind { + break + } + } + return tid, result +} + +func canPause[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF], max time.Time) (result *list.Element, isBreak bool) { + if m.initialConcurrency < m.running.Load() { + box := findTask[T, U, C, CT, TF](m, RunningTask) + if box != nil { + return box, true + } + } + if m.createTS.Before(max) { + box := findTask[T, U, C, CT, TF](m, RunningTask) + if box != nil { + return box, false + } + } + return nil, false +} + +func canBoost[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF], min time.Time) (result *list.Element, isBreak bool) { + if m.running.Load() < m.initialConcurrency { + box := getTask[T, U, C, CT, TF](m) + if box != nil { + return box, true + } + } + if m.createTS.After(min) { + box := getTask[T, U, C, CT, TF](m) + if box != nil { + return box, false + } + } + return nil, false +} + +func findTask[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF], status int32) *list.Element { + for e := m.stats.Front(); e != nil; e = e.Next() { + box := e.Value.(tContainer[T, U, C, CT, TF]) + if box.task.status.Load() == status { + return e + } + } + return nil +} + +func getTask[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF]) *list.Element { + e := m.stats.Front() + if e != nil { + return e + } + return nil +} diff --git a/resourcemanager/pooltask/task_manager_scheduler.go b/resourcemanager/pooltask/task_manager_scheduler.go new file mode 100644 index 0000000000000..dcc158df06d82 --- /dev/null +++ b/resourcemanager/pooltask/task_manager_scheduler.go @@ -0,0 +1,28 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pooltask + +// Overclock is to increase the concurrency of pool. 
+func (t *TaskManager[T, U, C, CT, TF]) Overclock() (tid uint64, task *TaskBox[T, U, C, CT, TF]) { + if t.concurrency > t.running.Load() { + return t.getBoostTask() + } + return 0, nil +} + +// Downclock is to decrease the concurrency of pool. +func (t *TaskManager[T, U, C, CT, TF]) Downclock() { + t.pauseTask() +} diff --git a/resourcemanager/rm.go b/resourcemanager/rm.go index e6e48de2059cd..025eb0fcbc129 100644 --- a/resourcemanager/rm.go +++ b/resourcemanager/rm.go @@ -78,7 +78,7 @@ func (r *ResourceManager) Stop() { } // Register is to register pool into resource manager -func (r *ResourceManager) Register(pool util.GorotinuePool, name string, component util.Component) error { +func (r *ResourceManager) Register(pool util.GoroutinePool, name string, component util.Component) error { p := util.PoolContainer{Pool: pool, Component: component} return r.registerPool(name, &p) } diff --git a/resourcemanager/schedule.go b/resourcemanager/schedule.go index f6ac691e09b15..50a5f54697800 100644 --- a/resourcemanager/schedule.go +++ b/resourcemanager/schedule.go @@ -52,14 +52,14 @@ func (*ResourceManager) exec(pool *util.PoolContainer, cmd scheduler.Command) { switch cmd { case scheduler.Downclock: concurrency := con - 1 - log.Info("downclock goroutine pool", + log.Info("[resource manager] downclock goroutine pool", zap.Int("origin concurrency", con), zap.Int("concurrency", concurrency), zap.String("name", pool.Pool.Name())) pool.Pool.Tune(concurrency) case scheduler.Overclock: concurrency := con + 1 - log.Info("overclock goroutine pool", + log.Info("[resource manager] overclock goroutine pool", zap.Int("origin concurrency", con), zap.Int("concurrency", concurrency), zap.String("name", pool.Pool.Name())) diff --git a/resourcemanager/scheduler/cpu_scheduler.go b/resourcemanager/scheduler/cpu_scheduler.go index c84fcf36fb697..14338d80683d4 100644 --- a/resourcemanager/scheduler/cpu_scheduler.go +++ b/resourcemanager/scheduler/cpu_scheduler.go @@ -30,7 +30,7 @@ func NewCPUScheduler() *CPUScheduler { } // Tune is to tune the goroutine pool -func (*CPUScheduler) Tune(_ util.Component, pool util.GorotinuePool) Command { +func (*CPUScheduler) Tune(_ util.Component, pool util.GoroutinePool) Command { if time.Since(pool.LastTunerTs()) < util.MinSchedulerInterval.Load() { return Hold } diff --git a/resourcemanager/scheduler/scheduler.go b/resourcemanager/scheduler/scheduler.go index 3af8e6aff5b0b..521536a741dee 100644 --- a/resourcemanager/scheduler/scheduler.go +++ b/resourcemanager/scheduler/scheduler.go @@ -32,5 +32,5 @@ const ( // Scheduler is a scheduler interface type Scheduler interface { - Tune(component util.Component, p util.GorotinuePool) Command + Tune(component util.Component, p util.GoroutinePool) Command } diff --git a/resourcemanager/util/util.go b/resourcemanager/util/util.go index 4d433975fabb7..6d1959bd08904 100644 --- a/resourcemanager/util/util.go +++ b/resourcemanager/util/util.go @@ -25,8 +25,8 @@ var ( MinSchedulerInterval = atomic.NewDuration(200 * time.Millisecond) ) -// GorotinuePool is a pool interface -type GorotinuePool interface { +// GoroutinePool is a pool interface +type GoroutinePool interface { ReleaseAndWait() Tune(size int) LastTunerTs() time.Time @@ -37,7 +37,7 @@ type GorotinuePool interface { // PoolContainer is a pool container type PoolContainer struct { - Pool GorotinuePool + Pool GoroutinePool Component Component } diff --git a/session/bootstrap.go b/session/bootstrap.go index 74eecb28a68a4..ed65bb0720cf0 100644 --- a/session/bootstrap.go +++ 
b/session/bootstrap.go @@ -516,6 +516,27 @@ const ( created_time timestamp NOT NULL, primary key(job_id, scan_id), key(created_time));` + + // CreateTTLJobHistory is a table that stores ttl job's history + CreateTTLJobHistory = `CREATE TABLE IF NOT EXISTS mysql.tidb_ttl_job_history ( + job_id varchar(64) PRIMARY KEY, + table_id bigint(64) NOT NULL, + parent_table_id bigint(64) NOT NULL, + table_schema varchar(64) NOT NULL, + table_name varchar(64) NOT NULL, + partition_name varchar(64) DEFAULT NULL, + create_time timestamp NOT NULL, + finish_time timestamp NOT NULL, + ttl_expire timestamp NOT NULL, + summary_text text, + expired_rows bigint(64) DEFAULT NULL, + deleted_rows bigint(64) DEFAULT NULL, + error_delete_rows bigint(64) DEFAULT NULL, + status varchar(64) NOT NULL, + key(table_schema, table_name, create_time), + key(parent_table_id, create_time), + key(create_time) + );` ) // bootstrap initiates system DB for a store. @@ -757,7 +778,7 @@ const ( version109 = 109 // version110 sets tidb_enable_gc_aware_memory_track to off when a cluster upgrades from some version lower than v6.5.0. version110 = 110 - // version111 adds the table tidb_ttl_task + // version111 adds the table tidb_ttl_task and tidb_ttl_job_history version111 = 111 ) @@ -2239,6 +2260,7 @@ func upgradeToVer111(s Session, ver int64) { return } doReentrantDDL(s, CreateTTLTask) + doReentrantDDL(s, CreateTTLJobHistory) } func writeOOMAction(s Session) { @@ -2349,6 +2371,8 @@ func doDDLWorks(s Session) { mustExecute(s, CreateTTLTableStatus) // Create tidb_ttl_task table mustExecute(s, CreateTTLTask) + // Create tidb_ttl_job_history table + mustExecute(s, CreateTTLJobHistory) } // doBootstrapSQLFile executes SQL commands in a file as the last stage of bootstrap. diff --git a/session/session.go b/session/session.go index 7e62b74d53a64..11226483660bb 100644 --- a/session/session.go +++ b/session/session.go @@ -465,8 +465,7 @@ func (s *session) GetPlanCache(isNonPrepared bool) sessionctx.PlanCache { } if s.nonPreparedPlanCache == nil { // lazy construction s.nonPreparedPlanCache = plannercore.NewLRUPlanCache(uint(s.GetSessionVars().NonPreparedPlanCacheSize), - variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), - plannercore.PickPlanFromBucket, s) + variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), s) } return s.nonPreparedPlanCache } @@ -477,8 +476,7 @@ func (s *session) GetPlanCache(isNonPrepared bool) sessionctx.PlanCache { } if s.preparedPlanCache == nil { // lazy construction s.preparedPlanCache = plannercore.NewLRUPlanCache(uint(s.GetSessionVars().PreparedPlanCacheSize), - variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), - plannercore.PickPlanFromBucket, s) + variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), s) } return s.preparedPlanCache } diff --git a/sessionctx/variable/featuretag/distributereorg/BUILD.bazel b/sessionctx/variable/featuretag/distributereorg/BUILD.bazel index 153ce052ecbb2..f31f9ddd6e2d8 100644 --- a/sessionctx/variable/featuretag/distributereorg/BUILD.bazel +++ b/sessionctx/variable/featuretag/distributereorg/BUILD.bazel @@ -4,7 +4,7 @@ go_library( name = "distributereorg", srcs = [ "default.go", - "non_default.go", + "non_default.go", #keep ], importpath = "github.com/pingcap/tidb/sessionctx/variable/featuretag/distributereorg", visibility = ["//visibility:public"], diff --git a/sessionctx/variable/sysvar.go 
b/sessionctx/variable/sysvar.go index 3f5ba31412074..af9711cdca1da 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1188,12 +1188,29 @@ var defaultSysVars = []*SysVar{ /* The system variables below have GLOBAL and SESSION scope */ {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePlanReplayerContinuesCapture, Value: BoolToOnOff(false), Type: TypeBool, SetSession: func(s *SessionVars, val string) error { + historicalStatsEnabled, err := s.GlobalVarsAccessor.GetGlobalSysVar(TiDBEnableHistoricalStats) + if err != nil { + return err + } + if !TiDBOptOn(historicalStatsEnabled) && TiDBOptOn(val) { + return errors.Errorf("%v should be enabled before enabling %v", TiDBEnableHistoricalStats, TiDBEnablePlanReplayerContinuesCapture) + } s.EnablePlanReplayedContinuesCapture = TiDBOptOn(val) return nil }, GetSession: func(vars *SessionVars) (string, error) { return BoolToOnOff(vars.EnablePlanReplayedContinuesCapture), nil }, + Validation: func(vars *SessionVars, s string, s2 string, flag ScopeFlag) (string, error) { + historicalStatsEnabled, err := vars.GlobalVarsAccessor.GetGlobalSysVar(TiDBEnableHistoricalStats) + if err != nil { + return "", err + } + if !TiDBOptOn(historicalStatsEnabled) && TiDBOptOn(s) { + return "", errors.Errorf("%v should be enabled before enabling %v", TiDBEnableHistoricalStats, TiDBEnablePlanReplayerContinuesCapture) + } + return s, nil + }, }, {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePlanReplayerCapture, Value: BoolToOnOff(true), Type: TypeBool, SetSession: func(s *SessionVars, val string) error { diff --git a/statistics/BUILD.bazel b/statistics/BUILD.bazel index e6992020197c3..8dccd523fc887 100644 --- a/statistics/BUILD.bazel +++ b/statistics/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "fmsketch.go", "histogram.go", "index.go", + "interact_with_storage.go", "merge_worker.go", "row_sampler.go", "sample.go", diff --git a/statistics/handle/dump.go b/statistics/handle/dump.go index 81e982881ee83..da8603ea90573 100644 --- a/statistics/handle/dump.go +++ b/statistics/handle/dump.go @@ -263,7 +263,7 @@ func (h *Handle) tableHistoricalStatsToJSON(physicalID int64, snapshot uint64) ( }() // get meta version - rows, _, err := reader.read("select distinct version from mysql.stats_meta_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) + rows, _, err := reader.Read("select distinct version from mysql.stats_meta_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) if err != nil { return nil, errors.AddStack(err) } @@ -272,14 +272,14 @@ func (h *Handle) tableHistoricalStatsToJSON(physicalID int64, snapshot uint64) ( } statsMetaVersion := rows[0].GetInt64(0) // get stats meta - rows, _, err = reader.read("select modify_count, count from mysql.stats_meta_history where table_id = %? and version = %?", physicalID, statsMetaVersion) + rows, _, err = reader.Read("select modify_count, count from mysql.stats_meta_history where table_id = %? and version = %?", physicalID, statsMetaVersion) if err != nil { return nil, errors.AddStack(err) } modifyCount, count := rows[0].GetInt64(0), rows[0].GetInt64(1) // get stats version - rows, _, err = reader.read("select distinct version from mysql.stats_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) + rows, _, err = reader.Read("select distinct version from mysql.stats_history where table_id = %? and version <= %? 
order by version desc limit 1", physicalID, snapshot) if err != nil { return nil, errors.AddStack(err) } @@ -289,7 +289,7 @@ func (h *Handle) tableHistoricalStatsToJSON(physicalID int64, snapshot uint64) ( statsVersion := rows[0].GetInt64(0) // get stats - rows, _, err = reader.read("select stats_data from mysql.stats_history where table_id = %? and version = %? order by seq_no", physicalID, statsVersion) + rows, _, err = reader.Read("select stats_data from mysql.stats_history where table_id = %? and version = %? order by seq_no", physicalID, statsVersion) if err != nil { return nil, errors.AddStack(err) } diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index fc4f86dc54fb8..fd4b32fdb3c10 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -1067,7 +1067,7 @@ func (h *Handle) LoadNeededHistograms() (err error) { return nil } -func (h *Handle) loadNeededColumnHistograms(reader *statsReader, col model.TableItemID, loadFMSketch bool) (err error) { +func (h *Handle) loadNeededColumnHistograms(reader *statistics.StatsReader, col model.TableItemID, loadFMSketch bool) (err error) { oldCache := h.statsCache.Load().(statsCache) tbl, ok := oldCache.Get(col.TableID) if !ok { @@ -1093,7 +1093,7 @@ func (h *Handle) loadNeededColumnHistograms(reader *statsReader, col model.Table return errors.Trace(err) } } - rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where is_index = 0 and table_id = %? and hist_id = %?", col.TableID, col.ID) + rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where is_index = 0 and table_id = %? and hist_id = %?", col.TableID, col.ID) if err != nil { return errors.Trace(err) } @@ -1134,7 +1134,7 @@ func (h *Handle) loadNeededColumnHistograms(reader *statsReader, col model.Table return nil } -func (h *Handle) loadNeededIndexHistograms(reader *statsReader, idx model.TableItemID, loadFMSketch bool) (err error) { +func (h *Handle) loadNeededIndexHistograms(reader *statistics.StatsReader, idx model.TableItemID, loadFMSketch bool) (err error) { oldCache := h.statsCache.Load().(statsCache) tbl, ok := oldCache.Get(idx.TableID) if !ok { @@ -1160,7 +1160,7 @@ func (h *Handle) loadNeededIndexHistograms(reader *statsReader, idx model.TableI return errors.Trace(err) } } - rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where is_index = 1 and table_id = %? and hist_id = %?", idx.TableID, idx.ID) + rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where is_index = 1 and table_id = %? and hist_id = %?", idx.TableID, idx.ID) if err != nil { return errors.Trace(err) } @@ -1214,12 +1214,12 @@ func (h *Handle) FlushStats() { } } -func (h *Handle) cmSketchAndTopNFromStorage(reader *statsReader, tblID int64, isIndex, histID int64) (_ *statistics.CMSketch, _ *statistics.TopN, err error) { - topNRows, _, err := reader.read("select HIGH_PRIORITY value, count from mysql.stats_top_n where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) +func (h *Handle) cmSketchAndTopNFromStorage(reader *statistics.StatsReader, tblID int64, isIndex, histID int64) (_ *statistics.CMSketch, _ *statistics.TopN, err error) { + topNRows, _, err := reader.Read("select HIGH_PRIORITY value, count from mysql.stats_top_n where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) if err != nil { return nil, nil, err } - rows, _, err := reader.read("select cm_sketch from mysql.stats_histograms where table_id = %? and is_index = %? 
and hist_id = %?", tblID, isIndex, histID) + rows, _, err := reader.Read("select cm_sketch from mysql.stats_histograms where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) if err != nil { return nil, nil, err } @@ -1229,15 +1229,15 @@ func (h *Handle) cmSketchAndTopNFromStorage(reader *statsReader, tblID int64, is return statistics.DecodeCMSketchAndTopN(rows[0].GetBytes(0), topNRows) } -func (h *Handle) fmSketchFromStorage(reader *statsReader, tblID int64, isIndex, histID int64) (_ *statistics.FMSketch, err error) { - rows, _, err := reader.read("select value from mysql.stats_fm_sketch where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) +func (h *Handle) fmSketchFromStorage(reader *statistics.StatsReader, tblID int64, isIndex, histID int64) (_ *statistics.FMSketch, err error) { + rows, _, err := reader.Read("select value from mysql.stats_fm_sketch where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) if err != nil || len(rows) == 0 { return nil, err } return statistics.DecodeFMSketch(rows[0].GetBytes(0)) } -func (h *Handle) indexStatsFromStorage(reader *statsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo) error { +func (h *Handle) indexStatsFromStorage(reader *statistics.StatsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo) error { histID := row.GetInt64(2) distinct := row.GetInt64(3) histVer := row.GetUint64(4) @@ -1247,7 +1247,7 @@ func (h *Handle) indexStatsFromStorage(reader *statsReader, row chunk.Row, table errorRate := statistics.ErrorRate{} flag := row.GetInt64(8) lastAnalyzePos := row.GetDatum(10, types.NewFieldType(mysql.TypeBlob)) - if statistics.IsAnalyzed(flag) && !reader.isHistory() { + if statistics.IsAnalyzed(flag) && !reader.IsHistory() { h.mu.rateMap.clear(table.PhysicalID, histID, true) } else if idx != nil { errorRate = idx.ErrorRate @@ -1295,7 +1295,7 @@ func (h *Handle) indexStatsFromStorage(reader *statsReader, row chunk.Row, table return nil } -func (h *Handle) columnStatsFromStorage(reader *statsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo, loadAll bool) error { +func (h *Handle) columnStatsFromStorage(reader *statistics.StatsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo, loadAll bool) error { histID := row.GetInt64(2) distinct := row.GetInt64(3) histVer := row.GetUint64(4) @@ -1307,7 +1307,7 @@ func (h *Handle) columnStatsFromStorage(reader *statsReader, row chunk.Row, tabl col := table.Columns[histID] errorRate := statistics.ErrorRate{} flag := row.GetInt64(8) - if statistics.IsAnalyzed(flag) && !reader.isHistory() { + if statistics.IsAnalyzed(flag) && !reader.IsHistory() { h.mu.rateMap.clear(table.PhysicalID, histID, false) } else if col != nil { errorRate = col.ErrorRate @@ -1439,14 +1439,14 @@ func (h *Handle) TableStatsFromStorage(tableInfo *model.TableInfo, physicalID in } table.Pseudo = false - rows, _, err := reader.read("select modify_count, count from mysql.stats_meta where table_id = %?", physicalID) + rows, _, err := reader.Read("select modify_count, count from mysql.stats_meta where table_id = %?", physicalID) if err != nil || len(rows) == 0 { return nil, err } table.ModifyCount = rows[0].GetInt64(0) table.Count = rows[0].GetInt64(1) - rows, _, err = reader.read("select table_id, is_index, hist_id, distinct_count, version, null_count, tot_col_size, stats_ver, flag, correlation, last_analyze_pos from mysql.stats_histograms where table_id = %?", physicalID) + 
rows, _, err = reader.Read("select table_id, is_index, hist_id, distinct_count, version, null_count, tot_col_size, stats_ver, flag, correlation, last_analyze_pos from mysql.stats_histograms where table_id = %?", physicalID) // Check deleted table. if err != nil || len(rows) == 0 { return nil, nil @@ -1464,7 +1464,7 @@ func (h *Handle) TableStatsFromStorage(tableInfo *model.TableInfo, physicalID in return h.extendedStatsFromStorage(reader, table, physicalID, loadAll) } -func (h *Handle) extendedStatsFromStorage(reader *statsReader, table *statistics.Table, physicalID int64, loadAll bool) (*statistics.Table, error) { +func (h *Handle) extendedStatsFromStorage(reader *statistics.StatsReader, table *statistics.Table, physicalID int64, loadAll bool) (*statistics.Table, error) { failpoint.Inject("injectExtStatsLoadErr", func() { failpoint.Return(nil, errors.New("gofail extendedStatsFromStorage error")) }) @@ -1474,7 +1474,7 @@ func (h *Handle) extendedStatsFromStorage(reader *statsReader, table *statistics } else { table.ExtendedStats = statistics.NewExtendedStatsColl() } - rows, _, err := reader.read("select name, status, type, column_ids, stats, version from mysql.stats_extended where table_id = %? and status in (%?, %?, %?) and version > %?", physicalID, StatsStatusInited, StatsStatusAnalyzed, StatsStatusDeleted, lastVersion) + rows, _, err := reader.Read("select name, status, type, column_ids, stats, version from mysql.stats_extended where table_id = %? and status in (%?, %?, %?) and version > %?", physicalID, StatsStatusInited, StatsStatusAnalyzed, StatsStatusDeleted, lastVersion) if err != nil || len(rows) == 0 { return table, nil } @@ -1525,7 +1525,7 @@ func (h *Handle) StatsMetaCountAndModifyCount(tableID int64) (int64, int64, erro err = err1 } }() - rows, _, err := reader.read("select count, modify_count from mysql.stats_meta where table_id = %?", tableID) + rows, _, err := reader.Read("select count, modify_count from mysql.stats_meta where table_id = %?", tableID) if err != nil { return 0, 0, err } @@ -1634,6 +1634,7 @@ func SaveTableStatsToStorage(sctx sessionctx.Context, results *statistics.Analyz logutil.BgLogger().Error("record historical stats meta failed", zap.Int64("table-id", tableID), zap.Uint64("version", statsVer), + zap.String("source", source), zap.Error(err1)) } } @@ -1913,8 +1914,8 @@ func (h *Handle) SaveMetaToStorage(tableID, count, modifyCount int64, source str return err } -func (h *Handle) histogramFromStorage(reader *statsReader, tableID int64, colID int64, tp *types.FieldType, distinct int64, isIndex int, ver uint64, nullCount int64, totColSize int64, corr float64) (_ *statistics.Histogram, err error) { - rows, fields, err := reader.read("select count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where table_id = %? and is_index = %? and hist_id = %? order by bucket_id", tableID, isIndex, colID) +func (h *Handle) histogramFromStorage(reader *statistics.StatsReader, tableID int64, colID int64, tp *types.FieldType, distinct int64, isIndex int, ver uint64, nullCount int64, totColSize int64, corr float64) (_ *statistics.Histogram, err error) { + rows, fields, err := reader.Read("select count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where table_id = %? and is_index = %? and hist_id = %? 
order by bucket_id", tableID, isIndex, colID) if err != nil { return nil, errors.Trace(err) } @@ -1961,9 +1962,9 @@ func (h *Handle) histogramFromStorage(reader *statsReader, tableID int64, colID return hg, nil } -func (h *Handle) columnCountFromStorage(reader *statsReader, tableID, colID, statsVer int64) (int64, error) { +func (h *Handle) columnCountFromStorage(reader *statistics.StatsReader, tableID, colID, statsVer int64) (int64, error) { count := int64(0) - rows, _, err := reader.read("select sum(count) from mysql.stats_buckets where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) + rows, _, err := reader.Read("select sum(count) from mysql.stats_buckets where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) if err != nil { return 0, errors.Trace(err) } @@ -1979,7 +1980,7 @@ func (h *Handle) columnCountFromStorage(reader *statsReader, tableID, colID, sta // Before stats ver 2, histogram represents all data in this column. // In stats ver 2, histogram + TopN represent all data in this column. // So we need to add TopN total count here. - rows, _, err = reader.read("select sum(count) from mysql.stats_top_n where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) + rows, _, err = reader.Read("select sum(count) from mysql.stats_top_n where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) if err != nil { return 0, errors.Trace(err) } @@ -2014,26 +2015,7 @@ func (h *Handle) statsMetaByTableIDFromStorage(tableID int64, snapshot uint64) ( return } -// statsReader is used for simplify code that needs to read system tables in different sqls -// but requires the same transactions. -type statsReader struct { - ctx sqlexec.RestrictedSQLExecutor - snapshot uint64 -} - -func (sr *statsReader) read(sql string, args ...interface{}) (rows []chunk.Row, fields []*ast.ResultField, err error) { - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) - if sr.snapshot > 0 { - return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool, sqlexec.ExecOptionWithSnapshot(sr.snapshot)}, sql, args...) - } - return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, args...) 
-} - -func (sr *statsReader) isHistory() bool { - return sr.snapshot > 0 -} - -func (h *Handle) getGlobalStatsReader(snapshot uint64) (reader *statsReader, err error) { +func (h *Handle) getGlobalStatsReader(snapshot uint64) (reader *statistics.StatsReader, err error) { h.mu.Lock() defer func() { if r := recover(); r != nil { @@ -2043,44 +2025,12 @@ func (h *Handle) getGlobalStatsReader(snapshot uint64) (reader *statsReader, err h.mu.Unlock() } }() - return h.getStatsReader(snapshot, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) + return statistics.GetStatsReader(snapshot, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) } -func (h *Handle) releaseGlobalStatsReader(reader *statsReader) error { +func (h *Handle) releaseGlobalStatsReader(reader *statistics.StatsReader) error { defer h.mu.Unlock() - return h.releaseStatsReader(reader, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) -} - -func (h *Handle) getStatsReader(snapshot uint64, exec sqlexec.RestrictedSQLExecutor) (reader *statsReader, err error) { - failpoint.Inject("mockGetStatsReaderFail", func(val failpoint.Value) { - if val.(bool) { - failpoint.Return(nil, errors.New("gofail genStatsReader error")) - } - }) - if snapshot > 0 { - return &statsReader{ctx: exec, snapshot: snapshot}, nil - } - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("getStatsReader panic %v", r) - } - }() - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) - failpoint.Inject("mockGetStatsReaderPanic", nil) - _, err = exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") - if err != nil { - return nil, err - } - return &statsReader{ctx: exec}, nil -} - -func (h *Handle) releaseStatsReader(reader *statsReader, exec sqlexec.RestrictedSQLExecutor) error { - if reader.snapshot > 0 { - return nil - } - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) - _, err := exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") - return err + return reader.Close() } const ( diff --git a/statistics/handle/handle_hist.go b/statistics/handle/handle_hist.go index ad04d946e3f22..1d41e14791446 100644 --- a/statistics/handle/handle_hist.go +++ b/statistics/handle/handle_hist.go @@ -177,7 +177,7 @@ var errExit = errors.New("Stop loading since domain is closed") // StatsReaderContext exported for testing type StatsReaderContext struct { - reader *statsReader + reader *statistics.StatsReader createdTime time.Time } @@ -188,7 +188,7 @@ func (h *Handle) SubLoadWorker(ctx sessionctx.Context, exit chan struct{}, exitW exitWg.Done() logutil.BgLogger().Info("SubLoadWorker exited.") if readerCtx.reader != nil { - err := h.releaseStatsReader(readerCtx.reader, ctx.(sqlexec.RestrictedSQLExecutor)) + err := readerCtx.reader.Close() if err != nil { logutil.BgLogger().Error("Fail to release stats loader: ", zap.Error(err)) } @@ -295,13 +295,13 @@ func (h *Handle) handleOneItemTask(task *NeededItemTask, readerCtx *StatsReaderC func (h *Handle) loadFreshStatsReader(readerCtx *StatsReaderContext, ctx sqlexec.RestrictedSQLExecutor) { if readerCtx.reader == nil || readerCtx.createdTime.Add(h.Lease()).Before(time.Now()) { if readerCtx.reader != nil { - err := h.releaseStatsReader(readerCtx.reader, ctx) + err := readerCtx.reader.Close() if err != nil { logutil.BgLogger().Warn("Fail to release stats loader: ", zap.Error(err)) } } for { - newReader, err := h.getStatsReader(0, ctx) + newReader, err := statistics.GetStatsReader(0, ctx) if err != nil { logutil.BgLogger().Error("Fail to new stats loader, retry after a while.", zap.Error(err)) 
time.Sleep(h.Lease() / 10) @@ -317,7 +317,7 @@ func (h *Handle) loadFreshStatsReader(readerCtx *StatsReaderContext, ctx sqlexec } // readStatsForOneItem reads hist for one column/index, TODO load data via kv-get asynchronously -func (h *Handle) readStatsForOneItem(item model.TableItemID, w *statsWrapper, reader *statsReader) (*statsWrapper, error) { +func (h *Handle) readStatsForOneItem(item model.TableItemID, w *statsWrapper, reader *statistics.StatsReader) (*statsWrapper, error) { failpoint.Inject("mockReadStatsForOnePanic", nil) failpoint.Inject("mockReadStatsForOneFail", func(val failpoint.Value) { if val.(bool) { @@ -357,7 +357,7 @@ func (h *Handle) readStatsForOneItem(item model.TableItemID, w *statsWrapper, re return nil, errors.Trace(err) } } - rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where table_id = %? and hist_id = %? and is_index = %?", item.TableID, item.ID, int(isIndexFlag)) + rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where table_id = %? and hist_id = %? and is_index = %?", item.TableID, item.ID, int(isIndexFlag)) if err != nil { return nil, errors.Trace(err) } diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go index 2b0669033f8c9..dc399a87fcad3 100644 --- a/statistics/handle/handle_test.go +++ b/statistics/handle/handle_test.go @@ -622,16 +622,16 @@ func TestLoadStats(t *testing.T) { require.True(t, idx.IsFullLoad()) // Following test tests whether the LoadNeededHistograms would panic. - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderFail", `return(true)`)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/mockGetStatsReaderFail", `return(true)`)) err = h.LoadNeededHistograms() require.Error(t, err) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderFail")) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/mockGetStatsReaderFail")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderPanic", "panic")) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/mockGetStatsReaderPanic", "panic")) err = h.LoadNeededHistograms() require.Error(t, err) require.Regexp(t, ".*getStatsReader panic.*", err.Error()) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderPanic")) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/mockGetStatsReaderPanic")) err = h.LoadNeededHistograms() require.NoError(t, err) } diff --git a/statistics/handle/historical_stats_handler.go b/statistics/handle/historical_stats_handler.go index c7a683da8b740..87b94b656899a 100644 --- a/statistics/handle/historical_stats_handler.go +++ b/statistics/handle/historical_stats_handler.go @@ -86,6 +86,7 @@ func (h *Handle) recordHistoricalStatsMeta(tableID int64, version uint64, source logutil.BgLogger().Error("record historical stats meta failed", zap.Int64("table-id", tableID), zap.Uint64("version", version), + zap.String("source", source), zap.Error(err)) } } diff --git a/statistics/interact_with_storage.go b/statistics/interact_with_storage.go new file mode 100644 index 0000000000000..478b845937067 --- /dev/null +++ b/statistics/interact_with_storage.go @@ -0,0 +1,86 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package statistics
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
+	"github.com/pingcap/tidb/kv"
+	"github.com/pingcap/tidb/parser/ast"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/sqlexec"
+)
+
+// StatsReader simplifies code that needs to read statistics from the system tables (mysql.stats_xxx) with different
+// SQL statements but within the same transaction.
+//
+// Note that:
+// 1. Remember to call (*StatsReader).Close after reading all statistics.
+// 2. StatsReader is not thread-safe. Different goroutines cannot call (*StatsReader).Read concurrently.
+type StatsReader struct {
+	ctx      sqlexec.RestrictedSQLExecutor
+	snapshot uint64
+}
+
+// GetStatsReader returns a StatsReader.
+func GetStatsReader(snapshot uint64, exec sqlexec.RestrictedSQLExecutor) (reader *StatsReader, err error) {
+	failpoint.Inject("mockGetStatsReaderFail", func(val failpoint.Value) {
+		if val.(bool) {
+			failpoint.Return(nil, errors.New("gofail genStatsReader error"))
+		}
+	})
+	if snapshot > 0 {
+		return &StatsReader{ctx: exec, snapshot: snapshot}, nil
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = fmt.Errorf("getStatsReader panic %v", r)
+		}
+	}()
+	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
+	failpoint.Inject("mockGetStatsReaderPanic", nil)
+	_, err = exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin")
+	if err != nil {
+		return nil, err
+	}
+	return &StatsReader{ctx: exec}, nil
+}
+
+// Read is a thin wrapper that reads statistics from storage with a SQL command.
+func (sr *StatsReader) Read(sql string, args ...interface{}) (rows []chunk.Row, fields []*ast.ResultField, err error) {
+	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
+	if sr.snapshot > 0 {
+		return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool, sqlexec.ExecOptionWithSnapshot(sr.snapshot)}, sql, args...)
+	}
+	return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, args...)
+}
+
+// IsHistory indicates whether to read history statistics.
+func (sr *StatsReader) IsHistory() bool {
+	return sr.snapshot > 0
+}
+
+// Close closes the StatsReader.
+func (sr *StatsReader) Close() error { + if sr.IsHistory() || sr.ctx == nil { + return nil + } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, err := sr.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") + return err +} diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go index 9f9d4ef6fb002..065fc3621dabd 100644 --- a/store/copr/batch_coprocessor.go +++ b/store/copr/batch_coprocessor.go @@ -567,7 +567,7 @@ func buildBatchCopTasksConsistentHash(bo *backoff.Backoffer, for i, ranges := range rangesForEachPhysicalTable { rangesLen += ranges.Len() - locations, err := cache.SplitKeyRangesByLocations(bo, ranges) + locations, err := cache.SplitKeyRangesByLocations(bo, ranges, UnspecifiedLimit) if err != nil { return nil, errors.Trace(err) } @@ -677,7 +677,7 @@ func buildBatchCopTasksCore(bo *backoff.Backoffer, store *kvStore, rangesForEach rangesLen = 0 for i, ranges := range rangesForEachPhysicalTable { rangesLen += ranges.Len() - locations, err := cache.SplitKeyRangesByLocations(bo, ranges) + locations, err := cache.SplitKeyRangesByLocations(bo, ranges, UnspecifiedLimit) if err != nil { return nil, errors.Trace(err) } diff --git a/store/copr/coprocessor_test.go b/store/copr/coprocessor_test.go index 36ae88758bbc5..f7b15ebfd682d 100644 --- a/store/copr/coprocessor_test.go +++ b/store/copr/coprocessor_test.go @@ -381,46 +381,51 @@ func TestSplitRegionRanges(t *testing.T) { bo := backoff.NewBackofferWithVars(context.Background(), 3000, nil) - ranges, err := cache.SplitRegionRanges(bo, BuildKeyRanges("a", "c")) + ranges, err := cache.SplitRegionRanges(bo, BuildKeyRanges("a", "c"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "a", "c") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("h", "y")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("h", "y"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 3) rangeEqual(t, ranges, "h", "n", "n", "t", "t", "y") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "z")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "z"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 2) rangeEqual(t, ranges, "s", "t", "t", "z") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "s")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "s"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "s", "s") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "t")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "t"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "t", "t") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "u")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "u"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "t", "u") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("u", "z")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("u", "z"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "u", "z") // min --> max - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 4) rangeEqual(t, ranges, "a", "g", "g", "n", "n", "t", "t", "z") + + ranges, err = 
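The statsReader helpers that used to be private to statistics/handle are now exported from the statistics package as GetStatsReader/Read/Close. A minimal sketch of the new lifecycle, assuming a sqlexec.RestrictedSQLExecutor is already in hand; readStatsVer is a hypothetical caller, not part of this change:

```go
package example

import (
	"github.com/pingcap/tidb/statistics"
	"github.com/pingcap/tidb/util/sqlexec"
)

// readStatsVer is a hypothetical consumer of the newly exported API.
func readStatsVer(exec sqlexec.RestrictedSQLExecutor, tableID int64) (err error) {
	// snapshot == 0 opens an internal "begin ... commit" transaction;
	// a non-zero snapshot reads history statistics and skips the transaction.
	reader, err := statistics.GetStatsReader(0, exec)
	if err != nil {
		return err
	}
	defer func() {
		// Close commits the internal transaction for non-snapshot readers.
		if closeErr := reader.Close(); closeErr != nil && err == nil {
			err = closeErr
		}
	}()
	_, _, err = reader.Read(
		"select stats_ver from mysql.stats_histograms where table_id = %?", tableID)
	return err
}
```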
cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z"), 3) + require.NoError(t, err) + require.Len(t, ranges, 3) + rangeEqual(t, ranges, "a", "g", "g", "n", "n", "t") } func TestRebuild(t *testing.T) { diff --git a/store/copr/region_cache.go b/store/copr/region_cache.go index 97c3d705c223b..aa33656c39cca 100644 --- a/store/copr/region_cache.go +++ b/store/copr/region_cache.go @@ -42,10 +42,10 @@ func NewRegionCache(rc *tikv.RegionCache) *RegionCache { } // SplitRegionRanges gets the split ranges from pd region. -func (c *RegionCache) SplitRegionRanges(bo *Backoffer, keyRanges []kv.KeyRange) ([]kv.KeyRange, error) { +func (c *RegionCache) SplitRegionRanges(bo *Backoffer, keyRanges []kv.KeyRange, limit int) ([]kv.KeyRange, error) { ranges := NewKeyRanges(keyRanges) - locations, err := c.SplitKeyRangesByLocations(bo, ranges) + locations, err := c.SplitKeyRangesByLocations(bo, ranges, limit) if err != nil { return nil, derr.ToTiDBErr(err) } @@ -122,10 +122,16 @@ func (l *LocationKeyRanges) splitKeyRangesByBuckets() []*LocationKeyRanges { return res } +// UnspecifiedLimit means no limit. +const UnspecifiedLimit = -1 + // SplitKeyRangesByLocations splits the KeyRanges by logical info in the cache. -func (c *RegionCache) SplitKeyRangesByLocations(bo *Backoffer, ranges *KeyRanges) ([]*LocationKeyRanges, error) { +func (c *RegionCache) SplitKeyRangesByLocations(bo *Backoffer, ranges *KeyRanges, limit int) ([]*LocationKeyRanges, error) { res := make([]*LocationKeyRanges, 0) for ranges.Len() > 0 { + if limit != UnspecifiedLimit && len(res) >= limit { + break + } loc, err := c.LocateKey(bo.TiKVBackoffer(), ranges.At(0).StartKey) if err != nil { return res, derr.ToTiDBErr(err) @@ -176,7 +182,7 @@ func (c *RegionCache) SplitKeyRangesByLocations(bo *Backoffer, ranges *KeyRanges // // TODO(youjiali1995): Try to do it in one round and reduce allocations if bucket is not enabled. 
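SplitKeyRangesByLocations (and SplitRegionRanges on top of it) gain a limit argument: UnspecifiedLimit (-1) keeps the old exhaustive split, while a positive value stops the loop once that many location-grouped ranges have been collected. A sketch mirroring the updated test, using names from the diff; splitWithCap is illustrative only:

```go
package copr

// splitWithCap shows the contract of the new limit argument: with
// UnspecifiedLimit every region boundary is returned; with limit = 3 the
// result is truncated once three location-split ranges exist.
func splitWithCap(cache *RegionCache, bo *Backoffer) error {
	// With region boundaries at "g", "n" and "t" this yields four ranges:
	// ["a","g") ["g","n") ["n","t") ["t","z")
	full, err := cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z"), UnspecifiedLimit)
	if err != nil {
		return err
	}
	// The same call capped at 3 keeps only the first three ranges above.
	capped, err := cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z"), 3)
	if err != nil {
		return err
	}
	_, _ = full, capped
	return nil
}
```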
func (c *RegionCache) SplitKeyRangesByBuckets(bo *Backoffer, ranges *KeyRanges) ([]*LocationKeyRanges, error) { - locs, err := c.SplitKeyRangesByLocations(bo, ranges) + locs, err := c.SplitKeyRangesByLocations(bo, ranges, UnspecifiedLimit) if err != nil { return nil, derr.ToTiDBErr(err) } diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index a14780b878f19..f9b53e55988d4 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -1198,6 +1198,7 @@ func (w *GCWorker) resolveLocksForRange( failpoint.Inject("setGcResolveMaxBackoff", func(v failpoint.Value) { sleep := v.(int) // cooperate with github.com/tikv/client-go/v2/locate/invalidCacheAndRetry + //nolint: SA1029 ctx = context.WithValue(ctx, "injectedBackoff", struct{}{}) bo = tikv.NewBackofferWithVars(ctx, sleep, nil) }) diff --git a/telemetry/BUILD.bazel b/telemetry/BUILD.bazel index 1f032aa3f237a..a6c79f7de596f 100644 --- a/telemetry/BUILD.bazel +++ b/telemetry/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "id.go", "status.go", "telemetry.go", + "ttl.go", "util.go", ], importpath = "github.com/pingcap/tidb/telemetry", @@ -24,6 +25,7 @@ go_library( "//infoschema", "//kv", "//metrics", + "//parser/ast", "//parser/model", "//parser/mysql", "//sessionctx", diff --git a/telemetry/data_feature_usage.go b/telemetry/data_feature_usage.go index 81bf7a9785a3a..8661ce13ecccb 100644 --- a/telemetry/data_feature_usage.go +++ b/telemetry/data_feature_usage.go @@ -60,6 +60,7 @@ type featureUsage struct { AutoIDNoCache bool `json:"autoIDNoCache"` IndexMergeUsageCounter *m.IndexMergeUsageCounter `json:"indexMergeUsageCounter"` ResourceControlUsage *resourceControlUsage `json:"resourceControl"` + TTLUsage *ttlUsageCounter `json:"ttlUsage"` } type placementPolicyUsage struct { @@ -117,6 +118,8 @@ func getFeatureUsage(ctx context.Context, sctx sessionctx.Context) (*featureUsag usage.IndexMergeUsageCounter = getIndexMergeUsageInfo() + usage.TTLUsage = getTTLUsageInfo(ctx, sctx) + return &usage, nil } diff --git a/telemetry/data_feature_usage_test.go b/telemetry/data_feature_usage_test.go index a678bc681eb18..a667219ba50a8 100644 --- a/telemetry/data_feature_usage_test.go +++ b/telemetry/data_feature_usage_test.go @@ -15,12 +15,16 @@ package telemetry_test import ( + "encoding/json" "fmt" + "strings" "testing" + "time" _ "github.com/pingcap/tidb/autoid_service" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/telemetry" "github.com/pingcap/tidb/testkit" @@ -619,3 +623,157 @@ func TestIndexMergeUsage(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), usage.IndexMergeUsageCounter.IndexMergeUsed) } + +func TestTTLTelemetry(t *testing.T) { + timeFormat := "2006-01-02 15:04:05" + dateFormat := "2006-01-02" + + now := time.Now() + curDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + if interval := curDate.Add(time.Hour * 24).Sub(now); interval > 0 && interval < 5*time.Minute { + // make sure testing is not running at the end of one day + time.Sleep(interval) + } + + store, do := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@global.tidb_ttl_job_enable=0") + + getTTLTable := func(name string) *model.TableInfo { + tbl, err := do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr(name)) + require.NoError(t, err) + require.NotNil(t, tbl.Meta().TTLInfo) + return tbl.Meta() + 
} + + jobIDIdx := 1 + insertTTLHistory := func(tblName string, partitionName string, createTime, finishTime, ttlExpire time.Time, scanError string, totalRows, errorRows int64, status string) { + defer func() { + jobIDIdx++ + }() + + tbl := getTTLTable(tblName) + tblID := tbl.ID + partitionID := tbl.ID + if partitionName != "" { + for _, def := range tbl.Partition.Definitions { + if def.Name.L == strings.ToLower(partitionName) { + partitionID = def.ID + } + } + require.NotEqual(t, tblID, partitionID) + } + + summary := make(map[string]interface{}) + summary["total_rows"] = totalRows + summary["success_rows"] = totalRows - errorRows + summary["error_rows"] = errorRows + summary["total_scan_task"] = 1 + summary["scheduled_scan_task"] = 1 + summary["finished_scan_task"] = 1 + if scanError != "" { + summary["scan_task_err"] = scanError + } + + summaryText, err := json.Marshal(summary) + require.NoError(t, err) + + tk.MustExec("insert into "+ + "mysql.tidb_ttl_job_history ("+ + " job_id, table_id, parent_table_id, table_schema, table_name, partition_name, "+ + " create_time, finish_time, ttl_expire, summary_text, "+ + " expired_rows, deleted_rows, error_delete_rows, status) "+ + "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + jobIDIdx, partitionID, tblID, "test", tblName, partitionName, + createTime.Format(timeFormat), finishTime.Format(timeFormat), ttlExpire.Format(timeFormat), summaryText, + totalRows, totalRows-errorRows, errorRows, status, + ) + } + + oneDayAgoDate := curDate.Add(-24 * time.Hour) + // start today, end today + times11 := []time.Time{curDate.Add(time.Hour), curDate.Add(2 * time.Hour), curDate} + // start yesterday, end today + times21 := []time.Time{curDate.Add(-2 * time.Hour), curDate, curDate.Add(-3 * time.Hour)} + // start yesterday, end yesterday + times31 := []time.Time{oneDayAgoDate, oneDayAgoDate.Add(time.Hour), oneDayAgoDate.Add(-time.Hour)} + times32 := []time.Time{oneDayAgoDate.Add(2 * time.Hour), oneDayAgoDate.Add(3 * time.Hour), oneDayAgoDate.Add(time.Hour)} + times33 := []time.Time{oneDayAgoDate.Add(4 * time.Hour), oneDayAgoDate.Add(5 * time.Hour), oneDayAgoDate.Add(3 * time.Hour)} + // start 2 days ago, end yesterday + times41 := []time.Time{oneDayAgoDate.Add(-2 * time.Hour), oneDayAgoDate.Add(time.Hour), oneDayAgoDate.Add(-3 * time.Hour)} + // start two days ago, end two days ago + times51 := []time.Time{oneDayAgoDate.Add(-5 * time.Hour), oneDayAgoDate.Add(-4 * time.Hour), oneDayAgoDate.Add(-6 * time.Hour)} + + tk.MustExec("create table t1 (t timestamp) TTL=`t` + interval 1 hour") + insertTTLHistory("t1", "", times11[0], times11[1], times11[2], "", 100000000, 0, "finished") + insertTTLHistory("t1", "", times21[0], times21[1], times21[2], "", 100000000, 0, "finished") + insertTTLHistory("t1", "", times31[0], times31[1], times31[2], "err1", 112600, 110000, "finished") + insertTTLHistory("t1", "", times32[0], times32[1], times32[2], "", 2600, 0, "timeout") + insertTTLHistory("t1", "", times33[0], times33[1], times33[2], "", 2600, 0, "finished") + insertTTLHistory("t1", "", times41[0], times41[1], times41[2], "", 2600, 0, "finished") + insertTTLHistory("t1", "", times51[0], times51[1], times51[2], "", 100000000, 1, "finished") + + usage, err := telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + checkTableHistWithDeleteRows := func(vals ...int64) { + require.Equal(t, 5, len(vals)) + require.Equal(t, 5, len(usage.TTLUsage.TableHistWithDeleteRows)) + require.Equal(t, int64(10*1000), *usage.TTLUsage.TableHistWithDeleteRows[0].LessThan) + 
require.Equal(t, vals[0], usage.TTLUsage.TableHistWithDeleteRows[0].Count) + require.Equal(t, int64(100*1000), *usage.TTLUsage.TableHistWithDeleteRows[1].LessThan) + require.Equal(t, vals[1], usage.TTLUsage.TableHistWithDeleteRows[1].Count) + require.Equal(t, int64(1000*1000), *usage.TTLUsage.TableHistWithDeleteRows[2].LessThan) + require.Equal(t, vals[2], usage.TTLUsage.TableHistWithDeleteRows[2].Count) + require.Equal(t, int64(10*1000*1000), *usage.TTLUsage.TableHistWithDeleteRows[3].LessThan) + require.Equal(t, vals[3], usage.TTLUsage.TableHistWithDeleteRows[3].Count) + require.True(t, usage.TTLUsage.TableHistWithDeleteRows[4].LessThanMax) + require.Nil(t, usage.TTLUsage.TableHistWithDeleteRows[4].LessThan) + require.Equal(t, vals[4], usage.TTLUsage.TableHistWithDeleteRows[4].Count) + } + + checkTableHistWithDelay := func(vals ...int64) { + require.Equal(t, 5, len(vals)) + require.Equal(t, 5, len(usage.TTLUsage.TableHistWithDelayTime)) + require.Equal(t, int64(1), *usage.TTLUsage.TableHistWithDelayTime[0].LessThan) + require.Equal(t, vals[0], usage.TTLUsage.TableHistWithDelayTime[0].Count) + require.Equal(t, int64(6), *usage.TTLUsage.TableHistWithDelayTime[1].LessThan) + require.Equal(t, vals[1], usage.TTLUsage.TableHistWithDelayTime[1].Count) + require.Equal(t, int64(24), *usage.TTLUsage.TableHistWithDelayTime[2].LessThan) + require.Equal(t, vals[2], usage.TTLUsage.TableHistWithDelayTime[2].Count) + require.Equal(t, int64(72), *usage.TTLUsage.TableHistWithDelayTime[3].LessThan) + require.Equal(t, vals[3], usage.TTLUsage.TableHistWithDelayTime[3].Count) + require.True(t, usage.TTLUsage.TableHistWithDelayTime[4].LessThanMax) + require.Nil(t, usage.TTLUsage.TableHistWithDelayTime[4].LessThan) + require.Equal(t, vals[4], usage.TTLUsage.TableHistWithDelayTime[4].Count) + } + + require.False(t, usage.TTLUsage.TTLJobEnabled) + require.Equal(t, int64(1), usage.TTLUsage.TTLTables) + require.Equal(t, int64(1), usage.TTLUsage.TTLJobEnabledTables) + require.Equal(t, oneDayAgoDate.Format(dateFormat), usage.TTLUsage.TTLHistDate) + checkTableHistWithDeleteRows(0, 1, 0, 0, 0) + checkTableHistWithDelay(0, 0, 1, 0, 0) + + tk.MustExec("create table t2 (t timestamp) TTL=`t` + interval 20 hour") + tk.MustExec("set @@global.tidb_ttl_job_enable=1") + insertTTLHistory("t2", "", times31[0], times31[1], times31[2], "", 9999, 0, "finished") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.True(t, usage.TTLUsage.TTLJobEnabled) + require.Equal(t, int64(2), usage.TTLUsage.TTLTables) + require.Equal(t, int64(2), usage.TTLUsage.TTLJobEnabledTables) + require.Equal(t, oneDayAgoDate.Format(dateFormat), usage.TTLUsage.TTLHistDate) + checkTableHistWithDeleteRows(1, 1, 0, 0, 0) + checkTableHistWithDelay(0, 1, 1, 0, 0) + + tk.MustExec("create table t3 (t timestamp) TTL=`t` + interval 1 hour TTL_ENABLE='OFF'") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.True(t, usage.TTLUsage.TTLJobEnabled) + require.Equal(t, int64(3), usage.TTLUsage.TTLTables) + require.Equal(t, int64(2), usage.TTLUsage.TTLJobEnabledTables) + require.Equal(t, oneDayAgoDate.Format(dateFormat), usage.TTLUsage.TTLHistDate) + checkTableHistWithDeleteRows(1, 1, 0, 0, 0) + checkTableHistWithDelay(0, 1, 1, 0, 1) +} diff --git a/telemetry/main_test.go b/telemetry/main_test.go index 0e8d98b2a4f6c..8478a3ead4084 100644 --- a/telemetry/main_test.go +++ b/telemetry/main_test.go @@ -41,6 +41,8 @@ func TestMain(m *testing.M) { 
goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"), + goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), } goleak.VerifyTestMain(m, opts...) diff --git a/telemetry/ttl.go b/telemetry/ttl.go new file mode 100644 index 0000000000000..b9c8c0210fb0c --- /dev/null +++ b/telemetry/ttl.go @@ -0,0 +1,214 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package telemetry + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" +) + +const ( + // selectDeletedRowsOneDaySQL selects the deleted rows for each table of last day + selectDeletedRowsOneDaySQL = `SELECT parent_table_id, CAST(SUM(deleted_rows) AS SIGNED) + FROM + mysql.tidb_ttl_job_history + WHERE + create_time >= CURDATE() - INTERVAL 7 DAY + AND finish_time >= CURDATE() - INTERVAL 1 DAY + AND finish_time < CURDATE() + GROUP BY parent_table_id;` + // selectDelaySQL selects the deletion delay in minute for each table at the end of last day + selectDelaySQL = `SELECT + parent_table_id, TIMESTAMPDIFF(MINUTE, MIN(tm), CURDATE()) AS ttl_minutes + FROM + ( + SELECT + table_id, + parent_table_id, + MAX(ttl_expire) AS tm + FROM + mysql.tidb_ttl_job_history + WHERE + create_time > CURDATE() - INTERVAL 7 DAY + AND finish_time < CURDATE() + AND status = 'finished' + AND JSON_VALID(summary_text) + AND summary_text ->> "$.scan_task_err" IS NULL + GROUP BY + table_id, parent_table_id + ) t + GROUP BY parent_table_id;` +) + +type ttlHistItem struct { + // LessThan is not null means it collects the count of items with condition [prevLessThan, LessThan) + // Notice that it's type is an int64 pointer to forbid serializing it when it is not set. 
+ LessThan *int64 `json:"less_than,omitempty"` + // LessThanMax is true means the condition is [prevLessThan, MAX) + LessThanMax bool `json:"less_than_max,omitempty"` + // Count is the count of items that fit the condition + Count int64 `json:"count"` +} + +type ttlUsageCounter struct { + TTLJobEnabled bool `json:"ttl_job_enabled"` + TTLTables int64 `json:"ttl_table_count"` + TTLJobEnabledTables int64 `json:"ttl_job_enabled_tables"` + TTLHistDate string `json:"ttl_hist_date"` + TableHistWithDeleteRows []*ttlHistItem `json:"table_hist_with_delete_rows"` + TableHistWithDelayTime []*ttlHistItem `json:"table_hist_with_delay_time"` +} + +func int64Pointer(val int64) *int64 { + v := val + return &v +} + +func (c *ttlUsageCounter) UpdateTableHistWithDeleteRows(rows int64) { + for _, item := range c.TableHistWithDeleteRows { + if item.LessThanMax || rows < *item.LessThan { + item.Count++ + return + } + } +} + +func (c *ttlUsageCounter) UpdateTableHistWithDelayTime(tblCnt int, hours int64) { + for _, item := range c.TableHistWithDelayTime { + if item.LessThanMax || hours < *item.LessThan { + item.Count += int64(tblCnt) + return + } + } +} + +func getTTLUsageInfo(ctx context.Context, sctx sessionctx.Context) (counter *ttlUsageCounter) { + counter = &ttlUsageCounter{ + TTLJobEnabled: variable.EnableTTLJob.Load(), + TTLHistDate: time.Now().Add(-24 * time.Hour).Format("2006-01-02"), + TableHistWithDeleteRows: []*ttlHistItem{ + { + LessThan: int64Pointer(10 * 1000), + }, + { + LessThan: int64Pointer(100 * 1000), + }, + { + LessThan: int64Pointer(1000 * 1000), + }, + { + LessThan: int64Pointer(10000 * 1000), + }, + { + LessThanMax: true, + }, + }, + TableHistWithDelayTime: []*ttlHistItem{ + { + LessThan: int64Pointer(1), + }, + { + LessThan: int64Pointer(6), + }, + { + LessThan: int64Pointer(24), + }, + { + LessThan: int64Pointer(72), + }, + { + LessThanMax: true, + }, + }, + } + + is, ok := sctx.GetDomainInfoSchema().(infoschema.InfoSchema) + if !ok { + // it should never happen + logutil.BgLogger().Error(fmt.Sprintf("GetDomainInfoSchema returns a invalid type: %T", is)) + return + } + + ttlTables := make(map[int64]*model.TableInfo) + for _, db := range is.AllSchemas() { + for _, tbl := range is.SchemaTables(db.Name) { + tblInfo := tbl.Meta() + if tblInfo.State != model.StatePublic || tblInfo.TTLInfo == nil { + continue + } + + counter.TTLTables++ + if tblInfo.TTLInfo.Enable { + counter.TTLJobEnabledTables++ + } + ttlTables[tblInfo.ID] = tblInfo + } + } + + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, selectDeletedRowsOneDaySQL) + if err != nil { + logutil.BgLogger().Error("exec sql error", zap.String("SQL", selectDeletedRowsOneDaySQL), zap.Error(err)) + } else { + for _, row := range rows { + counter.UpdateTableHistWithDeleteRows(row.GetInt64(1)) + } + } + + rows, _, err = exec.ExecRestrictedSQL(ctx, nil, selectDelaySQL) + if err != nil { + logutil.BgLogger().Error("exec sql error", zap.String("SQL", selectDelaySQL), zap.Error(err)) + } else { + noHistoryTables := len(ttlTables) + for _, row := range rows { + tblID := row.GetInt64(0) + tbl, ok := ttlTables[tblID] + if !ok { + // table not exist, maybe truncated or deleted + continue + } + noHistoryTables-- + + evalIntervalSQL := fmt.Sprintf( + "SELECT TIMESTAMPDIFF(HOUR, CURDATE() - INTERVAL %d MINUTE, CURDATE() - INTERVAL %s %s)", + row.GetInt64(1), tbl.TTLInfo.IntervalExprStr, ast.TimeUnitType(tbl.TTLInfo.IntervalTimeUnit).String(), + ) + + innerRows, _, err := exec.ExecRestrictedSQL(ctx, nil, 
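Both telemetry histograms share one update rule: walk the ordered buckets and increment the first whose upper bound exceeds the observed value, falling through to the LessThanMax catch-all. A standalone sketch of that walk, with types simplified from ttlHistItem; the bucket bounds below mirror the TableHistWithDeleteRows buckets in the diff:

```go
package main

import "fmt"

// histItem mirrors ttlHistItem from the diff in simplified form.
type histItem struct {
	lessThan    int64 // upper bound, ignored when lessThanMax is set
	lessThanMax bool  // catch-all bucket [prevLessThan, MAX)
	count       int64
}

// update increments the first bucket whose bound exceeds v.
func update(items []*histItem, v int64) {
	for _, it := range items {
		if it.lessThanMax || v < it.lessThan {
			it.count++
			return
		}
	}
}

func main() {
	buckets := []*histItem{
		{lessThan: 10 * 1000},
		{lessThan: 100 * 1000},
		{lessThan: 1000 * 1000},
		{lessThan: 10000 * 1000},
		{lessThanMax: true},
	}
	update(buckets, 2600)      // lands in the first bucket
	update(buckets, 100000000) // lands in the catch-all bucket
	fmt.Println(buckets[0].count, buckets[4].count) // 1 1
}
```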
evalIntervalSQL) + if err != nil || len(innerRows) == 0 { + logutil.BgLogger().Error("exec sql error or empty rows returned", zap.String("SQL", evalIntervalSQL), zap.Error(err)) + continue + } + + hours := innerRows[0].GetInt64(0) + counter.UpdateTableHistWithDelayTime(1, hours) + } + + // When no history found for a table, use max delay + counter.UpdateTableHistWithDelayTime(noHistoryTables, math.MaxInt64) + } + return +} diff --git a/tests/realtikvtest/addindextest/BUILD.bazel b/tests/realtikvtest/addindextest/BUILD.bazel index a2e9c9906380b..a79f2a15f8ca7 100644 --- a/tests/realtikvtest/addindextest/BUILD.bazel +++ b/tests/realtikvtest/addindextest/BUILD.bazel @@ -43,10 +43,8 @@ go_test( "//parser/model", "//testkit", "//tests/realtikvtest", - "//util/logutil", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@org_uber_go_zap//:zap", ], ) diff --git a/tests/realtikvtest/addindextest/integration_test.go b/tests/realtikvtest/addindextest/integration_test.go index 07b54089395da..ed1e4e6c85dcd 100644 --- a/tests/realtikvtest/addindextest/integration_test.go +++ b/tests/realtikvtest/addindextest/integration_test.go @@ -31,10 +31,8 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/tests/realtikvtest" - "github.com/pingcap/tidb/util/logutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) func TestAddIndexIngestMemoryUsage(t *testing.T) { @@ -422,7 +420,7 @@ func TestAddIndexIngestCancel(t *testing.T) { return } if job.Type == model.ActionAddIndex && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "addindexlit", "t", "idx") + idx := testutil.FindIdxInfo(dom, "addindexlit", "t", "idx") if idx == nil { return } @@ -460,12 +458,3 @@ func (c *testCallback) OnJobRunBefore(job *model.Job) { c.OnJobRunBeforeExported(job) } } - -func findIdxInfo(dom *domain.Domain, dbName, tbName, idxName string) *model.IndexInfo { - tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) - if err != nil { - logutil.BgLogger().Warn("cannot find table", zap.String("dbName", dbName), zap.String("tbName", tbName)) - return nil - } - return tbl.Meta().FindIndexByName(idxName) -} diff --git a/ttl/cache/ttlstatus.go b/ttl/cache/ttlstatus.go index d28bafa5a76c8..b21a50a161f79 100644 --- a/ttl/cache/ttlstatus.go +++ b/ttl/cache/ttlstatus.go @@ -30,13 +30,15 @@ const ( // JobStatusWaiting means the job hasn't started JobStatusWaiting JobStatus = "waiting" // JobStatusRunning means this job is running - JobStatusRunning = "running" + JobStatusRunning JobStatus = "running" // JobStatusCancelling means this job is being canceled, but not canceled yet - JobStatusCancelling = "cancelling" + JobStatusCancelling JobStatus = "cancelling" // JobStatusCancelled means this job has been canceled successfully - JobStatusCancelled = "cancelled" + JobStatusCancelled JobStatus = "cancelled" // JobStatusTimeout means this job has timeout - JobStatusTimeout = "timeout" + JobStatusTimeout JobStatus = "timeout" + // JobStatusFinished means job has been finished + JobStatusFinished JobStatus = "finished" ) const selectFromTTLTableStatus = "SELECT LOW_PRIORITY 
table_id,parent_table_id,table_statistics,last_job_id,last_job_start_time,last_job_finish_time,last_job_ttl_expire,last_job_summary,current_job_id,current_job_owner_id,current_job_owner_addr,current_job_owner_hb_time,current_job_start_time,current_job_ttl_expire,current_job_state,current_job_status,current_job_status_update_time FROM mysql.tidb_ttl_table_status" diff --git a/ttl/client/BUILD.bazel b/ttl/client/BUILD.bazel index 6f2c7acaae481..e842ad03a887b 100644 --- a/ttl/client/BUILD.bazel +++ b/ttl/client/BUILD.bazel @@ -2,10 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "client", - srcs = ["command.go"], + srcs = [ + "command.go", + "notification.go", + ], importpath = "github.com/pingcap/tidb/ttl/client", visibility = ["//visibility:public"], deps = [ + "//ddl/util", "//util/logutil", "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", diff --git a/ttl/client/command.go b/ttl/client/command.go index bad2d756353cd..a285d9b186e3c 100644 --- a/ttl/client/command.go +++ b/ttl/client/command.go @@ -112,12 +112,13 @@ func TriggerNewTTLJob(ctx context.Context, cli CommandClient, dbName, tableName return &resp, nil } +// etcdClient is the client of etcd which implements the commandCli and notificationCli interface type etcdClient struct { etcdCli *clientv3.Client } -// NewEtcdCommandClient creates a client with etcd -func NewEtcdCommandClient(etcdCli *clientv3.Client) CommandClient { +// NewCommandClient creates a command client with etcd +func NewCommandClient(etcdCli *clientv3.Client) CommandClient { return &etcdClient{ etcdCli: etcdCli, } @@ -196,6 +197,7 @@ loop: return json.Unmarshal(cmdResp.Data, obj) } +// Command implements the CommandClient func (c *etcdClient) Command(ctx context.Context, cmdType string, request interface{}, response interface{}) (string, error) { requestID, err := c.sendCmd(ctx, cmdType, request) if err != nil { @@ -204,6 +206,7 @@ func (c *etcdClient) Command(ctx context.Context, cmdType string, request interf return requestID, c.waitCmdResponse(ctx, requestID, &response) } +// TakeCommand implements the CommandClient func (c *etcdClient) TakeCommand(ctx context.Context, reqID string) (bool, error) { resp, err := c.etcdCli.Delete(ctx, ttlCmdKeyRequestPrefix+reqID) if err != nil { @@ -212,6 +215,7 @@ func (c *etcdClient) TakeCommand(ctx context.Context, reqID string) (bool, error return resp.Deleted > 0, nil } +// ResponseCommand implements the CommandClient func (c *etcdClient) ResponseCommand(ctx context.Context, reqID string, obj interface{}) error { resp := &cmdResponse{ RequestID: reqID, @@ -241,6 +245,7 @@ func (c *etcdClient) ResponseCommand(ctx context.Context, reqID string, obj inte return err } +// WatchCommand implements the CommandClient func (c *etcdClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { ch := make(chan *CmdRequest) go func() { @@ -279,20 +284,24 @@ func (c *etcdClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { return ch } +// mockClient is a mock implementation for CommandCli and NotificationCli type mockClient struct { sync.Mutex - store map[string]interface{} - watchers []chan *CmdRequest + store map[string]interface{} + commandWatchers []chan *CmdRequest + notificationWatchers map[string][]chan clientv3.WatchResponse } -// NewMockCommandClient creates a mock client +// NewMockCommandClient creates a mock command client func NewMockCommandClient() CommandClient { return &mockClient{ - store: make(map[string]interface{}), - watchers: make([]chan 
*CmdRequest, 0, 1), + store: make(map[string]interface{}), + commandWatchers: make([]chan *CmdRequest, 0, 1), + notificationWatchers: make(map[string][]chan clientv3.WatchResponse), } } +// Command implements the CommandClient func (c *mockClient) Command(ctx context.Context, cmdType string, request interface{}, response interface{}) (string, error) { ctx, cancel := context.WithTimeout(ctx, time.Second*time.Duration(ttlCmdKeyLeaseSeconds)) defer cancel() @@ -346,7 +355,7 @@ func (c *mockClient) sendCmd(ctx context.Context, cmdType string, request interf defer c.Unlock() key := ttlCmdKeyRequestPrefix + reqID c.store[key] = req - for _, ch := range c.watchers { + for _, ch := range c.commandWatchers { select { case <-ctx.Done(): return reqID, ctx.Err() @@ -358,6 +367,7 @@ func (c *mockClient) sendCmd(ctx context.Context, cmdType string, request interf return reqID, nil } +// TakeCommand implements the CommandClient func (c *mockClient) TakeCommand(_ context.Context, reqID string) (bool, error) { c.Lock() defer c.Unlock() @@ -369,6 +379,7 @@ func (c *mockClient) TakeCommand(_ context.Context, reqID string) (bool, error) return false, nil } +// ResponseCommand implements the CommandClient func (c *mockClient) ResponseCommand(_ context.Context, reqID string, obj interface{}) error { c.Lock() defer c.Unlock() @@ -391,11 +402,12 @@ func (c *mockClient) ResponseCommand(_ context.Context, reqID string, obj interf return nil } +// WatchCommand implements the CommandClient func (c *mockClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { c.Lock() defer c.Unlock() ch := make(chan *CmdRequest, 16+len(c.store)) - c.watchers = append(c.watchers, ch) + c.commandWatchers = append(c.commandWatchers, ch) for key, val := range c.store { if strings.HasPrefix(key, ttlCmdKeyRequestPrefix) { if req, ok := val.(*CmdRequest); ok { @@ -407,9 +419,9 @@ func (c *mockClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { <-ctx.Done() c.Lock() defer c.Unlock() - for i, chItem := range c.watchers { + for i, chItem := range c.commandWatchers { if chItem == ch { - c.watchers = append(c.watchers[:i], c.watchers[i+1:]...) + c.commandWatchers = append(c.commandWatchers[:i], c.commandWatchers[i+1:]...) break } } diff --git a/ttl/client/command_test.go b/ttl/client/command_test.go index 830137f32904e..69cde75309ad6 100644 --- a/ttl/client/command_test.go +++ b/ttl/client/command_test.go @@ -42,7 +42,7 @@ func TestCommandClient(t *testing.T) { defer cluster.Terminate(t) etcd := cluster.RandClient() - etcdCli := NewEtcdCommandClient(etcd) + etcdCli := NewCommandClient(etcd) mockCli := NewMockCommandClient() ctx, cancel := context.WithTimeout(context.TODO(), time.Minute) diff --git a/ttl/client/notification.go b/ttl/client/notification.go new file mode 100644 index 0000000000000..6c44cd0dd7aa9 --- /dev/null +++ b/ttl/client/notification.go @@ -0,0 +1,79 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
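The new ttl/client/notification.go that follows introduces a NotificationClient: Notify writes a key under /tidb/ttl/notification/<type>, and WatchNotification returns the etcd watch channel for that prefix. A hedged usage sketch of the pair; waitForScan is illustrative only, not part of this change:

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/ttl/client"
)

// waitForScan is a hypothetical consumer: one TTL worker notifies,
// another one watches and wakes up immediately instead of waiting
// for the next ticker interval.
func waitForScan(ctx context.Context, cli client.NotificationClient) error {
	watch := cli.WatchNotification(ctx, "scan")

	// Another worker announces a freshly created job by its ID.
	if err := cli.Notify(ctx, "scan", "job-123"); err != nil {
		return err
	}

	// The watcher receives the event and can reschedule scan tasks now.
	<-watch
	return nil
}
```

For tests, client.NewMockNotificationClient() provides the same interface without an etcd cluster, as the mock implementation below shows.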
+ +package client + +import ( + "context" + + "github.com/pingcap/tidb/ddl/util" + clientv3 "go.etcd.io/etcd/client/v3" +) + +const ttlNotificationPrefix string = "/tidb/ttl/notification/" + +// NotificationClient is a client to notify other TTL workers +type NotificationClient interface { + // Notify sends a notification + Notify(ctx context.Context, typ string, data string) error + // WatchNotification opens a channel, in which we could receive all notifications + WatchNotification(ctx context.Context, typ string) clientv3.WatchChan +} + +// NewNotificationClient creates a notification client with etcd +func NewNotificationClient(etcdCli *clientv3.Client) NotificationClient { + return &etcdClient{ + etcdCli: etcdCli, + } +} + +// Notify stores the corresponding K-V in the etcd +func (c *etcdClient) Notify(ctx context.Context, typ string, data string) error { + return util.PutKVToEtcd(ctx, c.etcdCli, 1, ttlNotificationPrefix+typ, data) +} + +// WatchNotification returns a go channel to get notification +func (c *etcdClient) WatchNotification(ctx context.Context, typ string) clientv3.WatchChan { + return c.etcdCli.Watch(ctx, ttlNotificationPrefix+typ) +} + +// NewMockNotificationClient creates a mock notification client +func NewMockNotificationClient() NotificationClient { + return &mockClient{ + store: make(map[string]interface{}), + commandWatchers: make([]chan *CmdRequest, 0, 1), + notificationWatchers: make(map[string][]chan clientv3.WatchResponse), + } +} + +// Notify implements the NotificationClient +func (c *mockClient) Notify(_ context.Context, typ string, data string) error { + c.Lock() + defer c.Unlock() + + for _, ch := range c.notificationWatchers[typ] { + ch <- clientv3.WatchResponse{} + } + return nil +} + +// WatchNotification implements the NotificationClient +func (c *mockClient) WatchNotification(_ context.Context, typ string) clientv3.WatchChan { + c.Lock() + defer c.Unlock() + + ch := make(chan clientv3.WatchResponse, 1) + c.notificationWatchers[typ] = append(c.notificationWatchers[typ], ch) + return ch +} diff --git a/ttl/metrics/metrics.go b/ttl/metrics/metrics.go index 8768b0e267388..3c8ceee213a14 100644 --- a/ttl/metrics/metrics.go +++ b/ttl/metrics/metrics.go @@ -133,16 +133,16 @@ func (t *PhaseTracer) EndPhase() { t.EnterPhase("") } -const ttlPhaseTraceKey = "ttlPhaseTraceKey" +type ttlPhaseTraceKey struct{} // CtxWithPhaseTracer create a new context with tracer func CtxWithPhaseTracer(ctx context.Context, tracer *PhaseTracer) context.Context { - return context.WithValue(ctx, ttlPhaseTraceKey, tracer) + return context.WithValue(ctx, ttlPhaseTraceKey{}, tracer) } // PhaseTracerFromCtx returns a tracer from a given context func PhaseTracerFromCtx(ctx context.Context) *PhaseTracer { - if tracer, ok := ctx.Value(ttlPhaseTraceKey).(*PhaseTracer); ok { + if tracer, ok := ctx.Value(ttlPhaseTraceKey{}).(*PhaseTracer); ok { return tracer } return nil diff --git a/ttl/ttlworker/config.go b/ttl/ttlworker/config.go index c1774bc667348..89ca9eedae010 100644 --- a/ttl/ttlworker/config.go +++ b/ttl/ttlworker/config.go @@ -32,6 +32,7 @@ const ttlJobTimeout = 6 * time.Hour const taskManagerLoopTickerInterval = time.Minute const ttlTaskHeartBeatTickerInterval = time.Minute +const ttlGCInterval = time.Hour func getUpdateInfoSchemaCacheInterval() time.Duration { failpoint.Inject("update-info-schema-cache-interval", func(val failpoint.Value) time.Duration { diff --git a/ttl/ttlworker/job.go b/ttl/ttlworker/job.go index f2a78e7ef0270..5ba91dcc375a0 100644 --- a/ttl/ttlworker/job.go 
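The ttl/metrics hunk above replaces a string context key with the unexported empty-struct type ttlPhaseTraceKey, the standard fix for staticcheck SA1029 (the gcworker hunk earlier suppresses the same warning with a nolint instead): a dedicated key type cannot collide with keys set by other packages and costs nothing to construct. A minimal sketch of the pattern, with a hypothetical traceKey:

```go
package main

import (
	"context"
	"fmt"
)

// Unexported struct keys cannot collide with context keys from other
// packages, unlike bare strings, and struct{} values allocate nothing.
type traceKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), traceKey{}, "tracer-state")
	if v, ok := ctx.Value(traceKey{}).(string); ok {
		fmt.Println(v) // tracer-state
	}
}
```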
+++ b/ttl/ttlworker/job.go @@ -43,6 +43,25 @@ const finishJobTemplate = `UPDATE mysql.tidb_ttl_table_status current_job_status_update_time = NULL WHERE table_id = %? AND current_job_id = %?` const removeTaskForJobTemplate = "DELETE FROM mysql.tidb_ttl_task WHERE job_id = %?" +const addJobHistoryTemplate = `INSERT INTO + mysql.tidb_ttl_job_history ( + job_id, + table_id, + parent_table_id, + table_schema, + table_name, + partition_name, + create_time, + finish_time, + ttl_expire, + summary_text, + expired_rows, + deleted_rows, + error_delete_rows, + status + ) +VALUES + (%?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?)` func updateJobCurrentStatusSQL(tableID int64, oldStatus cache.JobStatus, newStatus cache.JobStatus, jobID string) (string, []interface{}) { return updateJobCurrentStatusTemplate, []interface{}{string(newStatus), tableID, string(oldStatus), jobID} @@ -56,11 +75,41 @@ func removeTaskForJob(jobID string) (string, []interface{}) { return removeTaskForJobTemplate, []interface{}{jobID} } +func addJobHistorySQL(job *ttlJob, finishTime time.Time, summary *TTLSummary) (string, []interface{}) { + status := cache.JobStatusFinished + if job.status == cache.JobStatusTimeout || job.status == cache.JobStatusCancelled { + status = job.status + } + + var partitionName interface{} + if job.tbl.Partition.O != "" { + partitionName = job.tbl.Partition.O + } + + return addJobHistoryTemplate, []interface{}{ + job.id, + job.tbl.ID, + job.tbl.TableInfo.ID, + job.tbl.Schema.O, + job.tbl.Name.O, + partitionName, + job.createTime.Format(timeFormat), + finishTime.Format(timeFormat), + job.ttlExpireTime.Format(timeFormat), + summary.SummaryText, + summary.TotalRows, + summary.SuccessRows, + summary.ErrorRows, + string(status), + } +} + type ttlJob struct { id string ownerID string - createTime time.Time + createTime time.Time + ttlExpireTime time.Time tbl *cache.PhysicalTable @@ -71,11 +120,11 @@ type ttlJob struct { } // finish turns current job into last job, and update the error message and statistics summary -func (job *ttlJob) finish(se session.Session, now time.Time, summary string) { +func (job *ttlJob) finish(se session.Session, now time.Time, summary *TTLSummary) { // at this time, the job.ctx may have been canceled (to cancel this job) // even when it's canceled, we'll need to update the states, so use another context err := se.RunInTxn(context.TODO(), func() error { - sql, args := finishJobSQL(job.tbl.ID, now, summary, job.id) + sql, args := finishJobSQL(job.tbl.ID, now, summary.SummaryText, job.id) _, err := se.ExecuteSQL(context.TODO(), sql, args...) if err != nil { return errors.Wrapf(err, "execute sql: %s", sql) @@ -87,6 +136,12 @@ func (job *ttlJob) finish(se session.Session, now time.Time, summary string) { return errors.Wrapf(err, "execute sql: %s", sql) } + sql, args = addJobHistorySQL(job, now, summary) + _, err = se.ExecuteSQL(context.TODO(), sql, args...) 
+ if err != nil { + return errors.Wrapf(err, "execute sql: %s", sql) + } + return nil }, session.TxnModeOptimistic) diff --git a/ttl/ttlworker/job_manager.go b/ttl/ttlworker/job_manager.go index 223128b52f26c..0b427e64318ac 100644 --- a/ttl/ttlworker/job_manager.go +++ b/ttl/ttlworker/job_manager.go @@ -38,6 +38,8 @@ import ( "go.uber.org/zap" ) +const scanTaskNotificationType string = "scan" + const insertNewTableIntoStatusTemplate = "INSERT INTO mysql.tidb_ttl_table_status (table_id,parent_table_id) VALUES (%?, %?)" const setTableStatusOwnerTemplate = `UPDATE mysql.tidb_ttl_table_status SET current_job_id = %?, @@ -56,6 +58,8 @@ const taskGCTemplate = `DELETE task FROM ON task.job_id = job.current_job_id WHERE job.table_id IS NULL` +const ttlJobHistoryGCTemplate = `DELETE FROM mysql.tidb_ttl_job_history WHERE create_time < CURDATE() - INTERVAL 90 DAY` + const timeFormat = "2006-01-02 15:04:05" func insertNewTableIntoStatusSQL(tableID int64, parentTableID int64) (string, []interface{}) { @@ -82,8 +86,9 @@ type JobManager struct { // id is the ddl id of this instance id string - store kv.Storage - cmdCli client.CommandClient + store kv.Storage + cmdCli client.CommandClient + notificationCli client.NotificationClient // infoSchemaCache and tableStatusCache are a cache stores the information from info schema and the tidb_ttl_table_status // table. They don't need to be protected by mutex, because they are only used in job loop goroutine. @@ -113,9 +118,11 @@ func NewJobManager(id string, sessPool sessionPool, store kv.Storage, etcdCli *c manager.tableStatusCache = cache.NewTableStatusCache(getUpdateTTLTableStatusCacheInterval()) if etcdCli != nil { - manager.cmdCli = client.NewEtcdCommandClient(etcdCli) + manager.cmdCli = client.NewCommandClient(etcdCli) + manager.notificationCli = client.NewNotificationClient(etcdCli) } else { manager.cmdCli = client.NewMockCommandClient() + manager.notificationCli = client.NewMockNotificationClient() } manager.taskManager = newTaskManager(manager.ctx, sessPool, manager.infoSchemaCache, id) @@ -138,7 +145,7 @@ func (m *JobManager) jobLoop() error { infoSchemaCacheUpdateTicker := time.Tick(m.infoSchemaCache.GetInterval()) tableStatusCacheUpdateTicker := time.Tick(m.tableStatusCache.GetInterval()) resizeWorkersTicker := time.Tick(getResizeWorkersInterval()) - taskGC := time.Tick(jobManagerLoopTickerInterval) + gcTicker := time.Tick(ttlGCInterval) scheduleJobTicker := time.Tick(jobManagerLoopTickerInterval) jobCheckTicker := time.Tick(jobManagerLoopTickerInterval) @@ -150,6 +157,7 @@ func (m *JobManager) jobLoop() error { checkScanTaskFinishedTicker := time.Tick(getTaskManagerLoopTickerInterval()) cmdWatcher := m.cmdCli.WatchCommand(m.ctx) + scanTaskNotificationWatcher := m.notificationCli.WatchNotification(m.ctx, scanTaskNotificationType) m.taskManager.resizeWorkersWithSysVar() for { m.reportMetrics() @@ -169,12 +177,9 @@ func (m *JobManager) jobLoop() error { if err != nil { logutil.Logger(m.ctx).Warn("fail to update table status cache", zap.Error(err)) } - case <-taskGC: - taskGCCtx, cancel := context.WithTimeout(m.ctx, ttlInternalSQLTimeout) - _, err = se.ExecuteSQL(taskGCCtx, taskGCTemplate) - if err != nil { - logutil.Logger(m.ctx).Warn("fail to gc redundant scan task", zap.Error(err)) - } + case <-gcTicker: + gcCtx, cancel := context.WithTimeout(m.ctx, ttlInternalSQLTimeout) + DoGC(gcCtx, se) cancel() // Job Schedule loop: case <-updateJobHeartBeatTicker: @@ -208,6 +213,17 @@ func (m *JobManager) jobLoop() error { // Task Manager Loop case 
<-scheduleTaskTicker: m.taskManager.rescheduleTasks(se, now) + case _, ok := <-scanTaskNotificationWatcher: + if !ok { + if m.ctx.Err() != nil { + return nil + } + + logutil.BgLogger().Warn("The TTL scan task notification watcher is closed unexpectedly, re-watch it again") + scanTaskNotificationWatcher = m.notificationCli.WatchNotification(m.ctx, scanTaskNotificationType) + continue + } + m.taskManager.rescheduleTasks(se, now) case <-taskCheckTicker: m.taskManager.checkInvalidTask(se) m.taskManager.checkFinishedTask(se, now) @@ -611,23 +627,32 @@ func (m *JobManager) lockNewJob(ctx context.Context, se session.Session, table * if err != nil { return nil, err } - return m.createNewJob(now, table) + + job := m.createNewJob(expireTime, now, table) + + // job is created, notify every scan managers to fetch new tasks + err = m.notificationCli.Notify(m.ctx, scanTaskNotificationType, job.id) + if err != nil { + logutil.Logger(m.ctx).Warn("fail to trigger scan tasks", zap.Error(err)) + } + return job, nil } -func (m *JobManager) createNewJob(now time.Time, table *cache.PhysicalTable) (*ttlJob, error) { +func (m *JobManager) createNewJob(expireTime time.Time, now time.Time, table *cache.PhysicalTable) *ttlJob { id := m.tableStatusCache.Tables[table.ID].CurrentJobID return &ttlJob{ id: id, ownerID: m.id, - createTime: now, + createTime: now, + ttlExpireTime: expireTime, // at least, the info schema cache and table status cache are consistent in table id, so it's safe to get table // information from schema cache directly tbl: table, status: cache.JobStatusWaiting, - }, nil + } } // updateHeartBeat updates the heartbeat for all task with current instance as owner @@ -687,7 +712,13 @@ func (m *JobManager) GetCommandCli() client.CommandClient { return m.cmdCli } -type ttlSummary struct { +// GetNotificationCli returns the notification client +func (m *JobManager) GetNotificationCli() client.NotificationClient { + return m.notificationCli +} + +// TTLSummary is the summary for TTL job +type TTLSummary struct { TotalRows uint64 `json:"total_rows"` SuccessRows uint64 `json:"success_rows"` ErrorRows uint64 `json:"error_rows"` @@ -697,22 +728,24 @@ type ttlSummary struct { FinishedScanTask int `json:"finished_scan_task"` ScanTaskErr string `json:"scan_task_err,omitempty"` + SummaryText string `json:"-"` } -func summarizeErr(err error) (string, error) { - summary := &ttlSummary{ +func summarizeErr(err error) (*TTLSummary, error) { + summary := &TTLSummary{ ScanTaskErr: err.Error(), } buf, err := json.Marshal(summary) if err != nil { - return "", err + return nil, err } - return string(buf), nil + summary.SummaryText = string(buf) + return summary, nil } -func summarizeTaskResult(tasks []*cache.TTLTask) (string, error) { - summary := &ttlSummary{} +func summarizeTaskResult(tasks []*cache.TTLTask) (*TTLSummary, error) { + summary := &TTLSummary{} var allErr error for _, t := range tasks { if t.State != nil { @@ -738,7 +771,19 @@ func summarizeTaskResult(tasks []*cache.TTLTask) (string, error) { buf, err := json.Marshal(summary) if err != nil { - return "", err + return nil, err + } + summary.SummaryText = string(buf) + return summary, nil +} + +// DoGC deletes some old TTL job histories and redundant scan tasks +func DoGC(ctx context.Context, se session.Session) { + if _, err := se.ExecuteSQL(ctx, taskGCTemplate); err != nil { + logutil.Logger(ctx).Warn("fail to gc redundant scan task", zap.Error(err)) + } + + if _, err := se.ExecuteSQL(ctx, ttlJobHistoryGCTemplate); err != nil { + 
logutil.Logger(ctx).Warn("fail to gc ttl job history", zap.Error(err)) } - return string(buf), nil } diff --git a/ttl/ttlworker/job_manager_integration_test.go b/ttl/ttlworker/job_manager_integration_test.go index c763e1363aecd..e2e864344fde3 100644 --- a/ttl/ttlworker/job_manager_integration_test.go +++ b/ttl/ttlworker/job_manager_integration_test.go @@ -16,8 +16,10 @@ package ttlworker_test import ( "context" + "encoding/json" "fmt" "strconv" + "strings" "sync" "testing" "time" @@ -69,7 +71,7 @@ func TestParallelLockNewJob(t *testing.T) { se := sessionFactory() job, err := m.LockNewJob(context.Background(), se, testTable, time.Now(), false) require.NoError(t, err) - job.Finish(se, time.Now(), "") + job.Finish(se, time.Now(), &ttlworker.TTLSummary{}) // lock one table in parallel, only one of them should lock successfully testTimes := 100 @@ -103,18 +105,19 @@ func TestParallelLockNewJob(t *testing.T) { wg.Wait() require.Equal(t, uint64(1), successCounter.Load()) - successJob.Finish(se, time.Now(), "") + successJob.Finish(se, time.Now(), &ttlworker.TTLSummary{}) } } func TestFinishJob(t *testing.T) { + timeFormat := "2006-01-02 15:04:05" store, dom := testkit.CreateMockStoreAndDomain(t) waitAndStopTTLManager(t, dom) tk := testkit.NewTestKit(t, store) sessionFactory := sessionFactory(t, store) - testTable := &cache.PhysicalTable{ID: 2, TableInfo: &model.TableInfo{ID: 1, TTLInfo: &model.TTLInfo{IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay)}}} + testTable := &cache.PhysicalTable{ID: 2, Schema: model.NewCIStr("db1"), TableInfo: &model.TableInfo{ID: 1, Name: model.NewCIStr("t1"), TTLInfo: &model.TTLInfo{IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay)}}} tk.MustExec("insert into mysql.tidb_ttl_table_status(table_id) values (2)") @@ -122,13 +125,33 @@ func TestFinishJob(t *testing.T) { m := ttlworker.NewJobManager("test-id", nil, store, nil) m.InfoSchemaCache().Tables[testTable.ID] = testTable se := sessionFactory() - job, err := m.LockNewJob(context.Background(), se, testTable, time.Now(), false) + startTime := time.Now() + job, err := m.LockNewJob(context.Background(), se, testTable, startTime, false) + require.NoError(t, err) + + expireTime, err := testTable.EvalExpireTime(context.Background(), se, startTime) require.NoError(t, err) - summary := `{"total_rows":0,"scan_task_err":"\"'an error message contains both single and double quote'\""}` - job.Finish(se, time.Now(), summary) - tk.MustQuery("select table_id, last_job_summary from mysql.tidb_ttl_table_status").Check(testkit.Rows(`2 {"total_rows":0,"scan_task_err":"\"'an error message contains both single and double quote'\""}`)) + summary := &ttlworker.TTLSummary{ + ScanTaskErr: "\"'an error message contains both single and double quote'\"", + TotalRows: 128, + SuccessRows: 120, + ErrorRows: 8, + } + summaryBytes, err := json.Marshal(summary) + summary.SummaryText = string(summaryBytes) + + require.NoError(t, err) + endTime := time.Now() + job.Finish(se, endTime, summary) + tk.MustQuery("select table_id, last_job_summary from mysql.tidb_ttl_table_status").Check(testkit.Rows("2 " + summary.SummaryText)) tk.MustQuery("select * from mysql.tidb_ttl_task").Check(testkit.Rows()) + expectedRow := []string{ + job.ID(), "2", "1", "db1", "t1", "", + startTime.Format(timeFormat), endTime.Format(timeFormat), expireTime.Format(timeFormat), + summary.SummaryText, "128", "120", "8", "finished", + } + tk.MustQuery("select * from mysql.tidb_ttl_job_history").Check(testkit.Rows(strings.Join(expectedRow, " "))) } func 
TestTTLAutoAnalyze(t *testing.T) { @@ -407,6 +430,50 @@ func TestJobTimeout(t *testing.T) { tk.MustQuery("select count(*) from mysql.tidb_ttl_task").Check(testkit.Rows("0")) } +func TestTriggerScanTask(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + sessionFactory := sessionFactory(t, store) + now := time.Now() + se := sessionFactory() + + waitAndStopTTLManager(t, dom) + + tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") + + m := ttlworker.NewJobManager("manager-1", nil, store, nil) + require.NoError(t, m.InfoSchemaCache().Update(se)) + m.TaskManager().ResizeWorkersWithSysVar() + m.Start() + defer func() { + m.Stop() + require.NoError(t, m.WaitStopped(context.Background(), time.Second*10)) + }() + + nCli := m.GetNotificationCli() + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + <-nCli.WatchNotification(context.Background(), "scan") + wg.Done() + }() + m.RescheduleJobs(se, now) + + // notification is sent + wg.Wait() + + for time.Now().Before(now.Add(time.Second * 5)) { + time.Sleep(time.Second) + rows := tk.MustQuery("SELECT status FROM mysql.tidb_ttl_task").Rows() + if len(rows) == 0 { + break + } + if rows[0][0] == cache.TaskStatusFinished { + break + } + } +} + func waitAndStopTTLManager(t *testing.T, dom *domain.Domain) { maxWaitTime := 30 for { @@ -423,3 +490,87 @@ func waitAndStopTTLManager(t *testing.T, dom *domain.Domain) { continue } } + +func TestGCScanTasks(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + addTableStatusRecord := func(tableID, parentTableID, curJobID int64) { + tk.MustExec("INSERT INTO mysql.tidb_ttl_table_status (table_id,parent_table_id) VALUES (?, ?)", tableID, parentTableID) + if curJobID == 0 { + return + } + + tk.MustExec(`UPDATE mysql.tidb_ttl_table_status + SET current_job_id = ?, + current_job_owner_id = '12345', + current_job_start_time = NOW(), + current_job_status = 'running', + current_job_status_update_time = NOW(), + current_job_ttl_expire = NOW(), + current_job_owner_hb_time = NOW() + WHERE table_id = ?`, curJobID, tableID) + } + + addScanTaskRecord := func(jobID, tableID, scanID int64) { + tk.MustExec(`INSERT INTO mysql.tidb_ttl_task SET + job_id = ?, + table_id = ?, + scan_id = ?, + expire_time = NOW(), + created_time = NOW()`, jobID, tableID, scanID) + } + + addTableStatusRecord(1, 1, 1) + addScanTaskRecord(1, 1, 1) + addScanTaskRecord(1, 1, 2) + addScanTaskRecord(2, 1, 1) + addScanTaskRecord(2, 1, 2) + addScanTaskRecord(3, 2, 1) + addScanTaskRecord(3, 2, 2) + + se := session.NewSession(tk.Session(), tk.Session(), func(_ session.Session) {}) + ttlworker.DoGC(context.TODO(), se) + tk.MustQuery("select job_id, scan_id from mysql.tidb_ttl_task order by job_id, scan_id asc").Check(testkit.Rows("1 1", "1 2")) +} + +func TestGCTTLHistory(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + addHistory := func(jobID, createdBeforeDays int) { + tk.MustExec(fmt.Sprintf(`INSERT INTO mysql.tidb_ttl_job_history ( + job_id, + table_id, + parent_table_id, + table_schema, + table_name, + partition_name, + create_time, + finish_time, + ttl_expire, + summary_text, + expired_rows, + deleted_rows, + error_delete_rows, + status + ) + VALUES + ( + %d, 1, 1, 'test', 't1', '', + CURDATE() - INTERVAL %d DAY, + CURDATE() - INTERVAL %d DAY + INTERVAL 1 HOUR, + CURDATE() - INTERVAL %d DAY, + "", 100, 100, 0, "finished" + )`, jobID, 
createdBeforeDays, createdBeforeDays, createdBeforeDays)) + } + + addHistory(1, 1) + addHistory(2, 30) + addHistory(3, 60) + addHistory(4, 89) + addHistory(5, 90) + addHistory(6, 91) + addHistory(7, 100) + se := session.NewSession(tk.Session(), tk.Session(), func(_ session.Session) {}) + ttlworker.DoGC(context.TODO(), se) + tk.MustQuery("select job_id from mysql.tidb_ttl_job_history order by job_id asc").Check(testkit.Rows("1", "2", "3", "4", "5")) +} diff --git a/ttl/ttlworker/job_manager_test.go b/ttl/ttlworker/job_manager_test.go index 9e0211410591b..bf7837fc2ee64 100644 --- a/ttl/ttlworker/job_manager_test.go +++ b/ttl/ttlworker/job_manager_test.go @@ -171,7 +171,7 @@ func (m *JobManager) UpdateHeartBeat(ctx context.Context, se session.Session, no return m.updateHeartBeat(ctx, se, now) } -func (j *ttlJob) Finish(se session.Session, now time.Time, summary string) { +func (j *ttlJob) Finish(se session.Session, now time.Time, summary *TTLSummary) { j.finish(se, now, summary) } diff --git a/util/gpool/spmc/spmcpool.go b/util/gpool/spmc/spmcpool.go index 6644a0e895650..5f58bba12d5b4 100644 --- a/util/gpool/spmc/spmcpool.go +++ b/util/gpool/spmc/spmcpool.go @@ -140,12 +140,22 @@ func (p *Pool[T, U, C, CT, TF]) Tune(size int) { p.SetLastTuneTs(time.Now()) p.capacity.Store(int32(size)) if size > capacity { - // boost + for i := 0; i < size-capacity; i++ { + if tid, boostTask := p.taskManager.Overclock(); boostTask != nil { + p.addWaitingTask() + p.taskManager.AddSubTask(tid, boostTask.Clone()) + p.taskCh <- boostTask + } + } if size-capacity == 1 { p.cond.Signal() return } p.cond.Broadcast() + return + } + if size < capacity { + p.taskManager.Downclock() } } diff --git a/util/gpool/spmc/spmcpool_test.go b/util/gpool/spmc/spmcpool_test.go index 3036ad7412a3c..5bc5da4fdf3bc 100644 --- a/util/gpool/spmc/spmcpool_test.go +++ b/util/gpool/spmc/spmcpool_test.go @@ -15,9 +15,11 @@ package spmc import ( + "fmt" "sync" "sync/atomic" "testing" + "time" "github.com/pingcap/tidb/resourcemanager/pooltask" rmutil "github.com/pingcap/tidb/resourcemanager/util" @@ -121,6 +123,78 @@ func TestStopPool(t *testing.T) { pool.ReleaseAndWait() } +func TestTuneSimplePool(t *testing.T) { + testTunePool(t, "TestTuneSimplePool") +} + +func TestTuneMultiPool(t *testing.T) { + var concurrency = 5 + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + go func(id int) { + testTunePool(t, fmt.Sprintf("TestTuneMultiPool%d", id)) + wg.Done() + }(i) + } + wg.Wait() +} + +func testTunePool(t *testing.T, name string) { + type ConstArgs struct { + a int + } + myArgs := ConstArgs{a: 10} + // init the pool + // input type, output type, constArgs type + pool, err := NewSPMCPool[int, int, ConstArgs, any, pooltask.NilContext](name, 10, rmutil.UNKNOWN) + require.NoError(t, err) + pool.SetConsumerFunc(func(task int, constArgs ConstArgs, ctx any) int { + return task + constArgs.a + }) + + exit := make(chan struct{}) + + pfunc := func() (int, error) { + select { + case <-exit: + return 0, gpool.ErrProducerClosed + default: + return 1, nil + } + } + // add new task + resultCh, control := pool.AddProducer(pfunc, myArgs, pooltask.NilContext{}, WithConcurrency(10)) + tid := control.TaskID() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for result := range resultCh { + require.Greater(t, result, 10) + } + }() + time.Sleep(1 * time.Second) + newSize := pool.Cap() - 1 + pool.Tune(newSize) + time.Sleep(1 * time.Second) + require.Equal(t, newSize, pool.Cap()) + require.Equal(t, int32(newSize), 
diff --git a/util/gpool/spmc/spmcpool_test.go b/util/gpool/spmc/spmcpool_test.go
index 3036ad7412a3c..5bc5da4fdf3bc 100644
--- a/util/gpool/spmc/spmcpool_test.go
+++ b/util/gpool/spmc/spmcpool_test.go
@@ -15,9 +15,11 @@ package spmc
 
 import (
+	"fmt"
 	"sync"
 	"sync/atomic"
 	"testing"
+	"time"
 
 	"github.com/pingcap/tidb/resourcemanager/pooltask"
 	rmutil "github.com/pingcap/tidb/resourcemanager/util"
@@ -121,6 +123,78 @@ func TestStopPool(t *testing.T) {
 	pool.ReleaseAndWait()
 }
 
+func TestTuneSimplePool(t *testing.T) {
+	testTunePool(t, "TestTuneSimplePool")
+}
+
+func TestTuneMultiPool(t *testing.T) {
+	var concurrency = 5
+	var wg sync.WaitGroup
+	wg.Add(concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func(id int) {
+			testTunePool(t, fmt.Sprintf("TestTuneMultiPool%d", id))
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+}
+
+func testTunePool(t *testing.T, name string) {
+	type ConstArgs struct {
+		a int
+	}
+	myArgs := ConstArgs{a: 10}
+	// init the pool
+	// input type, output type, constArgs type
+	pool, err := NewSPMCPool[int, int, ConstArgs, any, pooltask.NilContext](name, 10, rmutil.UNKNOWN)
+	require.NoError(t, err)
+	pool.SetConsumerFunc(func(task int, constArgs ConstArgs, ctx any) int {
+		return task + constArgs.a
+	})
+
+	exit := make(chan struct{})
+
+	pfunc := func() (int, error) {
+		select {
+		case <-exit:
+			return 0, gpool.ErrProducerClosed
+		default:
+			return 1, nil
+		}
+	}
+	// add new task
+	resultCh, control := pool.AddProducer(pfunc, myArgs, pooltask.NilContext{}, WithConcurrency(10))
+	tid := control.TaskID()
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for result := range resultCh {
+			require.Greater(t, result, 10)
+		}
+	}()
+	time.Sleep(1 * time.Second)
+	newSize := pool.Cap() - 1
+	pool.Tune(newSize)
+	time.Sleep(1 * time.Second) // give the tune a moment to propagate to the workers
+	require.Equal(t, newSize, pool.Cap())
+	require.Equal(t, int32(newSize), pool.taskManager.Running(tid))
+
+	newSize = pool.Cap() + 1
+	pool.Tune(newSize)
+	time.Sleep(1 * time.Second)
+	require.Equal(t, newSize, pool.Cap())
+	require.Equal(t, int32(newSize), pool.taskManager.Running(tid))
+
+	// exit test
+	close(exit)
+	control.Wait()
+	wg.Wait()
+	// close pool
+	pool.ReleaseAndWait()
+}
+
 func TestPoolWithEnoughCapacity(t *testing.T) {
 	const (
 		RunTimes = 1000
diff --git a/util/gpool/spmc/worker.go b/util/gpool/spmc/worker.go
index b8e22376bb79a..158c677775987 100644
--- a/util/gpool/spmc/worker.go
+++ b/util/gpool/spmc/worker.go
@@ -67,7 +67,7 @@ func (w *goWorker[T, U, C, CT, TF]) run() {
 			for t := range f.GetTaskCh() {
 				if f.GetStatus() == pooltask.StopTask {
 					f.Done()
-					continue
+					break
 				}
 				f.GetResultCh() <- w.pool.consumerFunc(t.Task, f.ConstArgs(), ctx)
 				f.Done()
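The one-line worker.go change above is behavioral, not cosmetic: once a task is flagged StopTask, the worker now leaves its consume loop (break) instead of spinning through every remaining queued item (continue). A minimal stand-in loop showing the difference; the names here are hypothetical, and this is not the spmc worker itself.

package main

import "fmt"

type status int

const (
	runTask status = iota
	stopTask
)

func main() {
	taskCh := make(chan int, 3)
	for i := 1; i <= 3; i++ {
		taskCh <- i
	}
	close(taskCh)

	state := runTask
	consumed := 0
	for task := range taskCh {
		if state == stopTask {
			// With continue the loop would keep receiving and discarding
			// until the channel drains; break frees the worker immediately.
			break
		}
		fmt.Println("consumed", task)
		consumed++
		if consumed == 1 {
			state = stopTask // a stop request arrives after the first item
		}
	}
	fmt.Println(len(taskCh), "item(s) left unconsumed in the channel")
}

That early exit is what lets a stopped or downclocked task release its worker back to the pool instead of burning it on items that will never be processed.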
"@io_bazel_rules_go//go/platform:openbsd": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "@org_golang_x_sys//unix", + ], + "//conditions:default": [], + }), ) go_test(