diff --git a/.bazelversion b/.bazelversion
index 84197c89467dd..09b254e90c61e 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-5.3.2
+6.0.0
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 9fd79475351a3..b1d1d17e8acd9 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,6 +1,5 @@
 # Require review from domain experts when the PR modified significant config files.
-# TODO: Enable these again before merging the feature branch to pingcap/master
-#/sessionctx/variable @pingcap/tidb-configuration-reviewer
-#/config/config.toml.example @pingcap/tidb-configuration-reviewer
-#/session/bootstrap.go @pingcap/tidb-configuration-reviewer
-#/telemetry/ @pingcap/telemetry-reviewer
+/sessionctx/variable @pingcap/tidb-configuration-reviewer
+/config/config.toml.example @pingcap/tidb-configuration-reviewer
+/session/bootstrap.go @pingcap/tidb-configuration-reviewer
+/telemetry/ @pingcap/telemetry-reviewer
diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml
deleted file mode 100644
index 94b68e9c95510..0000000000000
--- a/.github/workflows/misc.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: misc
-
-on:
-  workflow_dispatch:
-  pull_request:
-    branches:
-      - "master"
-      - "main"
-      - "release-**"
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-
-permissions:
-  contents: read # to fetch code (actions/checkout)
-
-jobs:
-  check:
-    permissions:
-      contents: read # to fetch code (actions/checkout)
-      pull-requests: write # to comment on pull-requests
-
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Check File Permission
-        run: make check-file-perm
-      - name: Check License Header
-        uses: apache/skywalking-eyes/header@v0.4.0
-        with:
-          log: info
-          token: ${{ secrets.GITHUB_TOKEN }}
-          config: .github/licenserc.yml
diff --git a/DEPS.bzl b/DEPS.bzl
index a1ff508ca83c7..f9a9cfe05aad5 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -147,6 +147,13 @@ def go_deps():
         sum = "h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=",
         version = "v0.1.1",
     )
+    go_repository(
+        name = "com_github_apache_skywalking_eyes",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/apache/skywalking-eyes",
+        sum = "h1:O13kdRU6FCEZevfD01mdhTgCZLLfPZIQ0GXZrLl7FpQ=",
+        version = "v0.4.0",
+    )
 
     go_repository(
         name = "com_github_apache_thrift",
@@ -337,6 +344,14 @@
         sum = "h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=",
         version = "v0.8.0",
     )
+    go_repository(
+        name = "com_github_bmatcuk_doublestar_v2",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/bmatcuk/doublestar/v2",
+        sum = "h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI=",
+        version = "v2.0.4",
+    )
+
     go_repository(
         name = "com_github_bombsimon_wsl_v3",
         build_file_proto_mode = "disable",
@@ -1090,8 +1105,8 @@
         name = "com_github_frankban_quicktest",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/frankban/quicktest",
-        sum = "h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=",
-        version = "v1.11.3",
+        sum = "h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=",
+        version = "v1.14.3",
     )
     go_repository(
         name = "com_github_fsnotify_fsnotify",
@@ -1667,12 +1682,20 @@
         sum = "h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=",
         version = "v0.5.9",
     )
+    go_repository(
+        name = "com_github_google_go_github_v33",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/google/go-github/v33",
+        sum = "h1:qAf9yP0qc54ufQxzwv+u9H0tiVOnPJxo0lI/JXqw3ZM=",
+        version = "v33.0.0",
+    )
+
    go_repository(
         name = "com_github_google_go_querystring",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/google/go-querystring",
-        sum = "h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=",
-        version = "v1.0.0",
+        sum = "h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=",
+        version = "v1.1.0",
     )
     go_repository(
         name = "com_github_google_gofuzz",
@@ -1681,6 +1704,14 @@
         sum = "h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=",
         version = "v1.1.0",
     )
+    go_repository(
+        name = "com_github_google_licensecheck",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/google/licensecheck",
+        sum = "h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs=",
+        version = "v0.3.1",
+    )
+
     go_repository(
         name = "com_github_google_martian",
         build_file_proto_mode = "disable_global",
@@ -2051,6 +2082,14 @@
         sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=",
         version = "v1.0.0",
     )
+    go_repository(
+        name = "com_github_huandu_xstrings",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/huandu/xstrings",
+        sum = "h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=",
+        version = "v1.3.1",
+    )
+
     go_repository(
         name = "com_github_hydrogen18_memlistener",
         build_file_proto_mode = "disable_global",
@@ -2072,6 +2111,14 @@
         sum = "h1:uGg2frlt3IcT7kbV6LEp5ONv4vmoO2FW4qSO+my/aoM=",
         version = "v0.0.0-20210905161508-09a460cdf81d",
     )
+    go_repository(
+        name = "com_github_imdario_mergo",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/imdario/mergo",
+        sum = "h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=",
+        version = "v0.3.11",
+    )
+
     go_repository(
         name = "com_github_imkira_go_interpol",
         build_file_proto_mode = "disable_global",
@@ -2667,6 +2714,14 @@
         sum = "h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q=",
         version = "v1.1.0",
     )
+    go_repository(
+        name = "com_github_masterminds_goutils",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/Masterminds/goutils",
+        sum = "h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=",
+        version = "v1.1.1",
+    )
+
     go_repository(
         name = "com_github_masterminds_semver",
         build_file_proto_mode = "disable",
@@ -2674,6 +2729,21 @@
         sum = "h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=",
         version = "v1.5.0",
     )
+    go_repository(
+        name = "com_github_masterminds_semver_v3",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/Masterminds/semver/v3",
+        sum = "h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=",
+        version = "v3.1.1",
+    )
+    go_repository(
+        name = "com_github_masterminds_sprig_v3",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/Masterminds/sprig/v3",
+        sum = "h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=",
+        version = "v3.2.2",
+    )
+
     go_repository(
         name = "com_github_matoous_godox",
         build_file_proto_mode = "disable",
@@ -2807,6 +2877,14 @@
         sum = "h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=",
         version = "v1.0.0",
     )
+    go_repository(
+        name = "com_github_mitchellh_copystructure",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/mitchellh/copystructure",
+        sum = "h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=",
+        version = "v1.0.0",
+    )
+
     go_repository(
         name = "com_github_mitchellh_go_homedir",
         build_file_proto_mode = "disable_global",
@@ -3466,10 +3544,6 @@
         name = "com_github_rivo_uniseg",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/rivo/uniseg",
-        patch_args = ["-p1"],
-        patches = [
-            "//build/patches:com_github_rivo_uniseg.patch",
-        ],
"h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=", version = "v0.4.2", ) @@ -3668,8 +3742,8 @@ def go_deps(): name = "com_github_shopspring_decimal", build_file_proto_mode = "disable", importpath = "github.com/shopspring/decimal", - sum = "h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=", - version = "v0.0.0-20180709203117-cd690d0c9e24", + sum = "h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=", + version = "v1.2.0", ) go_repository( @@ -3822,6 +3896,14 @@ def go_deps(): sum = "h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=", version = "v1.12.0", ) + go_repository( + name = "com_github_spkg_bom", + build_file_proto_mode = "disable", + importpath = "github.com/spkg/bom", + sum = "h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64=", + version = "v1.0.0", + ) + go_repository( name = "com_github_ssgreg_nlreturn_v2", build_file_proto_mode = "disable", @@ -5832,8 +5914,8 @@ def go_deps(): name = "org_golang_x_tools", build_file_proto_mode = "disable_global", importpath = "golang.org/x/tools", - sum = "h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=", - version = "v0.2.0", + sum = "h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=", + version = "v0.5.0", ) go_repository( name = "org_golang_x_xerrors", diff --git a/Makefile b/Makefile index b138bfbbd0f04..6afde9f6420ec 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ dev: checklist check explaintest gogenerate br_unit_test test_part_parser_dev ut # Install the check tools. check-setup:tools/bin/revive -check: parser_yacc check-parallel lint tidy testSuite errdoc check-bazel-prepare +check: parser_yacc check-parallel lint tidy testSuite errdoc license check-bazel-prepare fmt: @echo "gofmt (simplify)" @@ -56,6 +56,12 @@ lint:tools/bin/revive @echo "linting" @tools/bin/revive -formatter friendly -config tools/check/revive.toml $(FILES_TIDB_TESTS) +license: + bazel $(BAZEL_GLOBAL_CONFIG) run $(BAZEL_CMD_CONFIG) \ + --run_under="cd $(CURDIR) && " \ + @com_github_apache_skywalking_eyes//cmd/license-eye:license-eye --run_under="cd $(CURDIR) && " -- -c ./.github/licenserc.yml header check + + tidy: @echo "go mod tidy" ./tools/check/check-tidy.sh diff --git a/WORKSPACE b/WORKSPACE index 627c7dd5c5575..6fce0d77d8da1 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -2,19 +2,19 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "io_bazel_rules_go", - sha256 = "56d8c5a5c91e1af73eca71a6fab2ced959b67c86d12ba37feedb0a2dfea441a6", + sha256 = "dd926a88a564a9246713a9c00b35315f54cbd46b31a26d5d8fb264c07045f05d", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.37.0/rules_go-v0.37.0.zip", - "https://github.com/bazelbuild/rules_go/releases/download/v0.37.0/rules_go-v0.37.0.zip", + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip", ], ) http_archive( name = "bazel_gazelle", - sha256 = "501deb3d5695ab658e82f6f6f549ba681ea3ca2a5fb7911154b5aa45596183fa", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.26.0/bazel-gazelle-v0.26.0.tar.gz", - "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.26.0/bazel-gazelle-v0.26.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + 
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", ], ) diff --git a/bindinfo/bind_record.go b/bindinfo/bind_record.go index 6395bbaa278ba..50e8e0ba20784 100644 --- a/bindinfo/bind_record.go +++ b/bindinfo/bind_record.go @@ -115,6 +115,17 @@ type BindRecord struct { Bindings []Binding } +// Copy get the copy of bindRecord +func (br *BindRecord) Copy() *BindRecord { + nbr := &BindRecord{ + OriginalSQL: br.OriginalSQL, + Db: br.Db, + } + nbr.Bindings = make([]Binding, len(br.Bindings)) + copy(nbr.Bindings, br.Bindings) + return nbr +} + // HasEnabledBinding checks if there are any enabled bindings in bind record. func (br *BindRecord) HasEnabledBinding() bool { for _, binding := range br.Bindings { diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 927f3937963a0..daac9609ae5f4 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -556,7 +556,7 @@ func BuildBackupRangeAndSchema( continue } - logger := log.With( + logger := log.L().With( zap.String("db", dbInfo.Name.O), zap.String("table", tableInfo.Name.O), ) diff --git a/br/pkg/backup/schema.go b/br/pkg/backup/schema.go index bb0cf7f884189..19910af1c9b9b 100644 --- a/br/pkg/backup/schema.go +++ b/br/pkg/backup/schema.go @@ -112,12 +112,12 @@ func (ss *Schemas) BackupSchemas( var checksum *checkpoint.ChecksumItem var exists bool = false - if ss.checkpointChecksum != nil { + if ss.checkpointChecksum != nil && schema.tableInfo != nil { checksum, exists = ss.checkpointChecksum[schema.tableInfo.ID] } workerPool.ApplyOnErrorGroup(errg, func() error { if schema.tableInfo != nil { - logger := log.With( + logger := log.L().With( zap.String("db", schema.dbInfo.Name.O), zap.String("table", schema.tableInfo.Name.O), ) diff --git a/br/pkg/lightning/backend/local/BUILD.bazel b/br/pkg/lightning/backend/local/BUILD.bazel index 9524ab5febc2b..b09a1abad85ba 100644 --- a/br/pkg/lightning/backend/local/BUILD.bazel +++ b/br/pkg/lightning/backend/local/BUILD.bazel @@ -69,6 +69,7 @@ go_library( "@org_golang_google_grpc//backoff", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", "@org_golang_x_exp//slices", diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index 4f8ca6bf3117a..5b59ce5b37d65 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -69,12 +69,14 @@ import ( "go.uber.org/atomic" "go.uber.org/multierr" "go.uber.org/zap" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" "golang.org/x/time/rate" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" ) @@ -148,7 +150,7 @@ func (f *importClientFactoryImpl) makeConn(ctx context.Context, storeID uint64) if err != nil { return nil, errors.Trace(err) } - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) if f.tls.TLSConfig() != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(f.tls.TLSConfig())) } @@ -1726,13 +1728,18 @@ func (local *local) ResolveDuplicateRows(ctx context.Context, tbl table.Table, t return err } + tableIDs := physicalTableIDs(tbl.Meta()) + keyInTable := func(key []byte) bool { + return slices.Contains(tableIDs, 
tablecodec.DecodeTableID(key)) + } + errLimiter := rate.NewLimiter(1, 1) pool := utils.NewWorkerPool(uint(local.dupeConcurrency), "resolve duplicate rows") err = local.errorMgr.ResolveAllConflictKeys( ctx, tableName, pool, func(ctx context.Context, handleRows [][2][]byte) error { for { - err := local.deleteDuplicateRows(ctx, logger, handleRows, decoder) + err := local.deleteDuplicateRows(ctx, logger, handleRows, decoder, keyInTable) if err == nil { return nil } @@ -1755,7 +1762,13 @@ func (local *local) ResolveDuplicateRows(ctx context.Context, tbl table.Table, t return errors.Trace(err) } -func (local *local) deleteDuplicateRows(ctx context.Context, logger *log.Task, handleRows [][2][]byte, decoder *kv.TableKVDecoder) (err error) { +func (local *local) deleteDuplicateRows( + ctx context.Context, + logger *log.Task, + handleRows [][2][]byte, + decoder *kv.TableKVDecoder, + keyInTable func(key []byte) bool, +) (err error) { // Starts a Delete transaction. txn, err := local.tikvCli.Begin() if err != nil { @@ -1780,6 +1793,12 @@ func (local *local) deleteDuplicateRows(ctx context.Context, logger *log.Task, h // (if the number of duplicates is small this should fit entirely in memory) // (Txn's MemBuf's bufferSizeLimit is currently infinity) for _, handleRow := range handleRows { + // Skip the row key if it's not in the table. + // This can happen if the table has been recreated or truncated, + // and the duplicate key is from the old table. + if !keyInTable(handleRow[0]) { + continue + } logger.Debug("[resolve-dupe] found row to resolve", logutil.Key("handle", handleRow[0]), logutil.Key("row", handleRow[1])) diff --git a/br/pkg/lightning/common/BUILD.bazel b/br/pkg/lightning/common/BUILD.bazel index a5245356b2715..3bd871276d733 100644 --- a/br/pkg/lightning/common/BUILD.bazel +++ b/br/pkg/lightning/common/BUILD.bazel @@ -33,10 +33,51 @@ go_library( "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//status", - "@org_golang_x_sys//unix", "@org_uber_go_zap//:zap", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:aix": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:android": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:illumos": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:js": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "@org_golang_x_sys//unix", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "@org_golang_x_sys//unix", + ], + "//conditions:default": [], + }), ) go_test( diff --git a/br/pkg/lightning/common/security.go b/br/pkg/lightning/common/security.go index a48abc48c2c54..8d0c98223d18a 100644 --- a/br/pkg/lightning/common/security.go +++ b/br/pkg/lightning/common/security.go @@ -28,6 +28,7 @@ import ( pd "github.com/tikv/pd/client" "google.golang.org/grpc" 
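A note on the credentials change that recurs in this hunk and in several files below: `grpc.WithInsecure()` is deprecated in grpc-go, and `insecure.NewCredentials()` is its documented replacement; both yield a plaintext connection. A minimal standalone sketch of the pattern the patch settles on (default to plaintext, upgrade to TLS when a config exists); the function name is illustrative, not part of the patch:

package main

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// dialOption mirrors the pattern used in the hunks above and below:
// start from a plaintext connection option, and swap in TLS credentials
// whenever a *tls.Config is present.
func dialOption(tlsConf *tls.Config) grpc.DialOption {
	opt := grpc.WithTransportCredentials(insecure.NewCredentials())
	if tlsConf != nil {
		opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConf))
	}
	return opt
}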
"google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) type TLS struct { @@ -104,7 +105,7 @@ func (tc *TLS) ToGRPCDialOption() grpc.DialOption { if tc.inner != nil { return grpc.WithTransportCredentials(credentials.NewTLS(tc.inner)) } - return grpc.WithInsecure() + return grpc.WithTransportCredentials(insecure.NewCredentials()) } // WrapListener places a TLS layer on top of the existing listener. diff --git a/br/pkg/lightning/common/util.go b/br/pkg/lightning/common/util.go index b9bdf564403de..fbf275a99bfe1 100644 --- a/br/pkg/lightning/common/util.go +++ b/br/pkg/lightning/common/util.go @@ -44,6 +44,8 @@ const ( retryTimeout = 3 * time.Second defaultMaxRetry = 3 + + dbTimeout = 30 * time.Second ) // MySQLConnectParam records the parameters needed to connect to a MySQL database. @@ -74,6 +76,8 @@ func (param *MySQLConnectParam) ToDriverConfig() *mysql.Config { cfg.Params["charset"] = "utf8mb4" cfg.Params["sql_mode"] = fmt.Sprintf("'%s'", param.SQLMode) cfg.MaxAllowedPacket = int(param.MaxAllowedPacket) + cfg.ReadTimeout = dbTimeout + cfg.WriteTimeout = dbTimeout cfg.TLS = param.TLSConfig cfg.AllowFallbackToPlaintext = param.AllowFallbackToPlaintext diff --git a/br/pkg/lightning/config/BUILD.bazel b/br/pkg/lightning/config/BUILD.bazel index b69d2fca0d310..b035b506aebf2 100644 --- a/br/pkg/lightning/config/BUILD.bazel +++ b/br/pkg/lightning/config/BUILD.bazel @@ -25,6 +25,8 @@ go_library( "@com_github_docker_go_units//:go-units", "@com_github_go_sql_driver_mysql//:mysql", "@com_github_pingcap_errors//:errors", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//keepalive", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], diff --git a/br/pkg/lightning/config/config.go b/br/pkg/lightning/config/config.go index 45d5f1fa334a4..d14f12066c0f4 100644 --- a/br/pkg/lightning/config/config.go +++ b/br/pkg/lightning/config/config.go @@ -343,32 +343,64 @@ type MaxError struct { // In TiDB backend, this also includes all possible SQL errors raised from INSERT, // such as unique key conflict when `on-duplicate` is set to `error`. // When tolerated, the row causing the error will be skipped, and adds 1 to the counter. + // The default value is zero, which means that such errors are not tolerated. Type atomic.Int64 `toml:"type" json:"type"` // Conflict is the maximum number of unique key conflicts in local backend accepted. // When tolerated, every pair of conflict adds 1 to the counter. // Those pairs will NOT be deleted from the target. Conflict resolution is performed separately. - // TODO Currently this is hard-coded to infinity. - Conflict atomic.Int64 `toml:"conflict" json:"-"` + // The default value is max int64, which means conflict errors will be recorded as much as possible. + // Sometime the actual number of conflict record logged will be greater than the value configured here, + // because conflict error data are recorded batch by batch. + // If the limit is reached in a single batch, the entire batch of records will be persisted before an error is reported. 
+ Conflict atomic.Int64 `toml:"conflict" json:"conflict"` } func (cfg *MaxError) UnmarshalTOML(v interface{}) error { + defaultValMap := map[string]int64{ + "syntax": 0, + "charset": math.MaxInt64, + "type": 0, + "conflict": math.MaxInt64, + } + // set default value first + cfg.Syntax.Store(defaultValMap["syntax"]) + cfg.Charset.Store(defaultValMap["charset"]) + cfg.Type.Store(defaultValMap["type"]) + cfg.Conflict.Store(defaultValMap["conflict"]) switch val := v.(type) { case int64: // ignore val that is smaller than 0 - if val < 0 { - val = 0 + if val >= 0 { + // only set type error + cfg.Type.Store(val) } - cfg.Syntax.Store(0) - cfg.Charset.Store(math.MaxInt64) - cfg.Type.Store(val) - cfg.Conflict.Store(math.MaxInt64) return nil case map[string]interface{}: - // TODO support stuff like `max-error = { charset = 1000, type = 1000 }` if proved useful. + // support stuff like `max-error = { charset = 1000, type = 1000 }`. + getVal := func(k string, v interface{}) int64 { + defaultVal, ok := defaultValMap[k] + if !ok { + return 0 + } + iVal, ok := v.(int64) + if !ok || iVal < 0 { + return defaultVal + } + return iVal + } + for k, v := range val { + switch k { + case "type": + cfg.Type.Store(getVal(k, v)) + case "conflict": + cfg.Conflict.Store(getVal(k, v)) + } + } + return nil default: + return errors.Errorf("invalid max-error '%v', should be an integer or a map of string:int64", v) } - return errors.Errorf("invalid max-error '%v', should be an integer", v) } // DuplicateResolutionAlgorithm is the config type of how to resolve duplicates. @@ -455,6 +487,9 @@ type CSVConfig struct { TrimLastSep bool `toml:"trim-last-separator" json:"trim-last-separator"` NotNull bool `toml:"not-null" json:"not-null"` BackslashEscape bool `toml:"backslash-escape" json:"backslash-escape"` + // hide these options for lightning configuration file, they can only be used by LOAD DATA + // https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-field-line-handling + StartingBy string `toml:"-" json:"-"` } type MydumperRuntime struct { @@ -802,8 +837,16 @@ func (cfg *Config) LoadFromTOML(data []byte) error { unusedGlobalKeyStrs[key.String()] = struct{}{} } +iterateUnusedKeys: for _, key := range unusedConfigKeys { keyStr := key.String() + switch keyStr { + // these keys are not counted as decoded by toml decoder, but actually they are decoded, + // because the corresponding unmarshal logic handles these key's decoding in a custom way + case "lightning.max-error.type", + "lightning.max-error.conflict": + continue iterateUnusedKeys + } if _, found := unusedGlobalKeyStrs[keyStr]; found { bothUnused = append(bothUnused, keyStr) } else { diff --git a/br/pkg/lightning/config/config_test.go b/br/pkg/lightning/config/config_test.go index ea0cff40a04c7..f590391740ec4 100644 --- a/br/pkg/lightning/config/config_test.go +++ b/br/pkg/lightning/config/config_test.go @@ -19,6 +19,7 @@ import ( "context" "flag" "fmt" + "math" "net" "net/http" "net/http/httptest" @@ -561,6 +562,126 @@ func TestDurationUnmarshal(t *testing.T) { require.Regexp(t, "time: unknown unit .?x.? 
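For reference, the two TOML shapes the rewritten `UnmarshalTOML` accepts, as a small Go sketch patterned on the `TestMaxErrorUnmarshal` cases added in the test diff that follows. It assumes the BurntSushi TOML package that those tests use; the `main` wrapper is illustrative only:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/pingcap/tidb/br/pkg/lightning/config"
)

func main() {
	// Integer form: only max-error.type is set; the other limits keep
	// their defaults (conflict stays at math.MaxInt64).
	intCfg := new(config.Lightning)
	_ = toml.Unmarshal([]byte(`max-error = 123`), intCfg)
	fmt.Println(intCfg.MaxError.Type.Load())     // 123
	fmt.Println(intCfg.MaxError.Conflict.Load()) // 9223372036854775807

	// Table form: type and conflict can now be set independently.
	mapCfg := new(config.Lightning)
	_ = toml.Unmarshal([]byte(`max-error = { type = 3, conflict = 4 }`), mapCfg)
	fmt.Println(mapCfg.MaxError.Type.Load())     // 3
	fmt.Println(mapCfg.MaxError.Conflict.Load()) // 4
}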
diff --git a/br/pkg/lightning/config/config_test.go b/br/pkg/lightning/config/config_test.go
index ea0cff40a04c7..f590391740ec4 100644
--- a/br/pkg/lightning/config/config_test.go
+++ b/br/pkg/lightning/config/config_test.go
@@ -19,6 +19,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"math"
 	"net"
 	"net/http"
 	"net/http/httptest"
@@ -561,6 +562,126 @@ func TestDurationUnmarshal(t *testing.T) {
 	require.Regexp(t, "time: unknown unit .?x.? in duration .?13x20s.?", err.Error())
 }
 
+func TestMaxErrorUnmarshal(t *testing.T) {
+	type testCase struct {
+		TOMLStr        string
+		ExpectedValues map[string]int64
+		ExpectErrStr   string
+		CaseName       string
+	}
+	for _, tc := range []*testCase{
+		{
+			TOMLStr: `max-error = 123`,
+			ExpectedValues: map[string]int64{
+				"syntax":   0,
+				"charset":  math.MaxInt64,
+				"type":     123,
+				"conflict": math.MaxInt64,
+			},
+			CaseName: "Normal_Int",
+		},
+		{
+			TOMLStr: `max-error = -123`,
+			ExpectedValues: map[string]int64{
+				"syntax":   0,
+				"charset":  math.MaxInt64,
+				"type":     0,
+				"conflict": math.MaxInt64,
+			},
+			CaseName: "Abnormal_Negative_Int",
+		},
+		{
+			TOMLStr:      `max-error = "abcde"`,
+			ExpectErrStr: "invalid max-error 'abcde', should be an integer or a map of string:int64",
+			CaseName:     "Abnormal_String",
+		},
+		{
+			TOMLStr: `[max-error]
+syntax = 1
+charset = 2
+type = 3
+conflict = 4
+`,
+			ExpectedValues: map[string]int64{
+				"syntax":   0,
+				"charset":  math.MaxInt64,
+				"type":     3,
+				"conflict": 4,
+			},
+			CaseName: "Normal_Map_All_Set",
+		},
+		{
+			TOMLStr: `[max-error]
+conflict = 1000
+`,
+			ExpectedValues: map[string]int64{
+				"syntax":   0,
+				"charset":  math.MaxInt64,
+				"type":     0,
+				"conflict": 1000,
+			},
+			CaseName: "Normal_Map_Partial_Set",
+		},
+		{
+			TOMLStr: `max-error = { conflict = 1000, type = 123 }`,
+			ExpectedValues: map[string]int64{
+				"syntax":   0,
+				"charset":  math.MaxInt64,
+				"type":     123,
+				"conflict": 1000,
+			},
+			CaseName: "Normal_OneLineMap_Partial_Set",
+		},
+		{
+			TOMLStr: `[max-error]
+conflict = 1000
+not_exist = 123
+`,
+			ExpectedValues: map[string]int64{
+				"syntax":   0,
+				"charset":  math.MaxInt64,
+				"type":     0,
+				"conflict": 1000,
+			},
+			CaseName: "Normal_Map_Partial_Set_Invalid_Key",
+		},
+		{
+			TOMLStr: `[max-error]
+conflict = 1000
+type = -123
+`,
+			ExpectedValues: map[string]int64{
+				"syntax":   0,
+				"charset":  math.MaxInt64,
+				"type":     0,
+				"conflict": 1000,
+			},
+			CaseName: "Normal_Map_Partial_Set_Invalid_Value",
+		},
+		{
+			TOMLStr: `[max-error]
+conflict = 1000
+type = abc
+`,
+			ExpectErrStr: `toml: line 3 (last key "max-error.type"): expected value but found "abc" instead`,
+			CaseName:     "Normal_Map_Partial_Set_Invalid_ValueType",
+		},
+	} {
+		targetLightningCfg := new(config.Lightning)
+		err := toml.Unmarshal([]byte(tc.TOMLStr), targetLightningCfg)
+		if len(tc.ExpectErrStr) > 0 {
+			require.Errorf(t, err, "test case: %s", tc.CaseName)
+			require.Equalf(t, tc.ExpectErrStr, err.Error(), "test case: %s", tc.CaseName)
+		} else {
+			require.NoErrorf(t, err, "test case: %s", tc.CaseName)
+			require.Equalf(t, tc.ExpectedValues["syntax"], targetLightningCfg.MaxError.Syntax.Load(), "test case: %s", tc.CaseName)
+			require.Equalf(t, tc.ExpectedValues["charset"], targetLightningCfg.MaxError.Charset.Load(), "test case: %s", tc.CaseName)
+			require.Equalf(t, tc.ExpectedValues["type"], targetLightningCfg.MaxError.Type.Load(), "test case: %s", tc.CaseName)
+			require.Equalf(t, tc.ExpectedValues["conflict"], targetLightningCfg.MaxError.Conflict.Load(), "test case: %s", tc.CaseName)
+		}
+	}
+}
+
 func TestDurationMarshalJSON(t *testing.T) {
 	duration := config.Duration{}
 	err := duration.UnmarshalText([]byte("13m20s"))
@@ -643,7 +764,7 @@ func TestLoadConfig(t *testing.T) {
 	err = taskCfg.Adjust(context.Background())
 	require.NoError(t, err)
 	equivalentDSN := taskCfg.Checkpoint.MySQLParam.ToDriverConfig().FormatDSN()
-	expectedDSN := "guest:12345@tcp(172.16.30.11:4001)/?maxAllowedPacket=67108864&charset=utf8mb4&sql_mode=%27ONLY_FULL_GROUP_BY%2CSTRICT_TRANS_TABLES%2CNO_ZERO_IN_DATE%2CNO_ZERO_DATE%2CERROR_FOR_DIVISION_BY_ZERO%2CNO_AUTO_CREATE_USER%2CNO_ENGINE_SUBSTITUTION%27"
+	expectedDSN := "guest:12345@tcp(172.16.30.11:4001)/?readTimeout=30s&writeTimeout=30s&maxAllowedPacket=67108864&charset=utf8mb4&sql_mode=%27ONLY_FULL_GROUP_BY%2CSTRICT_TRANS_TABLES%2CNO_ZERO_IN_DATE%2CNO_ZERO_DATE%2CERROR_FOR_DIVISION_BY_ZERO%2CNO_AUTO_CREATE_USER%2CNO_ENGINE_SUBSTITUTION%27"
 	require.Equal(t, expectedDSN, equivalentDSN)
 
 	result := taskCfg.String()
diff --git a/br/pkg/lightning/config/const.go b/br/pkg/lightning/config/const.go
index 23a38ac41117d..e114eafd8ea88 100644
--- a/br/pkg/lightning/config/const.go
+++ b/br/pkg/lightning/config/const.go
@@ -15,7 +15,11 @@
 package config
 
 import (
+	"time"
+
 	"github.com/docker/go-units"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/keepalive"
 )
 
 const (
@@ -34,3 +38,11 @@
 
 	DefaultBatchSize ByteSize = 100 * units.GiB
 )
+
+var (
+	DefaultGrpcKeepaliveParams = grpc.WithKeepaliveParams(keepalive.ClientParameters{
+		Time:                10 * time.Second,
+		Timeout:             20 * time.Second,
+		PermitWithoutStream: false,
+	})
+)
diff --git a/br/pkg/lightning/errormanager/errormanager.go b/br/pkg/lightning/errormanager/errormanager.go
index 373ba572779d4..4085226063d38 100644
--- a/br/pkg/lightning/errormanager/errormanager.go
+++ b/br/pkg/lightning/errormanager/errormanager.go
@@ -194,7 +194,8 @@ func (em *ErrorManager) RecordTypeError(
 	if em.remainingError.Type.Dec() < 0 {
 		threshold := em.configError.Type.Load()
 		if threshold > 0 {
-			encodeErr = errors.Annotatef(encodeErr, "meet errors exceed the max-error.type threshold '%d'",
+			encodeErr = errors.Annotatef(encodeErr,
+				"The number of type errors exceeds the threshold configured by `max-error.type`: '%d'",
 				em.configError.Type.Load())
 		}
 		return encodeErr
@@ -241,17 +242,20 @@ func (em *ErrorManager) RecordDataConflictError(
 	tableName string,
 	conflictInfos []DataConflictInfo,
 ) error {
+	var gerr error
 	if len(conflictInfos) == 0 {
 		return nil
 	}
 
 	if em.remainingError.Conflict.Sub(int64(len(conflictInfos))) < 0 {
 		threshold := em.configError.Conflict.Load()
-		return errors.Errorf(" meet errors exceed the max-error.conflict threshold '%d'", threshold)
+		// Still need to record this batch of conflict records, and then return this error at the end.
+		// Otherwise, if max-error.conflict is set to a very small value, none of the conflict errors will be recorded.
+		gerr = errors.Errorf("The number of conflict errors exceeds the threshold configured by `max-error.conflict`: '%d'", threshold)
 	}
 
 	if em.db == nil {
-		return nil
+		return gerr
 	}
 
 	exec := common.SQLWithRetry{
@@ -259,7 +263,7 @@
 		Logger:       logger,
 		HideQueryLog: redact.NeedRedact(),
 	}
-	return exec.Transact(ctx, "insert data conflict error record", func(c context.Context, txn *sql.Tx) error {
+	if err := exec.Transact(ctx, "insert data conflict error record", func(c context.Context, txn *sql.Tx) error {
 		sb := &strings.Builder{}
 		fmt.Fprintf(sb, insertIntoConflictErrorData, em.schemaEscaped)
 		var sqlArgs []interface{}
@@ -279,7 +283,10 @@
 		}
 		_, err := txn.ExecContext(c, sb.String(), sqlArgs...)
 		return err
-	})
+	}); err != nil {
+		gerr = err
+	}
+	return gerr
 }
 
 func (em *ErrorManager) RecordIndexConflictError(
@@ -290,17 +297,20 @@
 	conflictInfos []DataConflictInfo,
 	rawHandles, rawRows [][]byte,
 ) error {
+	var gerr error
 	if len(conflictInfos) == 0 {
 		return nil
 	}
 
 	if em.remainingError.Conflict.Sub(int64(len(conflictInfos))) < 0 {
 		threshold := em.configError.Conflict.Load()
-		return errors.Errorf(" meet errors exceed the max-error.conflict threshold %d", threshold)
+		// Still need to record this batch of conflict records, and then return this error at the end.
+		// Otherwise, if max-error.conflict is set to a very small value, none of the conflict errors will be recorded.
+		gerr = errors.Errorf("The number of conflict errors exceeds the threshold configured by `max-error.conflict`: '%d'", threshold)
 	}
 
 	if em.db == nil {
-		return nil
+		return gerr
 	}
 
 	exec := common.SQLWithRetry{
@@ -308,7 +318,7 @@
 		Logger:       logger,
 		HideQueryLog: redact.NeedRedact(),
 	}
-	return exec.Transact(ctx, "insert index conflict error record", func(c context.Context, txn *sql.Tx) error {
+	if err := exec.Transact(ctx, "insert index conflict error record", func(c context.Context, txn *sql.Tx) error {
 		sb := &strings.Builder{}
 		fmt.Fprintf(sb, insertIntoConflictErrorIndex, em.schemaEscaped)
 		var sqlArgs []interface{}
@@ -331,7 +341,10 @@
 		}
 		_, err := txn.ExecContext(c, sb.String(), sqlArgs...)
 		return err
-	})
+	}); err != nil {
+		gerr = err
+	}
+	return gerr
 }
 
 // ResolveAllConflictKeys query all conflicting rows (handle and their
diff --git a/br/pkg/lightning/mydump/BUILD.bazel b/br/pkg/lightning/mydump/BUILD.bazel
index d265cad78bce6..a4aa1626afc46 100644
--- a/br/pkg/lightning/mydump/BUILD.bazel
+++ b/br/pkg/lightning/mydump/BUILD.bazel
@@ -32,6 +32,7 @@ go_library(
         "//util/slice",
         "//util/table-filter",
         "@com_github_pingcap_errors//:errors",
+        "@com_github_spkg_bom//:bom",
         "@com_github_xitongsys_parquet_go//parquet",
         "@com_github_xitongsys_parquet_go//reader",
         "@com_github_xitongsys_parquet_go//source",
diff --git a/br/pkg/lightning/mydump/csv_parser.go b/br/pkg/lightning/mydump/csv_parser.go
index b7d6c6fc21903..26fb65a493183 100644
--- a/br/pkg/lightning/mydump/csv_parser.go
+++ b/br/pkg/lightning/mydump/csv_parser.go
@@ -47,9 +47,10 @@ type CSVParser struct {
 	blockParser
 	cfg *config.CSVConfig
 
-	comma   []byte
-	quote   []byte
-	newLine []byte
+	comma      []byte
+	quote      []byte
+	newLine    []byte
+	startingBy []byte
 
 	charsetConvertor *CharsetConvertor
 	// These variables are used with IndexAnyByte to search a byte slice for the
@@ -120,6 +121,12 @@ func NewCSVParser(
 	}
 	unquoteStopSet = append(unquoteStopSet, newLineStopSet...)
 
+	if len(cfg.StartingBy) > 0 {
+		if strings.Contains(cfg.StartingBy, terminator) {
+			return nil, errors.New("starting-by cannot contain (line) terminator")
+		}
+	}
+
 	escFlavor := backslashEscapeFlavorNone
 	if cfg.BackslashEscape {
 		escFlavor = backslashEscapeFlavorMySQL
@@ -138,6 +145,7 @@ func NewCSVParser(
 		comma:            []byte(separator),
 		quote:            []byte(delimiter),
 		newLine:          []byte(terminator),
+		startingBy:       []byte(cfg.StartingBy),
 		escFlavor:        escFlavor,
 		quoteByteSet:     makeByteSet(quoteStopSet),
 		unquoteByteSet:   makeByteSet(unquoteStopSet),
@@ -370,11 +378,43 @@ func (parser *CSVParser) readRecord(dst []string) ([]string, error) {
 	isEmptyLine := true
 	whitespaceLine := true
+	foundStartingByThisLine := false
 	prevToken := csvTokenNewLine
 	var firstToken csvToken
 
 outside:
 	for {
+		// we should drop
+		// 1. the whole line if it does not contain startingBy
+		// 2. any character before startingBy
+		// since we have checked that startingBy does not contain the terminator, we can
+		// split at the terminator to check whether the substring contains startingBy. Even
+		// if the terminator is inside a quoted field, which means it's not the
+		// end of a line, the substring can still be dropped by rule 2.
+		if len(parser.startingBy) > 0 && !foundStartingByThisLine {
+			oldPos := parser.pos
+			content, _, err := parser.ReadUntilTerminator()
+			if err != nil {
+				if !(errors.Cause(err) == io.EOF) {
+					return nil, err
+				}
+				if len(content) == 0 {
+					return nil, err
+				}
+				// if we reached EOF, we should still check whether the content contains
+				// startingBy, and try to put it back and parse it.
+			}
+			idx := bytes.Index(content, parser.startingBy)
+			if idx == -1 {
+				continue
+			}
+			foundStartingByThisLine = true
+			content = content[idx+len(parser.startingBy):]
+			content = append(content, parser.newLine...)
+			parser.buf = append(content, parser.buf...)
+			parser.pos = oldPos + int64(idx+len(parser.startingBy))
+		}
+
 		content, firstByte, err := parser.readUntil(&parser.unquoteByteSet)
 
 		if len(content) > 0 {
@@ -415,6 +455,7 @@
 			}
 			whitespaceLine = false
 		case csvTokenNewLine:
+			foundStartingByThisLine = false
 			// new line = end of record (ignore empty lines)
 			prevToken = firstToken
 			if isEmptyLine {
@@ -578,17 +619,21 @@ func (parser *CSVParser) ReadColumns() error {
 }
 
 // ReadUntilTerminator seeks the file until the terminator token is found, and
-// returns the file offset beyond the terminator.
-// This function is used in strict-format dividing a CSV file.
-func (parser *CSVParser) ReadUntilTerminator() (int64, error) {
+// returns
+//   - the content before the terminator
+//   - the file offset beyond the terminator
+//   - error
+//
+// Note that the terminator string pattern may be the content of a field, which
+// means it's inside quotes. The caller should make sure to handle this case.
+func (parser *CSVParser) ReadUntilTerminator() ([]byte, int64, error) {
 	for {
-		_, firstByte, err := parser.readUntil(&parser.newLineByteSet)
+		content, firstByte, err := parser.readUntil(&parser.newLineByteSet)
 		if err != nil {
-			return 0, err
+			return content, 0, err
 		}
 		parser.skipBytes(1)
 		if ok, err := parser.tryReadNewLine(firstByte); ok || err != nil {
-			return parser.pos, err
+			return content, parser.pos, err
 		}
 	}
 }
diff --git a/br/pkg/lightning/mydump/csv_parser_test.go b/br/pkg/lightning/mydump/csv_parser_test.go
index da06c15ed39d9..adb057679b3a4 100644
--- a/br/pkg/lightning/mydump/csv_parser_test.go
+++ b/br/pkg/lightning/mydump/csv_parser_test.go
@@ -55,6 +55,35 @@ func runTestCasesCSV(t *testing.T, cfg *config.MydumperRuntime, blockBufSize int
 	}
 }
 
+func runTestCasesCSVIgnoreNLines(t *testing.T, cfg *config.MydumperRuntime, blockBufSize int64, cases []testCase, ignoreNLines int) {
+	for _, tc := range cases {
+		charsetConvertor, err := mydump.NewCharsetConvertor(cfg.DataCharacterSet, cfg.DataInvalidCharReplace)
+		assert.NoError(t, err)
+		parser, err := mydump.NewCSVParser(context.Background(), &cfg.CSV, mydump.NewStringReader(tc.input), blockBufSize, ioWorkers, false, charsetConvertor)
+		assert.NoError(t, err)
+
+		for ignoreNLines > 0 {
+			// IGNORE N LINES will directly find the (line) terminator without checking whether it's inside quotes
+			_, _, err = parser.ReadUntilTerminator()
+			if errors.Cause(err) == io.EOF {
+				assert.Len(t, tc.expected, 0, "input = %q", tc.input)
+				return
+			}
+			assert.NoError(t, err)
+			ignoreNLines--
+		}
+
+		for i, row := range tc.expected {
+			comment := fmt.Sprintf("input = %q, row = %d", tc.input, i+1)
+			e := parser.ReadRow()
+			assert.NoErrorf(t, e, "input = %q, row = %d, error = %s", tc.input, i+1, errors.ErrorStack(e))
+			assert.Equal(t, int64(i)+1, parser.LastRow().RowID, comment)
+			assert.Equal(t, row, parser.LastRow().Row, comment)
+		}
+		assert.ErrorIsf(t, errors.Cause(parser.ReadRow()), io.EOF, "input = %q", tc.input)
+	}
+}
+
 func runFailingTestCasesCSV(t *testing.T, cfg *config.MydumperRuntime, blockBufSize int64, cases []string) {
 	for _, tc := range cases {
 		charsetConvertor, err := mydump.NewCharsetConvertor(cfg.DataCharacterSet, cfg.DataInvalidCharReplace)
@@ -935,6 +964,211 @@ func TestTerminator(t *testing.T) {
 	runTestCasesCSV(t, &cfg, 1, testCases)
 }
 
+func TestStartingBy(t *testing.T) {
+	cfg := config.MydumperRuntime{
+		CSV: config.CSVConfig{
+			Separator:  ",",
+			Delimiter:  `"`,
+			Terminator: "\n",
+			StartingBy: "xxx",
+		},
+	}
+	testCases := []testCase{
+		{
+			input: `xxx"abc",1
+something xxx"def",2
+"ghi",3`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("abc"), types.NewStringDatum("1")},
+				{types.NewStringDatum("def"), types.NewStringDatum("2")},
+			},
+		},
+	}
+	runTestCasesCSV(t, &cfg, 1, testCases)
+
+	testCases = []testCase{
+		{
+			input: `xxxabc,1
+something xxxdef,2
+ghi,3
+"bad syntax"aaa`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("abc"), types.NewStringDatum("1")},
+				{types.NewStringDatum("def"), types.NewStringDatum("2")},
+			},
+		},
+	}
+	runTestCasesCSV(t, &cfg, 1, testCases)
+
+	// test that special characters appear before StartingBy, and that StartingBy only takes effect once
+
+	testCases = []testCase{
+		{
+			input: `xxx"abc",1
+something xxxdef,2
+"ghi",3
+"yyy"xxx"yyy",4
+"yyy",5,xxxyyy,5
+qwe,zzzxxxyyy,6
+"yyyxxx"yyyxxx",7
+yyy",5,xxxxxx,8
+`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("abc"), types.NewStringDatum("1")},
+				{types.NewStringDatum("def"), types.NewStringDatum("2")},
+				{types.NewStringDatum("yyy"), types.NewStringDatum("4")},
+				{types.NewStringDatum("yyy"), types.NewStringDatum("5")},
+				{types.NewStringDatum("yyy"), types.NewStringDatum("6")},
+				{types.NewStringDatum("yyyxxx"), types.NewStringDatum("7")},
+				{types.NewStringDatum("xxx"), types.NewStringDatum("8")},
+			},
+		},
+	}
+	runTestCasesCSV(t, &cfg, 1, testCases)
+
+	// test StartingBy contains special characters
+
+	cfg = config.MydumperRuntime{
+		CSV: config.CSVConfig{
+			Separator:  ",",
+			Delimiter:  `"`,
+			Terminator: "\n",
+			StartingBy: "x,xx",
+		},
+	}
+	testCases = []testCase{
+		{
+			input: `x,xx"abc",1
+something x,xxdef,2
+"ghi",3
+"yyy"xxx"yyy",4
+"yyy",5,xxxyyy,5
+qwe,zzzxxxyyy,6
+"yyyxxx"yyyxxx",7
+yyy",5,xx,xxxx,8`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("abc"), types.NewStringDatum("1")},
+				{types.NewStringDatum("def"), types.NewStringDatum("2")},
+				{types.NewStringDatum("xx"), types.NewStringDatum("8")},
+			},
+		},
+	}
+	runTestCasesCSV(t, &cfg, 1, testCases)
+
+	cfg = config.MydumperRuntime{
+		CSV: config.CSVConfig{
+			Separator:  ",",
+			Delimiter:  `"`,
+			Terminator: "\n",
+			StartingBy: `x"xx`,
+		},
+	}
+	testCases = []testCase{
+		{
+			input: `x"xx"abc",1
+something x"xxdef,2
+"ghi",3
+"yyy"xxx"yyy",4
+"yyy",5,xxxyyy,5
+qwe,zzzxxxyyy,6
+"yyyxxx"yyyxxx",7
+yyy",5,xx"xxxx,8
+`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("abc"), types.NewStringDatum("1")},
+				{types.NewStringDatum("def"), types.NewStringDatum("2")},
+				{types.NewStringDatum("xx"), types.NewStringDatum("8")},
+			},
+		},
+	}
+	runTestCasesCSV(t, &cfg, 1, testCases)
+
+	cfg = config.MydumperRuntime{
+		CSV: config.CSVConfig{
+			Separator:  ",",
+			Delimiter:  `"`,
+			Terminator: "\n",
+			StartingBy: "x\nxx",
+		},
+	}
+	_, err := mydump.NewCSVParser(context.Background(), &cfg.CSV, nil, 1, ioWorkers, false, nil)
+	require.ErrorContains(t, err, "starting-by cannot contain (line) terminator")
+}
+
+func TestCallerCanIgnoreNLines(t *testing.T) {
+	cfg := config.MydumperRuntime{
+		CSV: config.CSVConfig{
+			Separator:  ",",
+			Delimiter:  `"`,
+			Terminator: "\n",
+		},
+	}
+	testCases := []testCase{
+		{
+			input: `1,1
+2,2
+3,3`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("3"), types.NewStringDatum("3")},
+			},
+		},
+	}
+	runTestCasesCSVIgnoreNLines(t, &cfg, 1, testCases, 2)
+
+	testCases = []testCase{
+		{
+			input: `"bad syntax"1
+"b",2
+"c",3`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("c"), types.NewStringDatum("3")},
+			},
+		},
+	}
+	runTestCasesCSVIgnoreNLines(t, &cfg, 1, testCases, 2)
+
+	cfg = config.MydumperRuntime{
+		CSV: config.CSVConfig{
+			Separator:  ",",
+			Delimiter:  `"`,
+			Terminator: "\n",
+		},
+	}
+	testCases = []testCase{
+		{
+			input: `1,1
+2,2
+3,3`,
+			expected: [][]types.Datum{},
+		},
+	}
+	runTestCasesCSVIgnoreNLines(t, &cfg, 1, testCases, 100)
+
+	// test IGNORE N LINES will directly find the (line) terminator without checking whether it's inside quotes
+
+	cfg = config.MydumperRuntime{
+		CSV: config.CSVConfig{
+			Separator:  ",",
+			Delimiter:  `"`,
+			Terminator: "\n",
+		},
+	}
+	testCases = []testCase{
+		{
+			input: `"a
+",1
+"b
+",2
+"c",3`,
+			expected: [][]types.Datum{
+				{types.NewStringDatum("b\n"), types.NewStringDatum("2")},
+				{types.NewStringDatum("c"), types.NewStringDatum("3")},
+			},
+		},
+	}
+	runTestCasesCSVIgnoreNLines(t, &cfg, 1, testCases, 2)
+}
+
 func TestCharsetConversion(t *testing.T) {
 	cfg := config.MydumperRuntime{
 		CSV: config.CSVConfig{
diff --git a/br/pkg/lightning/mydump/parser.go b/br/pkg/lightning/mydump/parser.go
index 512c3789cfa7f..0ac82ce189d71 100644
--- a/br/pkg/lightning/mydump/parser.go
+++ b/br/pkg/lightning/mydump/parser.go
@@ -32,6 +32,7 @@ import (
 	"github.com/pingcap/tidb/br/pkg/lightning/worker"
 	"github.com/pingcap/tidb/parser/mysql"
 	"github.com/pingcap/tidb/types"
+	"github.com/spkg/bom"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
 )
@@ -285,7 +286,13 @@ func (parser *blockParser) readBlock() error {
 		parser.remainBuf.Write(parser.buf)
 		parser.appendBuf.Reset()
 		parser.appendBuf.Write(parser.remainBuf.Bytes())
-		parser.appendBuf.Write(parser.blockBuf[:n])
+		blockData := parser.blockBuf[:n]
+		if parser.pos == 0 {
+			bomCleanedData := bom.Clean(blockData)
+			parser.pos += int64(n - len(bomCleanedData))
+			blockData = bomCleanedData
+		}
+		parser.appendBuf.Write(blockData)
 		parser.buf = parser.appendBuf.Bytes()
 		if parser.metrics != nil {
 			parser.metrics.ChunkParserReadBlockSecondsHistogram.Observe(time.Since(startTime).Seconds())
diff --git a/br/pkg/lightning/mydump/reader.go b/br/pkg/lightning/mydump/reader.go
index 4837b35aceab2..3735e97cb48ee 100644
--- a/br/pkg/lightning/mydump/reader.go
+++ b/br/pkg/lightning/mydump/reader.go
@@ -26,6 +26,7 @@ import (
 	"github.com/pingcap/tidb/br/pkg/lightning/log"
 	"github.com/pingcap/tidb/br/pkg/lightning/worker"
 	"github.com/pingcap/tidb/br/pkg/storage"
+	"github.com/spkg/bom"
 	"go.uber.org/zap"
 	"golang.org/x/text/encoding/simplifiedchinese"
 )
@@ -83,7 +84,7 @@ func ExportStatement(ctx context.Context, store storage.ExternalStorage, sqlFile
 	}
 	defer fd.Close()
 
-	br := bufio.NewReader(fd)
+	br := bufio.NewReader(bom.NewReader(fd))
 
 	data := make([]byte, 0, sqlFile.FileMeta.FileSize+1)
 	buffer := make([]byte, 0, sqlFile.FileMeta.FileSize+1)
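Both hunks above strip a UTF-8 byte-order mark before parsing: parser.go cleans the first block read from a data file, and reader.go wraps schema-file reads in a BOM-aware reader. A minimal standalone sketch of the github.com/spkg/bom behavior these rely on (the input bytes are made up for illustration):

package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/spkg/bom"
)

func main() {
	data := []byte("\xEF\xBB\xBFid,val1\n1,\"aaa01\"\n")

	// bom.Clean strips a leading UTF-8 BOM from a byte slice, as the
	// parser.go hunk does for the first block of a data file.
	fmt.Printf("%q\n", bom.Clean(data)[:7]) // "id,val1"

	// bom.NewReader does the same for a stream, as in reader.go.
	r := bufio.NewReader(bom.NewReader(bytes.NewReader(data)))
	line, _ := r.ReadString('\n')
	fmt.Printf("%q\n", line) // "id,val1\n"
}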
a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go index 37ba113c82eed..311659f6ffa51 100644 --- a/br/pkg/lightning/restore/table_restore.go +++ b/br/pkg/lightning/restore/table_restore.go @@ -812,9 +812,11 @@ func (tr *TableRestore) postProcess( } hasDupe = hasDupe || hasRemoteDupe - if err = rc.backend.ResolveDuplicateRows(ctx, tr.encTable, tr.tableName, rc.cfg.TikvImporter.DuplicateResolution); err != nil { - tr.logger.Error("resolve remote duplicate keys failed", log.ShortError(err)) - return false, err + if hasDupe { + if err = rc.backend.ResolveDuplicateRows(ctx, tr.encTable, tr.tableName, rc.cfg.TikvImporter.DuplicateResolution); err != nil { + tr.logger.Error("resolve remote duplicate keys failed", log.ShortError(err)) + return false, err + } } } diff --git a/br/pkg/lightning/tikv/BUILD.bazel b/br/pkg/lightning/tikv/BUILD.bazel index 596aa52075758..48758bfedaacf 100644 --- a/br/pkg/lightning/tikv/BUILD.bazel +++ b/br/pkg/lightning/tikv/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//br/pkg/lightning/common", + "//br/pkg/lightning/config", "//br/pkg/lightning/log", "//br/pkg/pdutil", "//br/pkg/version", diff --git a/br/pkg/lightning/tikv/tikv.go b/br/pkg/lightning/tikv/tikv.go index 8d2d797d322d1..53c06cc6102f6 100644 --- a/br/pkg/lightning/tikv/tikv.go +++ b/br/pkg/lightning/tikv/tikv.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/kvproto/pkg/debugpb" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/tidb/br/pkg/lightning/common" + "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/version" @@ -88,7 +89,7 @@ func withTiKVConnection(ctx context.Context, tls *common.TLS, tikvAddr string, a // Connect to the ImportSST service on the given TiKV node. // The connection is needed for executing `action` and will be tear down // when this function exits. - conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption()) + conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption(), config.DefaultGrpcKeepaliveParams) if err != nil { return errors.Trace(err) } @@ -172,7 +173,8 @@ var fetchModeRegexp = regexp.MustCompile(`\btikv_config_rocksdb\{cf="default",na // FetchMode obtains the import mode status of the TiKV node. func FetchMode(ctx context.Context, tls *common.TLS, tikvAddr string) (import_sstpb.SwitchMode, error) { - conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption()) + conn, err := grpc.DialContext(ctx, tikvAddr, tls.ToGRPCDialOption(), + config.DefaultGrpcKeepaliveParams) if err != nil { return 0, err } diff --git a/br/pkg/lightning/web/BUILD.bazel b/br/pkg/lightning/web/BUILD.bazel index 842eb48fb3dd3..93cb28cdaf0ce 100644 --- a/br/pkg/lightning/web/BUILD.bazel +++ b/br/pkg/lightning/web/BUILD.bazel @@ -4,7 +4,7 @@ go_library( name = "web", srcs = [ "progress.go", - "res.go", + "res.go", #keep "res_vfsdata.go", ], importpath = "github.com/pingcap/tidb/br/pkg/lightning/web", diff --git a/br/pkg/logutil/rate.go b/br/pkg/logutil/rate.go index 12ef85732efd7..e3e67ed241bff 100644 --- a/br/pkg/logutil/rate.go +++ b/br/pkg/logutil/rate.go @@ -51,5 +51,5 @@ func (r *RateTracer) RateAt(instant time.Time) float64 { // L make a logger with the current speed. 
func (r *RateTracer) L() *zap.Logger { - return log.With(zap.String("speed", fmt.Sprintf("%.2f ops/s", r.Rate()))) + return log.L().With(zap.String("speed", fmt.Sprintf("%.2f ops/s", r.Rate()))) } diff --git a/br/pkg/restore/BUILD.bazel b/br/pkg/restore/BUILD.bazel index d4f70e278fd2c..35bed1951e9b3 100644 --- a/br/pkg/restore/BUILD.bazel +++ b/br/pkg/restore/BUILD.bazel @@ -88,6 +88,7 @@ go_library( "@org_golang_google_grpc//backoff", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", "@org_golang_x_exp//slices", diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index c0f58817ec0af..18a3dc61879e4 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -64,6 +64,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" ) @@ -1298,7 +1299,7 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo finalStore := store rc.workerPool.ApplyOnErrorGroup(eg, func() error { - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) if rc.tlsConf != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) } @@ -1410,7 +1411,7 @@ func (rc *Client) execChecksum( concurrency uint, loadStatCh chan<- *CreatedTable, ) error { - logger := log.With( + logger := log.L().With( zap.String("db", tbl.OldTable.DB.Name.O), zap.String("table", tbl.OldTable.Info.Name.O), ) diff --git a/br/pkg/restore/import.go b/br/pkg/restore/import.go index 5004639c1a00d..34da5cbea2404 100644 --- a/br/pkg/restore/import.go +++ b/br/pkg/restore/import.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" ) @@ -197,7 +198,7 @@ func (ic *importClient) GetImportClient( if err != nil { return nil, errors.Trace(err) } - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) if ic.tlsConf != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(ic.tlsConf)) } diff --git a/br/pkg/restore/split/BUILD.bazel b/br/pkg/restore/split/BUILD.bazel index ac9eb50eb4d20..5ddd7b7671822 100644 --- a/br/pkg/restore/split/BUILD.bazel +++ b/br/pkg/restore/split/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//br/pkg/conn/util", "//br/pkg/errors", "//br/pkg/httputil", + "//br/pkg/lightning/config", "//br/pkg/logutil", "//br/pkg/redact", "//br/pkg/utils", @@ -32,6 +33,7 @@ go_library( "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//status", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", diff --git a/br/pkg/restore/split/client.go b/br/pkg/restore/split/client.go index fb82135ae2af6..72482a94e87dc 100644 --- a/br/pkg/restore/split/client.go +++ b/br/pkg/restore/split/client.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/httputil" + "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/store/pdtypes" pd 
"github.com/tikv/pd/client" @@ -35,6 +36,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" ) @@ -200,7 +202,9 @@ func (c *pdClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key if err != nil { return nil, errors.Trace(err) } - conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + conn, err := grpc.Dial(store.GetAddress(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + config.DefaultGrpcKeepaliveParams) if err != nil { return nil, errors.Trace(err) } @@ -336,11 +340,12 @@ func sendSplitRegionRequest(ctx context.Context, c *pdClient, regionInfo *Region if err != nil { return false, nil, err } - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) if c.tlsConf != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(c.tlsConf)) } - conn, err := grpc.Dial(store.GetAddress(), opt) + conn, err := grpc.Dial(store.GetAddress(), opt, + config.DefaultGrpcKeepaliveParams) if err != nil { return false, nil, err } diff --git a/br/pkg/utils/BUILD.bazel b/br/pkg/utils/BUILD.bazel index 1cad8d5628dee..323b126eb5c6b 100644 --- a/br/pkg/utils/BUILD.bazel +++ b/br/pkg/utils/BUILD.bazel @@ -51,6 +51,7 @@ go_library( "@org_golang_google_grpc//backoff", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", "@org_golang_x_net//http/httpproxy", diff --git a/br/pkg/utils/json.go b/br/pkg/utils/json.go index 736032b4beedc..a29725db6b1b3 100644 --- a/br/pkg/utils/json.go +++ b/br/pkg/utils/json.go @@ -114,6 +114,10 @@ func makeJSONSchema(schema *backuppb.Schema) (*jsonSchema, error) { func fromJSONSchema(jSchema *jsonSchema) (*backuppb.Schema, error) { schema := jSchema.Schema + if schema == nil { + schema = &backuppb.Schema{} + } + var err error schema.Db, err = json.Marshal(jSchema.DB) if err != nil { diff --git a/br/pkg/utils/json_test.go b/br/pkg/utils/json_test.go index 8d8eeb6457332..3f03f287d92f1 100644 --- a/br/pkg/utils/json_test.go +++ b/br/pkg/utils/json_test.go @@ -204,6 +204,44 @@ var testMetaJSONs = [][]byte{ "is_raw_kv": true, "br_version": "BR\nRelease Version: v5.0.0-master\nGit Commit Hash: c0d60dae4998cf9ac40f02e5444731c15f0b2522\nGit Branch: HEAD\nGo Version: go1.13.4\nUTC Build Time: 2021-03-25 08:10:08\nRace Enabled: false" }`), + []byte(`{ + "files": [ + { + "sha256": "3ae857ef9b379d498ae913434f1d47c3e90a55f3a4cd9074950bfbd163d5e5fc", + "start_key": "7480000000000000115f720000000000000000", + "end_key": "7480000000000000115f72ffffffffffffffff00", + "name": "1_20_9_36adb8cedcd7af34708edff520499e712e2cfdcb202f5707dc9305a031d55a98_1675066275424_write.sst", + "end_version": 439108573623222300, + "crc64xor": 16261462091570213000, + "total_kvs": 15, + "total_bytes": 1679, + "cf": "write", + "size": 2514, + "cipher_iv": "56MTbxA4CaNILpirKnBxUw==" + } + ], + "schemas": [ + { + "db": { + "charset": "utf8mb4", + "collate": "utf8mb4_bin", + "db_name": { + "L": "test", + "O": "test" + }, + "id": 1, + "policy_ref_info": null, + "state": 5 + } + } + ], + "ddls": [], + "cluster_id": 7194351714070942000, + "cluster_version": "\"6.1.0\"\n", + "br_version": "BR\nRelease Version: v6.1.0\nGit Commit Hash: 1a89decdb192cbdce6a7b0020d71128bc964d30f\nGit Branch: heads/refs/tags/v6.1.0\nGo Version: go1.18.2\nUTC Build Time: 2022-06-05 05:09:12\nRace 
Enabled: false", + "end_version": 439108573623222300, + "new_collations_enabled": "True" + }`), } func TestEncodeAndDecode(t *testing.T) { diff --git a/br/pkg/utils/misc.go b/br/pkg/utils/misc.go index 8078802b2e055..ab62a7b7db534 100644 --- a/br/pkg/utils/misc.go +++ b/br/pkg/utils/misc.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/tidb/parser/types" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) // IsTypeCompatible checks whether type target is compatible with type src @@ -84,7 +85,7 @@ func IsTypeCompatible(src types.FieldType, target types.FieldType) bool { } func GRPCConn(ctx context.Context, storeAddr string, tlsConf *tls.Config, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - secureOpt := grpc.WithInsecure() + secureOpt := grpc.WithTransportCredentials(insecure.NewCredentials()) if tlsConf != nil { secureOpt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConf)) } diff --git a/br/pkg/utils/store_manager.go b/br/pkg/utils/store_manager.go index 8dd50a2a57c82..8a89e49022806 100644 --- a/br/pkg/utils/store_manager.go +++ b/br/pkg/utils/store_manager.go @@ -20,6 +20,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" ) @@ -136,7 +137,7 @@ func (mgr *StoreManager) getGrpcConnLocked(ctx context.Context, storeID uint64) if err != nil { return nil, errors.Trace(err) } - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) if mgr.tlsConf != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(mgr.tlsConf)) } diff --git a/br/tests/lightning_bom_file/config.toml b/br/tests/lightning_bom_file/config.toml new file mode 100644 index 0000000000000..291d1b166103a --- /dev/null +++ b/br/tests/lightning_bom_file/config.toml @@ -0,0 +1,2 @@ +[mydumper.csv] +header = true diff --git a/br/tests/lightning_bom_file/data/mytest.testtbl-schema.sql b/br/tests/lightning_bom_file/data/mytest.testtbl-schema.sql new file mode 100644 index 0000000000000..4232788898790 --- /dev/null +++ b/br/tests/lightning_bom_file/data/mytest.testtbl-schema.sql @@ -0,0 +1,5 @@ +CREATE TABLE testtbl ( + id INTEGER, + val1 VARCHAR(40) NOT NULL, + INDEX `idx_val1` (`val1`) +); diff --git a/br/tests/lightning_bom_file/data/mytest.testtbl.csv b/br/tests/lightning_bom_file/data/mytest.testtbl.csv new file mode 100644 index 0000000000000..e0931cce2a480 --- /dev/null +++ b/br/tests/lightning_bom_file/data/mytest.testtbl.csv @@ -0,0 +1,6 @@ +id,val1 +1,"aaa01" +2,"aaa01" +3,"aaa02" +4,"aaa02" +5,"aaa05" diff --git a/br/tests/lightning_bom_file/original_data/mytest.testtbl-schema.sql b/br/tests/lightning_bom_file/original_data/mytest.testtbl-schema.sql new file mode 100644 index 0000000000000..dc1e032a16618 --- /dev/null +++ b/br/tests/lightning_bom_file/original_data/mytest.testtbl-schema.sql @@ -0,0 +1,5 @@ +CREATE TABLE testtbl ( + id INTEGER, + val1 VARCHAR(40) NOT NULL, + INDEX `idx_val1` (`val1`) +); diff --git a/br/tests/lightning_bom_file/original_data/mytest.testtbl.csv b/br/tests/lightning_bom_file/original_data/mytest.testtbl.csv new file mode 100644 index 0000000000000..270c410cd79fd --- /dev/null +++ b/br/tests/lightning_bom_file/original_data/mytest.testtbl.csv @@ -0,0 +1,6 @@ +id,val1 +1,"aaa01" +2,"aaa01" +3,"aaa02" +4,"aaa02" +5,"aaa05" diff --git a/br/tests/lightning_bom_file/run.sh b/br/tests/lightning_bom_file/run.sh new file mode 100755 index 
0000000000000..88eada54c74a9 --- /dev/null +++ b/br/tests/lightning_bom_file/run.sh @@ -0,0 +1,56 @@ +#!/bin/sh +# +# Copyright 2023 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux + +mydir=$(dirname "${BASH_SOURCE[0]}") + +original_schema_file="${mydir}/original_data/mytest.testtbl-schema.sql" +original_data_file="${mydir}/original_data/mytest.testtbl.csv" +schema_file="${original_schema_file/original_data/data}" +data_file="${original_data_file/original_data/data}" + +# add the BOM header +printf '\xEF\xBB\xBF' | cat - <( sed '1s/^\xEF\xBB\xBF//' "${original_schema_file}" ) > "${schema_file}" +printf '\xEF\xBB\xBF' | cat - <( sed '1s/^\xEF\xBB\xBF//' "${original_data_file}" ) > "${data_file}" + +# verify the BOM header +if ! grep -q $'^\xEF\xBB\xBF' "${schema_file}"; then + echo "schema file doesn't contain the BOM header" >&2 + exit 1 +fi + +if ! grep -q $'^\xEF\xBB\xBF' "${data_file}"; then + echo "data file doesn't contain the BOM header" >&2 + exit 1 +fi + +row_count=$( sed '1d' "${data_file}" | wc -l | xargs echo ) + +run_lightning --backend tidb + +# Check that everything is correctly imported +run_sql 'SELECT count(*) FROM mytest.testtbl' +check_contains "count(*): ${row_count}" + +check_cluster_version 4 0 0 'local backend' || exit 0 +run_sql "DROP TABLE mytest.testtbl" + +run_lightning --backend local + +# Check that everything is correctly imported +run_sql 'SELECT count(*) FROM mytest.testtbl' +check_contains "count(*): ${row_count}" diff --git a/br/tests/lightning_config_max_error/data/mytest.testtbl-schema.sql b/br/tests/lightning_config_max_error/data/mytest.testtbl-schema.sql new file mode 100644 index 0000000000000..93582d5178139 --- /dev/null +++ b/br/tests/lightning_config_max_error/data/mytest.testtbl-schema.sql @@ -0,0 +1,5 @@ +CREATE TABLE testtbl ( + id INTEGER PRIMARY KEY, + val1 VARCHAR(40) NOT NULL, + INDEX `idx_val1` (`val1`) +); diff --git a/br/tests/lightning_config_max_error/data/mytest.testtbl.csv b/br/tests/lightning_config_max_error/data/mytest.testtbl.csv new file mode 100644 index 0000000000000..021f6bbf7be1c --- /dev/null +++ b/br/tests/lightning_config_max_error/data/mytest.testtbl.csv @@ -0,0 +1,16 @@ +id,val1 +1,"aaa01" +2,"aaa01" +3,"aaa02" +4,"aaa02" +5,"aaa05" +6,"aaa06" +7,"aaa07" +8,"aaa08" +9,"aaa09" +10,"aaa10" +1,"bbb01" +2,"bbb02" +3,"bbb03" +4,"bbb04" +5,"bbb05" diff --git a/br/tests/lightning_config_max_error/err_config.toml b/br/tests/lightning_config_max_error/err_config.toml new file mode 100644 index 0000000000000..79447e685a8f5 --- /dev/null +++ b/br/tests/lightning_config_max_error/err_config.toml @@ -0,0 +1,8 @@ +[lightning.max-error] +conflict = 4 + +[mydumper.csv] +header = true + +[tikv-importer] +duplicate-resolution = 'remove' diff --git a/br/tests/lightning_config_max_error/normal_config.toml b/br/tests/lightning_config_max_error/normal_config.toml new file mode 100644 index 0000000000000..92e08739fe04a --- /dev/null +++ b/br/tests/lightning_config_max_error/normal_config.toml @@ -0,0 +1,8 @@ 
+[lightning.max-error] +conflict = 20 + +[mydumper.csv] +header = true + +[tikv-importer] +duplicate-resolution = 'remove' diff --git a/br/tests/lightning_config_max_error/normal_config_old_style.toml b/br/tests/lightning_config_max_error/normal_config_old_style.toml new file mode 100644 index 0000000000000..fe402d071f5e0 --- /dev/null +++ b/br/tests/lightning_config_max_error/normal_config_old_style.toml @@ -0,0 +1,8 @@ +[lightning] +max-error = 0 # this actually sets the type-error limit + +[mydumper.csv] +header = true + +[tikv-importer] +duplicate-resolution = 'remove' diff --git a/br/tests/lightning_config_max_error/run.sh b/br/tests/lightning_config_max_error/run.sh new file mode 100755 index 0000000000000..1d850ae55f0d8 --- /dev/null +++ b/br/tests/lightning_config_max_error/run.sh @@ -0,0 +1,81 @@ +#!/bin/sh +# +# Copyright 2023 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux + +check_cluster_version 4 0 0 'local backend' || exit 0 + +mydir=$(dirname "${BASH_SOURCE[0]}") + +data_file="${mydir}/data/mytest.testtbl.csv" + +total_row_count=$( sed '1d' "${data_file}" | wc -l | xargs echo ) +uniq_row_count=$( sed '1d' "${data_file}" | awk -F, '{print $1}' | sort | uniq -c | awk '{print $1}' | grep -c '1' | xargs echo ) +duplicated_row_count=$(( ${total_row_count} - ${uniq_row_count} )) +run_sql 'DROP TABLE IF EXISTS mytest.testtbl' +run_sql 'DROP TABLE IF EXISTS lightning_task_info.conflict_error_v1' + +stderr_file="/tmp/${TEST_NAME}.stderr" + +set +e +if run_lightning --backend local --config "${mydir}/err_config.toml" 2> "${stderr_file}"; then + echo "The lightning import doesn't fail as expected" >&2 + exit 1 +fi +set -e + +err_msg=$( cat << EOF +tidb lightning encountered error: collect local duplicate rows failed: The number of conflict errors exceeds the threshold configured by \`max-error.conflict\`: '4' EOF ) +cat "${stderr_file}" +grep -q "${err_msg}" "${stderr_file}" + +run_sql 'SELECT COUNT(*) FROM lightning_task_info.conflict_error_v1' +# Although the number of conflict errors exceeds the max-error limit, +# all the conflict errors are recorded, +# because recording of conflict errors is executed batch by batch (batch size 1024) +# and this batch of conflict errors is recorded in full +check_contains "COUNT(*): ${duplicated_row_count}" + +# import a second time + +run_sql 'DROP TABLE IF EXISTS mytest.testtbl' +run_sql 'DROP TABLE IF EXISTS lightning_task_info.conflict_error_v1' + +run_lightning --backend local --config "${mydir}/normal_config.toml" + +run_sql 'SELECT COUNT(*) FROM lightning_task_info.conflict_error_v1' +check_contains "COUNT(*): ${duplicated_row_count}" + +# Check remaining records in the target table +run_sql 'SELECT COUNT(*) FROM mytest.testtbl' +check_contains "COUNT(*): ${uniq_row_count}" + +# import a third time + +run_sql 'DROP TABLE IF EXISTS mytest.testtbl' +run_sql 'DROP TABLE IF EXISTS lightning_task_info.conflict_error_v1' + +run_lightning --backend local --config "${mydir}/normal_config_old_style.toml" + +run_sql 'SELECT COUNT(*) FROM 
lightning_task_info.conflict_error_v1' +check_contains "COUNT(*): ${duplicated_row_count}" + +# Check remaining records in the target table +run_sql 'SELECT COUNT(*) FROM mytest.testtbl' +check_contains "COUNT(*): ${uniq_row_count}" diff --git a/br/tests/lightning_disable_scheduler_by_key_range/run.sh b/br/tests/lightning_disable_scheduler_by_key_range/run.sh index 9df6067c8baf8..2a88f0e0cac8e 100644 --- a/br/tests/lightning_disable_scheduler_by_key_range/run.sh +++ b/br/tests/lightning_disable_scheduler_by_key_range/run.sh @@ -50,16 +50,16 @@ ready_for_import_engine run_curl "https://${PD_ADDR}/pd/api/v1/config/cluster-version" -length=$(run_curl "https://${PD_ADDR}/pd/api/v1/config/region-label/rules" | jq 'select(.[].rule_type == "key-range") | length') +length=$(run_curl "https://${PD_ADDR}/pd/api/v1/config/region-label/rules" | jq '[ .[] | select(.rule_type == "key-range" and .labels[0].key == "schedule") ] | length') if [ "$length" != "1" ]; then - echo "region-label key-range rules should be 1, but got $length" >&2 + echo "region-label key-range schedule rules should be 1, but got $length" >&2 exit 1 fi wait "$shpid" -length=$(run_curl "https://${PD_ADDR}/pd/api/v1/config/region-label/rules" | jq 'select(.[].rule_type == "key-range") | length') +length=$(run_curl "https://${PD_ADDR}/pd/api/v1/config/region-label/rules" | jq '[ .[] | select(.rule_type == "key-range" and .labels[0].key == "schedule") ] | length') if [ -n "$length" ] && [ "$length" -ne 0 ]; then - echo "region-label key-range rules should be 0, but got $length" >&2 + echo "region-label key-range schedule rules should be 0, but got $length" >&2 exit 1 fi diff --git a/br/tests/lightning_issue_40657/config.toml b/br/tests/lightning_issue_40657/config.toml new file mode 100644 index 0000000000000..74561bc05f026 --- /dev/null +++ b/br/tests/lightning_issue_40657/config.toml @@ -0,0 +1,6 @@ +[tikv-importer] +backend = "local" +duplicate-resolution = "remove" + +[mydumper.csv] +header = true diff --git a/br/tests/lightning_issue_40657/data1/test.t-schema.sql b/br/tests/lightning_issue_40657/data1/test.t-schema.sql new file mode 100644 index 0000000000000..ef7136b531abc --- /dev/null +++ b/br/tests/lightning_issue_40657/data1/test.t-schema.sql @@ -0,0 +1,6 @@ +CREATE TABLE `t` ( + `id` int(11) NOT NULL, + `name` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */, + UNIQUE KEY `uni_name` (`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; diff --git a/br/tests/lightning_issue_40657/data1/test.t.0.csv b/br/tests/lightning_issue_40657/data1/test.t.0.csv new file mode 100644 index 0000000000000..2987cee08b206 --- /dev/null +++ b/br/tests/lightning_issue_40657/data1/test.t.0.csv @@ -0,0 +1,6 @@ +id,name +1,"aaa01" +2,"aaa02" +3,"aaa03" +4,"aaa04" +5,"aaa04" diff --git a/br/tests/lightning_issue_40657/data2/test.t-schema.sql b/br/tests/lightning_issue_40657/data2/test.t-schema.sql new file mode 100644 index 0000000000000..ef7136b531abc --- /dev/null +++ b/br/tests/lightning_issue_40657/data2/test.t-schema.sql @@ -0,0 +1,6 @@ +CREATE TABLE `t` ( + `id` int(11) NOT NULL, + `name` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */, + UNIQUE KEY `uni_name` (`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; diff --git a/br/tests/lightning_issue_40657/data2/test.t.0.csv b/br/tests/lightning_issue_40657/data2/test.t.0.csv new file mode 100644 index 0000000000000..f64aebd0630d9 --- /dev/null +++ 
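Editor's sketch: the lightning_config_max_error test above depends on the batching behavior its comments describe: the max-error.conflict threshold is only checked between batches, so the batch that crosses it is still recorded in full. A rough sketch of that accounting (the batch size mirrors the comment; names and the record callback are illustrative):

    package sketch

    import "fmt"

    // batchSize mirrors the "batch size 1024" called out in the test comment.
    const batchSize = 1024

    // recordConflicts persists conflict rows batch by batch and only checks
    // the limit after a whole batch has been recorded, so the batch that
    // crosses the threshold is not truncated.
    func recordConflicts(rows []string, record func([]string), limit int) error {
        recorded := 0
        for len(rows) > 0 {
            n := batchSize
            if n > len(rows) {
                n = len(rows)
            }
            record(rows[:n]) // the whole batch is recorded first
            recorded += n
            rows = rows[n:]
            if recorded > limit {
                return fmt.Errorf("%d conflict errors exceed max-error.conflict (%d)", recorded, limit)
            }
        }
        return nil
    }
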
b/br/tests/lightning_issue_40657/data2/test.t.0.csv @@ -0,0 +1,6 @@ +id,name +1,"aaa01" +2,"aaa02" +3,"aaa03" +4,"aaa04" +5,"aaa05" diff --git a/br/tests/lightning_issue_40657/run.sh b/br/tests/lightning_issue_40657/run.sh new file mode 100644 index 0000000000000..a20600b79d14b --- /dev/null +++ b/br/tests/lightning_issue_40657/run.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# +# Copyright 2023 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux + +check_cluster_version 5 2 0 'duplicate detection' || exit 0 + +run_lightning -d "tests/$TEST_NAME/data1" +run_sql 'admin check table test.t' +run_sql 'select count(*) from test.t' +check_contains 'count(*): 3' +run_sql 'select count(*) from lightning_task_info.conflict_error_v1' +check_contains 'count(*): 2' + +run_sql 'truncate table test.t' +run_lightning -d "tests/$TEST_NAME/data2" +run_sql 'admin check table test.t' +run_sql 'select count(*) from test.t' +check_contains 'count(*): 5' diff --git a/br/tests/lightning_reload_cert/run.sh b/br/tests/lightning_reload_cert/run.sh index e06ef8d7fbf51..be0c5ff40421e 100644 --- a/br/tests/lightning_reload_cert/run.sh +++ b/br/tests/lightning_reload_cert/run.sh @@ -29,7 +29,7 @@ shpid="$!" sleep 15 ok=0 for _ in {0..60}; do - if grep -Fq "connection closed before server preface received" "$TEST_DIR"/lightning.log; then + if grep -Fq "connection error" "$TEST_DIR"/lightning.log; then ok=1 break fi diff --git a/build/BUILD.bazel b/build/BUILD.bazel index 23cf263d525e3..3c2a569dd80d8 100644 --- a/build/BUILD.bazel +++ b/build/BUILD.bazel @@ -55,6 +55,7 @@ STATICHECK_ANALYZERS = [ "S1039", "S1040", "SA1019", + "SA1029", "SA2000", "SA2001", "SA2003", diff --git a/build/linter/BUILD.bazel b/build/linter/BUILD.bazel new file mode 100644 index 0000000000000..e5407284430fa --- /dev/null +++ b/build/linter/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "linter", + srcs = ["linter.go"], + importpath = "github.com/pingcap/tidb/build/linter", + visibility = ["//visibility:public"], + deps = ["@com_github_apache_skywalking_eyes//pkg/config"], +) diff --git a/build/linter/linter.go b/build/linter/linter.go new file mode 100644 index 0000000000000..794a7c37039ef --- /dev/null +++ b/build/linter/linter.go @@ -0,0 +1,20 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linter + +import ( + // this blank import is necessary to keep skywalking-eyes in go.mod. 
+ _ "github.com/apache/skywalking-eyes/pkg/config" +) diff --git a/build/nogo_config.json b/build/nogo_config.json index 97a1a1feed50e..90a5b7cae853f 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -1,7 +1,7 @@ { "all_revive": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/rules_go_work-*": "ignore generated code", ".*_/testmain\\.go$": "ignore code" @@ -9,32 +9,32 @@ }, "asciicheck": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "br/pkg/lightning/web/res_vfsdata.go": "ignore code" } }, "asmdecl": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "assign": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "atomic": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "atomicalign": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, @@ -46,31 +46,31 @@ }, "bools": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "buildtag": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "printf": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "unreachable": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "composites": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "br/pkg/glue/console_glue_test.go": "ignore code", "br/pkg/restore/db_test.go": "ignore code", @@ -79,45 +79,45 @@ }, "copylocks": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo code" } }, "ctrlflow": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "deadcode": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "deepequalerrors": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "durationcheck": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", "/rules_go_work-*": "ignore generated code", ".*_generated\\.go$": "ignore 
generated code" } }, "errorsas": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "errcheck": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", ".*_test\\.go$": "ignore generated code", "util/logutil": "ignore util/logutil code", @@ -131,20 +131,20 @@ }, "exportloopref": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "filepermission": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", ".*_/testmain\\.go$": "ignore code" } }, "fieldalignment": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", ".*_/testmain\\.go$": "ignore code", ".*_test\\.go$": "ignore test code" @@ -188,13 +188,13 @@ }, "findcall": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "forcetypeassert": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" }, "only_files": { @@ -219,7 +219,7 @@ }, "gofmt": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo code", "/rules_go_work-*": "ignore generated code", @@ -230,7 +230,7 @@ "gci": { "exclude_files": { "external/": "no need to vet third party code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo code", ".*\\.pb\\.go$": "generated code", @@ -260,37 +260,37 @@ }, "httpresponse": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "ifaceassert": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "ineffassign": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "inspect": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "loopclosure": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "lostcancel": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, @@ -298,33 +298,33 @@ "exclude_files": { "/cgo/": "ignore cgo code", ".*_test\\.go$": "ignore generated code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", 
".*_generated\\.go$": "ignore generated code" } }, "misspell": { "exclude_files": { "/cgo/": "ignore cgo code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "nilfunc": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "nilness": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "/cgo/": "ignore cgo" } }, "noloopclosure": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" }, "only_files": { @@ -339,13 +339,13 @@ }, "pkgfact": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "revive": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", "GOROOT/": "ignore code", "/cgo/": "ignore cgo", "tools/": "ignore tool code", @@ -411,49 +411,49 @@ }, "shift": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "sortslice": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "stdmethods": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "stringintconv": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "structtag": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "testinggoroutine": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "tests": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "unconvert": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*\\.pb\\.go$": "generated code", "parser/parser.go": "generated code", "/cgo/": "no need to vet third party code for cgo", @@ -465,27 +465,27 @@ }, "unmarshal": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "unsafeptr": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/digester.go": "ignore code" } }, "unusedresult": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/digester_test.go": "ignore code" } }, "rowserrcheck": { 
"exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "server/tidb_test.go": "ignore test code", "server/tidb_serial_test.go": "ignore test code", @@ -497,250 +497,250 @@ }, "S1000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1002": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1003": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1004": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1005": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1006": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1007": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1008": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1009": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1010": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1011": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1012": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1013": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1014": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1015": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1016": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1017": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated 
code" } }, "S1018": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1019": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/parser.go": "ignore code" } }, "S1020": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1021": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "tools/check/ut.go": "ignore code" } }, "S1022": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1023": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/parser.go": "ignore code" } }, "S1024": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1025": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1026": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1027": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1028": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1029": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1030": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1031": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1032": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1033": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1034": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1035": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1036": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore 
generated code" } }, "S1037": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1038": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1039": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "S1040": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/parser.go": "ignore generated code" } @@ -748,159 +748,167 @@ "SA1019": { "exclude_files": { "/build/": "no need to linter code", - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", - ".*_test\\.go$": "ignore test code" + ".*_test\\.go$": "ignore test code", + "br/pkg/restore/split/client.go": "github.com/golang/protobuf deprecated", + "br/pkg/streamhelper/advancer_cliext.go": "github.com/golang/protobuf deprecated", + "br/pkg/lightning/checkpoints/checkpoints.go": "cfg.TikvImporter.Addr is deprecated", + "br/pkg/lightning/checkpoints/glue_checkpoint.go": "cfg.TikvImporter.Addr is deprecated" }, "only_files": { "util/gctuner": "util/gctuner", "util/cgroup": "util/cgroup code", "util/watcher": "util/watcher", - "br/pkg/lightning/restore/": "br/pkg/lightning/restore/", - "br/pkg/lightning/mydump/": "br/pkg/lightning/mydump/", + "br/pkg/": "br/pkg", "executor/aggregate.go": "executor/aggregate.go", - "types/json_binary_functions.go": "types/json_binary_functions.go", - "types/json_binary_test.go": "types/json_binary_test.go", + "types/": "types", "ddl/": "enable to ddl", "expression/builtin_cast.go": "enable expression/builtin_cast.go", "planner/core/plan.go": "planner/core/plan.go", - "server/conn.go": "server/conn.go", - "server/conn_stmt.go": "server/conn_stmt.go", - "server/conn_test.go": "server/conn_test.go", "extension/": "extension code", "resourcemanager/": "resourcemanager code", - "keyspace/": "keyspace code" + "keyspace/": "keyspace code", + "server/": "server code", + "meta": "meta code" } }, - "SA2000": { + "SA1029": { "exclude_files": { "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + ".*_test\\.go$": "ignore test code" + } + }, + "SA2000": { + "exclude_files": { + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA2001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA2003": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA3000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA3001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA4009": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to 
vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5002": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5003": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5004": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5005": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5007": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5008": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5009": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5010": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5011": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA5012": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA6000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA6001": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "SA6005": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, "prealloc": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/yy_parser.go": "ignore generated code", "/cgo/": "no need to vet third party code for cgo" @@ -908,7 +916,7 @@ }, "predeclared": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code", "parser/yy_parser.go": "ignore generated code", "parser/parser.go": "ignore generated code", @@ -917,7 +925,7 @@ }, "U1000": { "exclude_files": { - "/external/": "no need to vet third party code", + "external/": "no need to vet third party code", 
".*_generated\\.go$": "ignore generated code" } } diff --git a/build/patches/com_github_rivo_uniseg.patch b/build/patches/com_github_rivo_uniseg.patch deleted file mode 100644 index 43c2c40933b11..0000000000000 --- a/build/patches/com_github_rivo_uniseg.patch +++ /dev/null @@ -1,542 +0,0 @@ -From 1492043a155839cb863210d4f564be3fa640c0d9 Mon Sep 17 00:00:00 2001 -From: Weizhen Wang -Date: Sat, 8 Oct 2022 11:41:06 +0800 -Subject: [PATCH] update - -Signed-off-by: Weizhen Wang ---- - BUILD.bazel | 27 +++++ - WORKSPACE | 2 + - gen_breaktest.go | 213 -------------------------------------- - gen_properties.go | 256 ---------------------------------------------- - 4 files changed, 29 insertions(+), 469 deletions(-) - create mode 100644 BUILD.bazel - create mode 100644 WORKSPACE - delete mode 100644 gen_breaktest.go - delete mode 100644 gen_properties.go - -diff --git a/BUILD.bazel b/BUILD.bazel -new file mode 100644 -index 0000000..a1e5c89 ---- /dev/null -+++ b/BUILD.bazel -@@ -0,0 +1,27 @@ -+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") -+ -+go_library( -+ name = "uniseg", -+ srcs = [ -+ "doc.go", -+ "eastasianwidth.go", -+ "emojipresentation.go", -+ "grapheme.go", -+ "graphemeproperties.go", -+ "graphemerules.go", -+ "line.go", -+ "lineproperties.go", -+ "linerules.go", -+ "properties.go", -+ "sentence.go", -+ "sentenceproperties.go", -+ "sentencerules.go", -+ "step.go", -+ "width.go", -+ "word.go", -+ "wordproperties.go", -+ "wordrules.go", -+ ], -+ importpath = "github.com/rivo/uniseg", -+ visibility = ["//visibility:public"], -+) -diff --git a/WORKSPACE b/WORKSPACE -new file mode 100644 -index 0000000..d596273 ---- /dev/null -+++ b/WORKSPACE -@@ -0,0 +1,2 @@ -+# DO NOT EDIT: automatically generated WORKSPACE file for go_repository rule -+workspace(name = "com_github_rivo_uniseg") -diff --git a/gen_breaktest.go b/gen_breaktest.go -deleted file mode 100644 -index e613c4c..0000000 ---- a/gen_breaktest.go -+++ /dev/null -@@ -1,213 +0,0 @@ --//go:build generate -- --// This program generates a Go containing a slice of test cases based on the --// Unicode Character Database auxiliary data files. The command line arguments --// are as follows: --// --// 1. The name of the Unicode data file (just the filename, without extension). --// 2. The name of the locally generated Go file. --// 3. The name of the slice containing the test cases. --// 4. The name of the generator, for logging purposes. --// --//go:generate go run gen_breaktest.go GraphemeBreakTest graphemebreak_test.go graphemeBreakTestCases graphemes --//go:generate go run gen_breaktest.go WordBreakTest wordbreak_test.go wordBreakTestCases words --//go:generate go run gen_breaktest.go SentenceBreakTest sentencebreak_test.go sentenceBreakTestCases sentences --//go:generate go run gen_breaktest.go LineBreakTest linebreak_test.go lineBreakTestCases lines -- --package main -- --import ( -- "bufio" -- "bytes" -- "errors" -- "fmt" -- "go/format" -- "io/ioutil" -- "log" -- "net/http" -- "os" -- "time" --) -- --// We want to test against a specific version rather than the latest. When the --// package is upgraded to a new version, change these to generate new tests. --const ( -- testCaseURL = `https://www.unicode.org/Public/14.0.0/ucd/auxiliary/%s.txt` --) -- --func main() { -- if len(os.Args) < 5 { -- fmt.Println("Not enough arguments, see code for details") -- os.Exit(1) -- } -- -- log.SetPrefix("gen_breaktest (" + os.Args[4] + "): ") -- log.SetFlags(0) -- -- // Read text of testcases and parse into Go source code. 
-- src, err := parse(fmt.Sprintf(testCaseURL, os.Args[1])) -- if err != nil { -- log.Fatal(err) -- } -- -- // Format the Go code. -- formatted, err := format.Source(src) -- if err != nil { -- log.Fatalln("gofmt:", err) -- } -- -- // Write it out. -- log.Print("Writing to ", os.Args[2]) -- if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil { -- log.Fatal(err) -- } --} -- --// parse reads a break text file, either from a local file or from a URL. It --// parses the file data into Go source code representing the test cases. --func parse(url string) ([]byte, error) { -- log.Printf("Parsing %s", url) -- res, err := http.Get(url) -- if err != nil { -- return nil, err -- } -- body := res.Body -- defer body.Close() -- -- buf := new(bytes.Buffer) -- buf.Grow(120 << 10) -- buf.WriteString(`package uniseg -- --// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. -- --// ` + os.Args[3] + ` are Grapheme testcases taken from --// ` + url + ` --// on ` + time.Now().Format("January 2, 2006") + `. See --// https://www.unicode.org/license.html for the Unicode license agreement. --var ` + os.Args[3] + ` = []testCase { --`) -- -- sc := bufio.NewScanner(body) -- num := 1 -- var line []byte -- original := make([]byte, 0, 64) -- expected := make([]byte, 0, 64) -- for sc.Scan() { -- num++ -- line = sc.Bytes() -- if len(line) == 0 || line[0] == '#' { -- continue -- } -- var comment []byte -- if i := bytes.IndexByte(line, '#'); i >= 0 { -- comment = bytes.TrimSpace(line[i+1:]) -- line = bytes.TrimSpace(line[:i]) -- } -- original, expected, err := parseRuneSequence(line, original[:0], expected[:0]) -- if err != nil { -- return nil, fmt.Errorf(`line %d: %v: %q`, num, err, line) -- } -- fmt.Fprintf(buf, "\t{original: \"%s\", expected: %s}, // %s\n", original, expected, comment) -- } -- if err := sc.Err(); err != nil { -- return nil, err -- } -- -- // Check for final "# EOF", useful check if we're streaming via HTTP -- if !bytes.Equal(line, []byte("# EOF")) { -- return nil, fmt.Errorf(`line %d: exected "# EOF" as final line, got %q`, num, line) -- } -- buf.WriteString("}\n") -- return buf.Bytes(), nil --} -- --// Used by parseRuneSequence to match input via bytes.HasPrefix. --var ( -- prefixBreak = []byte("÷ ") -- prefixDontBreak = []byte("× ") -- breakOk = []byte("÷") -- breakNo = []byte("×") --) -- --// parseRuneSequence parses a rune + breaking opportunity sequence from b --// and appends the Go code for testcase.original to orig --// and appends the Go code for testcase.expected to exp. --// It retuns the new orig and exp slices. --// --// E.g. for the input b="÷ 0020 × 0308 ÷ 1F1E6 ÷" --// it will append --// "\u0020\u0308\U0001F1E6" --// and "[][]rune{{0x0020,0x0308},{0x1F1E6},}" --// to orig and exp respectively. --// --// The formatting of exp is expected to be cleaned up by gofmt or format.Source. --// Note we explicitly require the sequence to start with ÷ and we implicitly --// require it to end with ÷. --func parseRuneSequence(b, orig, exp []byte) ([]byte, []byte, error) { -- // Check for and remove first ÷ or ×. -- if !bytes.HasPrefix(b, prefixBreak) && !bytes.HasPrefix(b, prefixDontBreak) { -- return nil, nil, errors.New("expected ÷ or × as first character") -- } -- if bytes.HasPrefix(b, prefixBreak) { -- b = b[len(prefixBreak):] -- } else { -- b = b[len(prefixDontBreak):] -- } -- -- boundary := true -- exp = append(exp, "[][]rune{"...) -- for len(b) > 0 { -- if boundary { -- exp = append(exp, '{') -- } -- exp = append(exp, "0x"...) -- // Find end of hex digits. 
-- var i int -- for i = 0; i < len(b) && b[i] != ' '; i++ { -- if d := b[i]; ('0' <= d || d <= '9') || -- ('A' <= d || d <= 'F') || -- ('a' <= d || d <= 'f') { -- continue -- } -- return nil, nil, errors.New("bad hex digit") -- } -- switch i { -- case 4: -- orig = append(orig, "\\u"...) -- case 5: -- orig = append(orig, "\\U000"...) -- default: -- return nil, nil, errors.New("unsupport code point hex length") -- } -- orig = append(orig, b[:i]...) -- exp = append(exp, b[:i]...) -- b = b[i:] -- -- // Check for space between hex and ÷ or ×. -- if len(b) < 1 || b[0] != ' ' { -- return nil, nil, errors.New("bad input") -- } -- b = b[1:] -- -- // Check for next boundary. -- switch { -- case bytes.HasPrefix(b, breakOk): -- boundary = true -- b = b[len(breakOk):] -- case bytes.HasPrefix(b, breakNo): -- boundary = false -- b = b[len(breakNo):] -- default: -- return nil, nil, errors.New("missing ÷ or ×") -- } -- if boundary { -- exp = append(exp, '}') -- } -- exp = append(exp, ',') -- if len(b) > 0 && b[0] == ' ' { -- b = b[1:] -- } -- } -- exp = append(exp, '}') -- return orig, exp, nil --} -diff --git a/gen_properties.go b/gen_properties.go -deleted file mode 100644 -index 999d5ef..0000000 ---- a/gen_properties.go -+++ /dev/null -@@ -1,256 +0,0 @@ --//go:build generate -- --// This program generates a property file in Go file from Unicode Character --// Database auxiliary data files. The command line arguments are as follows: --// --// 1. The name of the Unicode data file (just the filename, without extension). --// Can be "-" (to skip) if the emoji flag is included. --// 2. The name of the locally generated Go file. --// 3. The name of the slice mapping code points to properties. --// 4. The name of the generator, for logging purposes. --// 5. (Optional) Flags, comma-separated. The following flags are available: --// - "emojis=": include the specified emoji properties (e.g. --// "Extended_Pictographic"). --// - "gencat": include general category properties. --// --//go:generate go run gen_properties.go auxiliary/GraphemeBreakProperty graphemeproperties.go graphemeCodePoints graphemes emojis=Extended_Pictographic --//go:generate go run gen_properties.go auxiliary/WordBreakProperty wordproperties.go workBreakCodePoints words emojis=Extended_Pictographic --//go:generate go run gen_properties.go auxiliary/SentenceBreakProperty sentenceproperties.go sentenceBreakCodePoints sentences --//go:generate go run gen_properties.go LineBreak lineproperties.go lineBreakCodePoints lines gencat --//go:generate go run gen_properties.go EastAsianWidth eastasianwidth.go eastAsianWidth eastasianwidth --//go:generate go run gen_properties.go - emojipresentation.go emojiPresentation emojipresentation emojis=Emoji_Presentation --package main -- --import ( -- "bufio" -- "bytes" -- "errors" -- "fmt" -- "go/format" -- "io/ioutil" -- "log" -- "net/http" -- "os" -- "regexp" -- "sort" -- "strconv" -- "strings" -- "time" --) -- --// We want to test against a specific version rather than the latest. When the --// package is upgraded to a new version, change these to generate new tests. --const ( -- propertyURL = `https://www.unicode.org/Public/14.0.0/ucd/%s.txt` -- emojiURL = `https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt` --) -- --// The regular expression for a line containing a code point range property. 
--var propertyPattern = regexp.MustCompile(`^([0-9A-F]{4,6})(\.\.([0-9A-F]{4,6}))?\s*;\s*([A-Za-z0-9_]+)\s*#\s(.+)$`) -- --func main() { -- if len(os.Args) < 5 { -- fmt.Println("Not enough arguments, see code for details") -- os.Exit(1) -- } -- -- log.SetPrefix("gen_properties (" + os.Args[4] + "): ") -- log.SetFlags(0) -- -- // Parse flags. -- flags := make(map[string]string) -- if len(os.Args) >= 6 { -- for _, flag := range strings.Split(os.Args[5], ",") { -- flagFields := strings.Split(flag, "=") -- if len(flagFields) == 1 { -- flags[flagFields[0]] = "yes" -- } else { -- flags[flagFields[0]] = flagFields[1] -- } -- } -- } -- -- // Parse the text file and generate Go source code from it. -- _, includeGeneralCategory := flags["gencat"] -- var mainURL string -- if os.Args[1] != "-" { -- mainURL = fmt.Sprintf(propertyURL, os.Args[1]) -- } -- src, err := parse(mainURL, flags["emojis"], includeGeneralCategory) -- if err != nil { -- log.Fatal(err) -- } -- -- // Format the Go code. -- formatted, err := format.Source([]byte(src)) -- if err != nil { -- log.Fatal("gofmt:", err) -- } -- -- // Save it to the (local) target file. -- log.Print("Writing to ", os.Args[2]) -- if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil { -- log.Fatal(err) -- } --} -- --// parse parses the Unicode Properties text files located at the given URLs and --// returns their equivalent Go source code to be used in the uniseg package. If --// "emojiProperty" is not an empty string, emoji code points for that emoji --// property (e.g. "Extended_Pictographic") will be included. In those cases, you --// may pass an empty "propertyURL" to skip parsing the main properties file. If --// "includeGeneralCategory" is true, the Unicode General Category property will --// be extracted from the comments and included in the output. --func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (string, error) { -- if propertyURL == "" && emojiProperty == "" { -- return "", errors.New("no properties to parse") -- } -- -- // Temporary buffer to hold properties. -- var properties [][4]string -- -- // Open the first URL. -- if propertyURL != "" { -- log.Printf("Parsing %s", propertyURL) -- res, err := http.Get(propertyURL) -- if err != nil { -- return "", err -- } -- in1 := res.Body -- defer in1.Close() -- -- // Parse it. -- scanner := bufio.NewScanner(in1) -- num := 0 -- for scanner.Scan() { -- num++ -- line := strings.TrimSpace(scanner.Text()) -- -- // Skip comments and empty lines. -- if strings.HasPrefix(line, "#") || line == "" { -- continue -- } -- -- // Everything else must be a code point range, a property and a comment. -- from, to, property, comment, err := parseProperty(line) -- if err != nil { -- return "", fmt.Errorf("%s line %d: %v", os.Args[4], num, err) -- } -- properties = append(properties, [4]string{from, to, property, comment}) -- } -- if err := scanner.Err(); err != nil { -- return "", err -- } -- } -- -- // Open the second URL. -- if emojiProperty != "" { -- log.Printf("Parsing %s", emojiURL) -- res, err := http.Get(emojiURL) -- if err != nil { -- return "", err -- } -- in2 := res.Body -- defer in2.Close() -- -- // Parse it. -- scanner := bufio.NewScanner(in2) -- num := 0 -- for scanner.Scan() { -- num++ -- line := scanner.Text() -- -- // Skip comments, empty lines, and everything not containing -- // "Extended_Pictographic". 
-- if strings.HasPrefix(line, "#") || line == "" || !strings.Contains(line, emojiProperty) { -- continue -- } -- -- // Everything else must be a code point range, a property and a comment. -- from, to, property, comment, err := parseProperty(line) -- if err != nil { -- return "", fmt.Errorf("emojis line %d: %v", num, err) -- } -- properties = append(properties, [4]string{from, to, property, comment}) -- } -- if err := scanner.Err(); err != nil { -- return "", err -- } -- } -- -- // Sort properties. -- sort.Slice(properties, func(i, j int) bool { -- left, _ := strconv.ParseUint(properties[i][0], 16, 64) -- right, _ := strconv.ParseUint(properties[j][0], 16, 64) -- return left < right -- }) -- -- // Header. -- var ( -- buf bytes.Buffer -- emojiComment string -- ) -- columns := 3 -- if includeGeneralCategory { -- columns = 4 -- } -- if emojiURL != "" { -- emojiComment = ` --// and --// ` + emojiURL + ` --// ("Extended_Pictographic" only)` -- } -- buf.WriteString(`package uniseg -- --// Code generated via go generate from gen_properties.go. DO NOT EDIT. -- --// ` + os.Args[3] + ` are taken from --// ` + propertyURL + emojiComment + ` --// on ` + time.Now().Format("January 2, 2006") + `. See https://www.unicode.org/license.html for the Unicode --// license agreement. --var ` + os.Args[3] + ` = [][` + strconv.Itoa(columns) + `]int{ -- `) -- -- // Properties. -- for _, prop := range properties { -- if includeGeneralCategory { -- generalCategory := "gc" + prop[3][:2] -- if generalCategory == "gcL&" { -- generalCategory = "gcLC" -- } -- prop[3] = prop[3][3:] -- fmt.Fprintf(&buf, "{0x%s,0x%s,%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), generalCategory, prop[3]) -- } else { -- fmt.Fprintf(&buf, "{0x%s,0x%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), prop[3]) -- } -- } -- -- // Tail. -- buf.WriteString("}") -- -- return buf.String(), nil --} -- --// parseProperty parses a line of the Unicode properties text file containing a --// property for a code point range and returns it along with its comment. --func parseProperty(line string) (from, to, property, comment string, err error) { -- fields := propertyPattern.FindStringSubmatch(line) -- if fields == nil { -- err = errors.New("no property found") -- return -- } -- from = fields[1] -- to = fields[3] -- if to == "" { -- to = from -- } -- property = fields[4] -- comment = fields[5] -- return --} -- --// translateProperty translates a property name as used in the Unicode data file --// to a variable used in the Go code. 
--func translateProperty(prefix, property string) string { -- return prefix + strings.ReplaceAll(property, "_", "") --} --- -2.38.0 - diff --git a/cmd/explaintest/r/collation_check_use_collation_disabled.result b/cmd/explaintest/r/collation_check_use_collation_disabled.result index 2c0bd306f445b..9e633133b1f4f 100644 --- a/cmd/explaintest/r/collation_check_use_collation_disabled.result +++ b/cmd/explaintest/r/collation_check_use_collation_disabled.result @@ -154,4 +154,28 @@ insert into t1 values ('-1'); insert into t2 values (0x2d31, ''); select * from t1, t2 where t1.a in (t2.b, 3); a b c +drop table if exists t0; +drop table if exists t1; +CREATE TABLE t0(c0 BOOL, c1 INT); +CREATE TABLE t1 LIKE t0; +CREATE VIEW v0(c0) AS SELECT IS_IPV4(t0.c1) FROM t0, t1; +INSERT INTO t0(c0, c1) VALUES (true, 0); +INSERT INTO t1(c0, c1) VALUES (true, 2); +SELECT v0.c0 FROM v0; +c0 +0 +SELECT (v0.c0)NOT LIKE(BINARY v0.c0) FROM v0; +(v0.c0)NOT LIKE(BINARY v0.c0) +0 +SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0); +c0 +desc format='brief' SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0); +id estRows task access object operator info +Projection 80000000.00 root is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20)))->Column#7 +└─HashJoin 80000000.00 root CARTESIAN inner join + ├─Selection(Build) 8000.00 root not(like(cast(is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20))), var_string(20)), cast(is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20))), binary(1)), 92)) + │ └─TableReader 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t0 keep order:false, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo use test diff --git a/cmd/explaintest/r/collation_check_use_collation_enabled.result b/cmd/explaintest/r/collation_check_use_collation_enabled.result index 838c6beba6535..3f1113fbcd868 100644 --- a/cmd/explaintest/r/collation_check_use_collation_enabled.result +++ b/cmd/explaintest/r/collation_check_use_collation_enabled.result @@ -173,4 +173,28 @@ insert into t1 values ('-1'); insert into t2 values (0x2d31, ''); select * from t1, t2 where t1.a in (t2.b, 3); a b c +drop table if exists t0; +drop table if exists t1; +CREATE TABLE t0(c0 BOOL, c1 INT); +CREATE TABLE t1 LIKE t0; +CREATE VIEW v0(c0) AS SELECT IS_IPV4(t0.c1) FROM t0, t1; +INSERT INTO t0(c0, c1) VALUES (true, 0); +INSERT INTO t1(c0, c1) VALUES (true, 2); +SELECT v0.c0 FROM v0; +c0 +0 +SELECT (v0.c0)NOT LIKE(BINARY v0.c0) FROM v0; +(v0.c0)NOT LIKE(BINARY v0.c0) +0 +SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0); +c0 +desc format='brief' SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0); +id estRows task access object operator info +Projection 80000000.00 root is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20)))->Column#7 +└─HashJoin 80000000.00 root CARTESIAN inner join + ├─Selection(Build) 8000.00 root not(like(cast(is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20))), var_string(20)), cast(is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20))), binary(1)), 92)) + │ └─TableReader 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t0 keep order:false, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo use test diff --git a/cmd/explaintest/r/subquery.result b/cmd/explaintest/r/subquery.result index 
0cf9302f425c0..ea5a17f2ff3e3 100644 --- a/cmd/explaintest/r/subquery.result +++ b/cmd/explaintest/r/subquery.result @@ -56,13 +56,11 @@ insert into exam values(1, 'math', 100); set names utf8 collate utf8_general_ci; explain format = 'brief' select * from stu where stu.name not in (select 'guo' from exam where exam.stu_id = stu.id); id estRows task access object operator info -Apply 10000.00 root CARTESIAN anti semi join, other cond:eq(test.stu.name, Column#8) +HashJoin 8000.00 root anti semi join, equal:[eq(test.stu.id, test.exam.stu_id)], other cond:eq(test.stu.name, "guo") ├─TableReader(Build) 10000.00 root data:TableFullScan -│ └─TableFullScan 10000.00 cop[tikv] table:stu keep order:false, stats:pseudo -└─Projection(Probe) 100000.00 root guo->Column#8 - └─TableReader 100000.00 root data:Selection - └─Selection 100000.00 cop[tikv] eq(test.exam.stu_id, test.stu.id) - └─TableFullScan 100000000.00 cop[tikv] table:exam keep order:false, stats:pseudo +│ └─TableFullScan 10000.00 cop[tikv] table:exam keep order:false, stats:pseudo +└─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:stu keep order:false, stats:pseudo select * from stu where stu.name not in (select 'guo' from exam where exam.stu_id = stu.id); id name set names utf8mb4; diff --git a/cmd/explaintest/t/collation_check_use_collation.test b/cmd/explaintest/t/collation_check_use_collation.test index 62fbea05ae628..adcd8695b38c0 100644 --- a/cmd/explaintest/t/collation_check_use_collation.test +++ b/cmd/explaintest/t/collation_check_use_collation.test @@ -110,5 +110,19 @@ insert into t1 values ('-1'); insert into t2 values (0x2d31, ''); select * from t1, t2 where t1.a in (t2.b, 3); +# issue 38736 +drop table if exists t0; +drop table if exists t1; +CREATE TABLE t0(c0 BOOL, c1 INT); +CREATE TABLE t1 LIKE t0; +CREATE VIEW v0(c0) AS SELECT IS_IPV4(t0.c1) FROM t0, t1; +INSERT INTO t0(c0, c1) VALUES (true, 0); +INSERT INTO t1(c0, c1) VALUES (true, 2); + +SELECT v0.c0 FROM v0; +SELECT (v0.c0)NOT LIKE(BINARY v0.c0) FROM v0; +SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0); +desc format='brief' SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0); + # cleanup environment use test diff --git a/cmd/pluginpkg/pluginpkg b/cmd/pluginpkg/pluginpkg deleted file mode 100755 index 9da90a4758831..0000000000000 Binary files a/cmd/pluginpkg/pluginpkg and /dev/null differ diff --git a/ddl/BUILD.bazel b/ddl/BUILD.bazel index b3ba9639cb7f0..e9e2bc70336d4 100644 --- a/ddl/BUILD.bazel +++ b/ddl/BUILD.bazel @@ -149,7 +149,6 @@ go_test( srcs = [ "attributes_sql_test.go", "backfilling_test.go", - "callback_test.go", "cancel_test.go", "cluster_test.go", "column_change_test.go", @@ -176,7 +175,6 @@ go_test( "foreign_key_test.go", "index_change_test.go", "index_cop_test.go", - "index_merge_tmp_test.go", "index_modify_test.go", "integration_test.go", "job_table_test.go", @@ -210,7 +208,7 @@ go_test( deps = [ "//autoid_service", "//config", - "//ddl/ingest", + "//ddl/internal/callback", "//ddl/placement", "//ddl/resourcegroup", "//ddl/schematracker", diff --git a/ddl/backfilling.go b/ddl/backfilling.go index 51d156ebf91e3..5487126d39207 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -493,7 +493,7 @@ func (w *backfillWorker) run(d *ddlCtx, bf backfiller, job *model.Job) { // splitTableRanges uses PD region's key ranges to split the backfilling table key range space, // to speed up backfilling data in table with disperse handle. // The `t` should be a non-partitioned table or a partition. 
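(Note on the `backfilling.go` hunk continuing below: it threads a new `limit` argument from both call sites, `backfillTaskChanSize` in `writePhysicalTableRecord` and `genTaskBatch` in `splitTableToBackfillJobs`, through `splitTableRanges` into `SplitRegionRanges`, so one splitting round can never return more ranges than the caller can schedule as backfill tasks. Below is a minimal, self-contained Go sketch of that capping behavior; the real splitting lives in `copr.RegionCache`, and all names here are illustrative only.)

```go
package main

import "fmt"

// splitWithLimit sketches the capping semantics added in the hunk below: it
// returns at most `limit` sub-ranges of [start, end), cutting at the given
// region boundaries. Any remainder is left for the caller's next round,
// which resumes from the end of the last returned range, just as the loop
// in writePhysicalTableRecord keeps calling splitTableRanges until endKey
// is reached. This is a sketch, not the real copr.RegionCache API.
func splitWithLimit(start, end int, boundaries []int, limit int) [][2]int {
	ranges := make([][2]int, 0, limit)
	cur := start
	for _, b := range boundaries {
		if len(ranges) == limit {
			return ranges // capped; the remainder is handled in a later round
		}
		if b <= cur || b >= end {
			continue // boundary outside the remaining range
		}
		ranges = append(ranges, [2]int{cur, b})
		cur = b
	}
	if len(ranges) < limit && cur < end {
		ranges = append(ranges, [2]int{cur, end})
	}
	return ranges
}

func main() {
	// Four region boundaries but limit 3: only three ranges come back,
	// and the next round would resume from key 50.
	fmt.Println(splitWithLimit(0, 100, []int{10, 30, 50, 70}, 3))
	// [[0 10] [10 30] [30 50]]
}
```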
-func splitTableRanges(t table.PhysicalTable, store kv.Storage, startKey, endKey kv.Key) ([]kv.KeyRange, error) { +func splitTableRanges(t table.PhysicalTable, store kv.Storage, startKey, endKey kv.Key, limit int) ([]kv.KeyRange, error) { logutil.BgLogger().Info("[ddl] split table range from PD", zap.Int64("physicalTableID", t.GetPhysicalID()), zap.String("start key", hex.EncodeToString(startKey)), @@ -508,7 +508,7 @@ func splitTableRanges(t table.PhysicalTable, store kv.Storage, startKey, endKey maxSleep := 10000 // ms bo := backoff.NewBackofferWithVars(context.Background(), maxSleep, nil) rc := copr.NewRegionCache(s.GetRegionCache()) - ranges, err := rc.SplitRegionRanges(bo, []kv.KeyRange{kvRange}) + ranges, err := rc.SplitRegionRanges(bo, []kv.KeyRange{kvRange}, limit) if err != nil { return nil, errors.Trace(err) } @@ -989,7 +989,7 @@ func (dc *ddlCtx) writePhysicalTableRecord(sessPool *sessionPool, t table.Physic } for { - kvRanges, err := splitTableRanges(t, reorgInfo.d.store, startKey, endKey) + kvRanges, err := splitTableRanges(t, reorgInfo.d.store, startKey, endKey, backfillTaskChanSize) if err != nil { return errors.Trace(err) } @@ -1101,7 +1101,7 @@ func (*ddlCtx) splitTableToBackfillJobs(sess *session, reorgInfo *reorgInfo, pTb isFirstOps := true bJobs := make([]*BackfillJob, 0, genTaskBatch) for { - kvRanges, err := splitTableRanges(pTbl, reorgInfo.d.store, startKey, endKey) + kvRanges, err := splitTableRanges(pTbl, reorgInfo.d.store, startKey, endKey, genTaskBatch) if err != nil { return errors.Trace(err) } diff --git a/ddl/cancel_test.go b/ddl/cancel_test.go index 3a5c461ad8461..3f02029ffced7 100644 --- a/ddl/cancel_test.go +++ b/ddl/cancel_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" @@ -264,7 +265,7 @@ func TestCancel(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockBackfillSlow")) }() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} i := atomicutil.NewInt64(0) cancel := atomicutil.NewBool(false) cancelResult := atomicutil.NewBool(false) @@ -282,12 +283,12 @@ func TestCancel(t *testing.T) { } dom.DDL().SetHook(hook.Clone()) - restHook := func(h *ddl.TestDDLCallback) { + restHook := func(h *callback.TestDDLCallback) { h.OnJobRunBeforeExported = nil h.OnJobUpdatedExported.Store(nil) dom.DDL().SetHook(h.Clone()) } - registHook := func(h *ddl.TestDDLCallback, onJobRunBefore bool) { + registHook := func(h *callback.TestDDLCallback, onJobRunBefore bool) { if onJobRunBefore { h.OnJobRunBeforeExported = hookFunc } else { diff --git a/ddl/cluster.go b/ddl/cluster.go index 74ac8c05ca098..32695ff01b819 100644 --- a/ddl/cluster.go +++ b/ddl/cluster.go @@ -165,7 +165,8 @@ func isFlashbackSupportedDDLAction(action model.ActionType) bool { switch action { case model.ActionSetTiFlashReplica, model.ActionUpdateTiFlashReplicaStatus, model.ActionAlterPlacementPolicy, model.ActionAlterTablePlacement, model.ActionAlterTablePartitionPlacement, model.ActionCreatePlacementPolicy, - model.ActionDropPlacementPolicy, model.ActionModifySchemaDefaultPlacement: + model.ActionDropPlacementPolicy, model.ActionModifySchemaDefaultPlacement, + model.ActionAlterTableAttributes, model.ActionAlterTablePartitionAttributes: return false default: return true diff --git a/ddl/cluster_test.go b/ddl/cluster_test.go index 
e2a4302e044ce..55c780d55e536 100644 --- a/ddl/cluster_test.go +++ b/ddl/cluster_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/meta" @@ -84,7 +85,7 @@ func TestFlashbackCloseAndResetPDSchedule(t *testing.T) { defer resetGC() tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionFlashbackCluster, job.Type) if job.SchemaState == model.StateWriteReorganization { @@ -136,7 +137,7 @@ func TestAddDDLDuringFlashback(t *testing.T) { defer resetGC() tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionFlashbackCluster, job.Type) if job.SchemaState == model.StateWriteOnly { @@ -175,7 +176,7 @@ func TestGlobalVariablesOnFlashback(t *testing.T) { defer resetGC() tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionFlashbackCluster, job.Type) if job.SchemaState == model.StateWriteReorganization { diff --git a/ddl/column_change_test.go b/ddl/column_change_test.go index be393dd488668..76a3b377a5abe 100644 --- a/ddl/column_change_test.go +++ b/ddl/column_change_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" @@ -48,7 +49,7 @@ func TestColumnAdd(t *testing.T) { tk.MustExec("insert t values (1, 2);") d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} ct := testNewContext(store) // set up hook @@ -149,7 +150,7 @@ func TestModifyAutoRandColumnWithMetaKeyChanged(t *testing.T) { tk.MustExec("create table t (a bigint primary key clustered AUTO_RANDOM(5));") d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} var errCount int32 = 3 var genAutoRandErr error @@ -457,7 +458,7 @@ func TestIssue40135(t *testing.T) { tk.MustExec("CREATE TABLE t40135 ( a tinyint DEFAULT NULL, b varchar(32) DEFAULT 'md') PARTITION BY HASH (a) PARTITIONS 2") one := true - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if one { diff --git a/ddl/column_modify_test.go b/ddl/column_modify_test.go index a8bb6e669a68f..574de0ee8f08a 100644 --- a/ddl/column_modify_test.go +++ b/ddl/column_modify_test.go @@ -24,7 +24,7 @@ import ( "time" "github.com/pingcap/errors" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" testddlutil "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -664,7 +664,7 @@ func TestTransactionWithWriteOnlyColumn(t *testing.T) { }, } - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -872,7 
+872,7 @@ func TestAddGeneratedColumnAndInsert(t *testing.T) { tk1.MustExec("use test") d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} ctx := mock.NewContext() ctx.Store = store times := 0 @@ -916,7 +916,7 @@ func TestColumnTypeChangeGenUniqueChangingName(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} var checkErr error assertChangingColName := "_col$_c2_0" assertChangingIdxName := "_idx$_idx_0" diff --git a/ddl/column_test.go b/ddl/column_test.go index e6c48b1121595..d378c03e297b5 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/pingcap/errors" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -672,7 +672,7 @@ func TestAddColumn(t *testing.T) { checkOK := false - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { if checkOK { return @@ -740,7 +740,7 @@ func TestAddColumns(t *testing.T) { err = txn.Commit(context.Background()) require.NoError(t, err) - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { mu.Lock() defer mu.Unlock() @@ -810,7 +810,7 @@ func TestDropColumnInColumnTest(t *testing.T) { var mu sync.Mutex d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { mu.Lock() defer mu.Unlock() @@ -872,7 +872,7 @@ func TestDropColumns(t *testing.T) { var mu sync.Mutex d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { mu.Lock() defer mu.Unlock() @@ -998,7 +998,7 @@ func TestWriteDataWriteOnlyMode(t *testing.T) { originalCallback := dom.DDL().GetHook() defer dom.DDL().SetHook(originalCallback) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { return @@ -1009,7 +1009,7 @@ func TestWriteDataWriteOnlyMode(t *testing.T) { dom.DDL().SetHook(hook) tk.MustExec("alter table t change column `col1` `col1` varchar(20)") - hook = &ddl.TestDDLCallback{Do: dom} + hook = &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { return diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go index 308a815773ce9..0aa303c984398 100644 --- a/ddl/column_type_change_test.go +++ b/ddl/column_type_change_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -121,7 +122,7 @@ func TestColumnTypeChangeStateBetweenInteger(t *testing.T) { require.Equal(t, 2, len(tbl.Cols())) require.NotNil(t, external.GetModifyColumn(t, tk, "test", "t", "c2", false)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -185,7 +186,7 @@ func 
TestRollbackColumnTypeChangeBetweenInteger(t *testing.T) { require.Equal(t, 2, len(tbl.Cols())) require.NotNil(t, external.GetModifyColumn(t, tk, "test", "t", "c2", false)) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} // Mock roll back at model.StateNone. customizeHookRollbackAtState(hook, tbl, model.StateNone) dom.DDL().SetHook(hook) @@ -217,7 +218,7 @@ func TestRollbackColumnTypeChangeBetweenInteger(t *testing.T) { assertRollBackedColUnchanged(t, tk) } -func customizeHookRollbackAtState(hook *ddl.TestDDLCallback, tbl table.Table, state model.SchemaState) { +func customizeHookRollbackAtState(hook *callback.TestDDLCallback, tbl table.Table, state model.SchemaState) { hook.OnJobRunBeforeExported = func(job *model.Job) { if tbl.Meta().ID != job.TableID { return @@ -934,7 +935,7 @@ func TestColumnTypeChangeIgnoreDisplayLength(t *testing.T) { assertHasAlterWriteReorg := func(tbl table.Table) { // Restore assertResult to false. assertResult = false - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if tbl.Meta().ID != job.TableID { return @@ -1600,7 +1601,7 @@ func TestChangingColOriginDefaultValue(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") originalHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var ( once bool checkErr error @@ -1679,7 +1680,7 @@ func TestChangingColOriginDefaultValueAfterAddColAndCastSucc(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") originalHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var ( once bool checkErr error @@ -1764,7 +1765,7 @@ func TestChangingColOriginDefaultValueAfterAddColAndCastFail(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") originalHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -1893,7 +1894,7 @@ func TestDDLExitWhenCancelMeetPanic(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockExceedErrorLimit")) }() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var jobID int64 hook.OnJobRunBeforeExported = func(job *model.Job) { if jobID != 0 { @@ -1968,7 +1969,7 @@ func TestCancelCTCInReorgStateWillCauseGoroutineLeak(t *testing.T) { tk.MustExec("insert into ctc_goroutine_leak values(1),(2),(3)") tbl := external.GetTableByName(t, tk, "test", "ctc_goroutine_leak") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var jobID int64 hook.OnJobRunBeforeExported = func(job *model.Job) { if jobID != 0 { @@ -2210,7 +2211,7 @@ func TestCastDateToTimestampInReorgAttribute(t *testing.T) { var checkErr1 error var checkErr2 error - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr1 != nil || checkErr2 != nil || tbl.Meta().ID != job.TableID { return diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index da49688ccc608..2dc12c6cda9e8 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" 
"github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/executor" @@ -74,7 +75,7 @@ func TestShowCreateTable(t *testing.T) { "CREATE TABLE `t2` (\n `a` int(11) DEFAULT NULL,\n `b` varchar(10) COLLATE utf8mb4_general_ci DEFAULT NULL,\n `c` varchar(1) COLLATE utf8mb4_general_ci DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci"}, } prevState := model.StateNone - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} currTestCaseOffset := 0 onJobUpdatedExportedFunc := func(job *model.Job) { if job.SchemaState == prevState || checkErr != nil { @@ -143,7 +144,7 @@ func TestDropNotNullColumn(t *testing.T) { var checkErr error d := dom.DDL() originalCallback := d.GetHook() - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} sqlNum := 0 onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { @@ -222,7 +223,7 @@ func TestTwoStates(t *testing.T) { key(c1, c2))`) tk.MustExec("insert into t values(1, 'a', 'N', '2017-07-01')") - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} prevState := model.StateNone require.NoError(t, testInfo.parseSQLs(parser.New())) @@ -809,7 +810,7 @@ func runTestInSchemaState( // Make sure these SQLs use the plan of index scan. tk.MustExec("drop stats t") - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} prevState := model.StateNone var checkErr error se, err := session.CreateSession(store) @@ -872,7 +873,7 @@ func TestShowIndex(t *testing.T) { tk.MustExec("use test_db_state") tk.MustExec(`create table t(c1 int primary key nonclustered, c2 int)`) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} prevState := model.StateNone showIndexSQL := `show index from t` var checkErr error @@ -1325,7 +1326,7 @@ func TestParallelAlterAndDropSchema(t *testing.T) { } func prepareTestControlParallelExecSQL(t *testing.T, store kv.Storage, dom *domain.Domain) (*testkit.TestKit, *testkit.TestKit, chan struct{}, ddl.Callback) { - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} times := 0 callback.OnJobRunBeforeExported = func(job *model.Job) { if times != 0 { @@ -1433,7 +1434,7 @@ func dbChangeTestParallelExecSQL(t *testing.T, store kv.Storage, dom *domain.Dom var err2, err3 error var wg util.WaitGroupWrapper - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} once := sync.Once{} onJobUpdatedExportedFunc := func(job *model.Job) { // sleep a while, let other job enqueue. 
@@ -1531,7 +1532,7 @@ func TestParallelDDLBeforeRunDDLJob(t *testing.T) { tk2 := testkit.NewTestKit(t, store) tk2.MustExec("use test_db_state") - intercept := &ddl.TestInterceptor{} + intercept := &callback.TestInterceptor{} var sessionToStart sync.WaitGroup // sessionToStart is a waitgroup to wait for two session to get the same information schema sessionToStart.Add(2) @@ -1574,7 +1575,7 @@ func TestParallelDDLBeforeRunDDLJob(t *testing.T) { wg.Wait() - intercept = &ddl.TestInterceptor{} + intercept = &callback.TestInterceptor{} d.(ddl.DDLForTest).SetInterceptor(intercept) } @@ -1666,7 +1667,7 @@ func TestCreateExpressionIndex(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { return @@ -1741,7 +1742,7 @@ func TestCreateUniqueExpressionIndex(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { return @@ -1850,7 +1851,7 @@ func TestDropExpressionIndex(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if checkErr != nil { return @@ -1922,7 +1923,7 @@ func TestParallelRenameTable(t *testing.T) { d2 := dom.DDL() originalCallback := d2.GetHook() defer d2.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} callback.OnJobRunBeforeExported = func(job *model.Job) { switch job.SchemaState { case model.StateNone: @@ -2037,7 +2038,7 @@ func TestConcurrentSetDefaultValue(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} skip := false callback.OnJobRunBeforeExported = func(job *model.Job) { switch job.SchemaState { diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index a893f15899f41..ad2d0066c56dc 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/errors" _ "github.com/pingcap/tidb/autoid_service" "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/schematracker" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -687,7 +687,7 @@ func TestUpdateMultipleTable(t *testing.T) { tk2.MustExec("use test") d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { if job.SchemaState == model.StateWriteOnly { tk2.MustExec("update t1, t2 set t1.c1 = 8, t2.c2 = 10 where t1.c2 = t2.c1") diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go index 9b39d309aeb19..0049e2340a2af 100644 --- a/ddl/db_partition_test.go +++ b/ddl/db_partition_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ 
-2533,7 +2534,7 @@ func TestExchangePartitionHook(t *testing.T) { tk.MustExec(`insert into pt values (0), (4), (7)`) tk.MustExec("insert into nt values (1)") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} dom.DDL().SetHook(hook) hookFunc := func(job *model.Job) { @@ -3816,7 +3817,7 @@ func TestTruncatePartitionMultipleTimes(t *testing.T) { dom := domain.GetDomain(tk.Session()) originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} dom.DDL().SetHook(hook) injected := false hook.OnJobRunBeforeExported = func(job *model.Job) { @@ -4626,7 +4627,7 @@ func TestIssue40135Ver2(t *testing.T) { tk.MustExec("CREATE TABLE t40135 ( a int DEFAULT NULL, b varchar(32) DEFAULT 'md', index(a)) PARTITION BY HASH (a) PARTITIONS 6") tk.MustExec("insert into t40135 values (1, 'md'), (2, 'ma'), (3, 'md'), (4, 'ma'), (5, 'md'), (6, 'ma')") one := true - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error var wg sync.WaitGroup wg.Add(1) @@ -4942,7 +4943,7 @@ func TestDropPartitionKeyColumn(t *testing.T) { } type TestReorgDDLCallback struct { - *ddl.TestDDLCallback + *callback.TestDDLCallback syncChan chan bool } @@ -4971,7 +4972,7 @@ func TestReorgPartitionConcurrent(t *testing.T) { defer dom.DDL().SetHook(originHook) syncOnChanged := make(chan bool) defer close(syncOnChanged) - hook := &TestReorgDDLCallback{TestDDLCallback: &ddl.TestDDLCallback{Do: dom}, syncChan: syncOnChanged} + hook := &TestReorgDDLCallback{TestDDLCallback: &callback.TestDDLCallback{Do: dom}, syncChan: syncOnChanged} dom.DDL().SetHook(hook) wait := make(chan bool) @@ -5148,7 +5149,7 @@ func TestReorgPartitionFailConcurrent(t *testing.T) { dom := domain.GetDomain(tk.Session()) originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} dom.DDL().SetHook(hook) wait := make(chan bool) @@ -5290,7 +5291,7 @@ func TestReorgPartitionFailInject(t *testing.T) { dom := domain.GetDomain(tk.Session()) originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} dom.DDL().SetHook(hook) wait := make(chan bool) diff --git a/ddl/db_table_test.go b/ddl/db_table_test.go index 33fe6f4337055..96a0e3524ae3c 100644 --- a/ddl/db_table_test.go +++ b/ddl/db_table_test.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" testddlutil "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -185,7 +186,7 @@ func TestTransactionOnAddDropColumn(t *testing.T) { originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -888,7 +889,7 @@ func TestAddColumn2(t *testing.T) { originHook := dom.DDL().GetHook() defer dom.DDL().SetHook(originHook) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var writeOnlyTable table.Table hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState == model.StateWriteOnly { diff --git a/ddl/db_test.go b/ddl/db_test.go index f9d5ef000ba24..f6a2d47d23739 100644 --- 
a/ddl/db_test.go +++ b/ddl/db_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -269,7 +270,7 @@ func TestIssue22307(t *testing.T) { tk.MustExec("create table t (a int, b int)") tk.MustExec("insert into t values(1, 1);") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr1, checkErr2 error hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { @@ -570,7 +571,7 @@ func TestAddExpressionIndexRollback(t *testing.T) { tk1.MustExec("use test") d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var currJob *model.Job ctx := mock.NewContext() ctx.Store = store @@ -958,7 +959,7 @@ func TestDDLJobErrorCount(t *testing.T) { }() var jobID int64 - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { jobID = job.ID } @@ -1090,7 +1091,7 @@ func TestCancelJobWriteConflict(t *testing.T) { var cancelErr error var rs []sqlexec.RecordSet - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} d := dom.DDL() originalHook := d.GetHook() d.SetHook(hook) @@ -1503,7 +1504,7 @@ func TestDDLBlockedCreateView(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t(a int)") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} first := true hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { @@ -1528,7 +1529,7 @@ func TestHashPartitionAddColumn(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t(a int, b int) partition by hash(a) partitions 4") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateWriteOnly { return @@ -1551,7 +1552,7 @@ func TestSetInvalidDefaultValueAfterModifyColumn(t *testing.T) { var wg sync.WaitGroup var checkErr error one := false - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateDeleteOnly { return @@ -1588,7 +1589,7 @@ func TestMDLTruncateTable(t *testing.T) { var wg sync.WaitGroup - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} wg.Add(2) var timetk2 time.Time var timetk3 time.Time diff --git a/ddl/ddl.go b/ddl/ddl.go index 2e6e753e31fdf..2a482e22dc9f7 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -21,7 +21,6 @@ package ddl import ( "context" "encoding/json" - "flag" "fmt" "runtime" "strconv" @@ -1250,12 +1249,6 @@ var ( RunInGoTest bool ) -func init() { - if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { - RunInGoTest = true - } -} - // GetDropOrTruncateTableInfoFromJobsByStore implements GetDropOrTruncateTableInfoFromJobs func GetDropOrTruncateTableInfoFromJobsByStore(jobs []*model.Job, gcSafePoint uint64, getTable func(uint64, int64, int64) (*model.TableInfo, error), fn func(*model.Job, *model.TableInfo) (bool, error)) (bool, error) { for _, job := range jobs { diff --git a/ddl/ddl_tiflash_api.go b/ddl/ddl_tiflash_api.go index 4b8fca2a91c0f..1ade909b93ee9 100644 --- a/ddl/ddl_tiflash_api.go +++ 
b/ddl/ddl_tiflash_api.go @@ -424,6 +424,14 @@ func (d *ddl) refreshTiFlashTicker(ctx sessionctx.Context, pollTiFlashContext *T return err } } + + failpoint.Inject("OneTiFlashStoreDown", func() { + for storeID, store := range pollTiFlashContext.TiFlashStores { + store.Store.StateName = "Down" + pollTiFlashContext.TiFlashStores[storeID] = store + break + } + }) pollTiFlashContext.PollCounter++ // Start to process every table. diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index e07d1661f7d99..0471740d41ddf 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -97,7 +98,7 @@ func TestParallelDDL(t *testing.T) { // set hook to execute jobs after all jobs are in queue. jobCnt := 11 - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} once := sync.Once{} var checkErr error tc.OnJobRunBeforeExported = func(job *model.Job) { diff --git a/ddl/fail_test.go b/ddl/fail_test.go index 39437b43a2b73..3c4ca0769bc1e 100644 --- a/ddl/fail_test.go +++ b/ddl/fail_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" @@ -38,7 +38,7 @@ func TestFailBeforeDecodeArgs(t *testing.T) { tableID = int64(tableIDi) d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} first := true stateCnt := 0 diff --git a/ddl/foreign_key_test.go b/ddl/foreign_key_test.go index 627c924b21871..032adfb296120 100644 --- a/ddl/foreign_key_test.go +++ b/ddl/foreign_key_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" @@ -125,7 +126,7 @@ func TestForeignKey(t *testing.T) { var mu sync.Mutex checkOK := false var hookErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if job.State != model.JobStateDone { return @@ -167,7 +168,7 @@ func TestForeignKey(t *testing.T) { checkOK = false mu.Unlock() // fix data race pr/#9491 - tc2 := &ddl.TestDDLCallback{} + tc2 := &callback.TestDDLCallback{} onJobUpdatedExportedFunc2 := func(job *model.Job) { if job.State != model.JobStateDone { return @@ -224,7 +225,7 @@ func TestTruncateOrDropTableWithForeignKeyReferred2(t *testing.T) { var wg sync.WaitGroup var truncateErr, dropErr error testTruncate := true - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateNone { return @@ -280,7 +281,7 @@ func TestDropIndexNeededInForeignKey2(t *testing.T) { var wg sync.WaitGroup var dropErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StatePublic || job.Type != model.ActionDropIndex { return @@ -319,7 +320,7 @@ func TestDropDatabaseWithForeignKeyReferred2(t *testing.T) { tk.MustExec("create database test2") var 
wg sync.WaitGroup var dropErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StateNone { return @@ -360,7 +361,7 @@ func TestAddForeignKey2(t *testing.T) { tk.MustExec("create table t2 (id int key, b int, index(b));") var wg sync.WaitGroup var addErr error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.SchemaState != model.StatePublic || job.Type != model.ActionDropIndex { return @@ -400,7 +401,7 @@ func TestAddForeignKey3(t *testing.T) { var insertErrs []error var deleteErrs []error - tc := &ddl.TestDDLCallback{} + tc := &callback.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { if job.Type != model.ActionAddForeignKey { return diff --git a/ddl/index.go b/ddl/index.go index f634f23bd3f05..888c48f2311ed 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -791,6 +791,9 @@ func doReorgWorkForCreateIndexMultiSchema(w *worker, d *ddlCtx, t *meta.Meta, jo done, ver, err = doReorgWorkForCreateIndex(w, d, t, job, tbl, indexInfo) if done { job.MarkNonRevertible() + if err == nil { + ver, err = updateVersionAndTableInfo(d, t, job, tbl.Meta(), true) + } } // We need another round to wait for all the others sub-jobs to finish. return false, ver, err @@ -877,7 +880,6 @@ func doReorgWorkForCreateIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Jo return false, ver, err } indexInfo.BackfillState = model.BackfillStateInapplicable // Prevent double-write on this index. - ver, err = updateVersionAndTableInfo(d, t, job, tbl.Meta(), true) return true, ver, err default: return false, 0, dbterror.ErrInvalidDDLState.GenWithStackByArgs("backfill", indexInfo.BackfillState) diff --git a/ddl/index_change_test.go b/ddl/index_change_test.go index f9dcc99154dc5..dc1b98f205f08 100644 --- a/ddl/index_change_test.go +++ b/ddl/index_change_test.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -41,7 +42,7 @@ func TestIndexChange(t *testing.T) { tk.MustExec("insert t values (1, 1), (2, 2), (3, 3);") d := dom.DDL() - tc := &ddl.TestDDLCallback{Do: dom} + tc := &callback.TestDDLCallback{Do: dom} // set up hook prevState := model.StateNone addIndexDone := false diff --git a/ddl/indexmergetest/BUILD.bazel b/ddl/indexmergetest/BUILD.bazel new file mode 100644 index 0000000000000..25dfef99ecb3f --- /dev/null +++ b/ddl/indexmergetest/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "indexmergetest_test", + timeout = "moderate", + srcs = [ + "main_test.go", + "merge_test.go", + ], + flaky = True, + shard_count = 4, + deps = [ + "//config", + "//ddl", + "//ddl/ingest", + "//ddl/internal/callback", + "//ddl/testutil", + "//domain", + "//kv", + "//meta/autoid", + "//parser/model", + "//tablecodec", + "//testkit", + "//testkit/testsetup", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//assert", + "@com_github_stretchr_testify//require", + "@com_github_tikv_client_go_v2//tikv", + "@org_uber_go_goleak//:goleak", + ], +) diff --git a/ddl/indexmergetest/main_test.go b/ddl/indexmergetest/main_test.go new file mode 100644 index 0000000000000..b4de8700ce167 --- /dev/null +++ b/ddl/indexmergetest/main_test.go @@ -0,0 +1,56 @@ +// Copyright 2023 
PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package indexmergetest + +import ( + "testing" + "time" + + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/testkit/testsetup" + "github.com/tikv/client-go/v2/tikv" + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + testsetup.SetupForCommonTest() + tikv.EnableFailpoints() + + domain.SchemaOutOfDateRetryInterval.Store(50 * time.Millisecond) + domain.SchemaOutOfDateRetryTimes.Store(50) + + autoid.SetStep(5000) + ddl.RunInGoTest = true + + config.UpdateGlobal(func(conf *config.Config) { + conf.Instance.SlowThreshold = 10000 + conf.TiKVClient.AsyncCommit.SafeWindow = 0 + conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 + conf.Experimental.AllowsExpressionIndex = true + }) + + opts := []goleak.Option{ + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), + goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), + goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), + goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + } + + goleak.VerifyTestMain(m, opts...) +} diff --git a/ddl/index_merge_tmp_test.go b/ddl/indexmergetest/merge_test.go similarity index 94% rename from ddl/index_merge_tmp_test.go rename to ddl/indexmergetest/merge_test.go index b637a55d2925f..a31b3edcc23a4 100644 --- a/ddl/index_merge_tmp_test.go +++ b/ddl/indexmergetest/merge_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 PingCAP, Inc. +// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
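(The new `indexmergetest/main_test.go` above wires the package into the repository's standard leak-checked test entry point. A stripped-down sketch of the same `goleak` wiring, with a hypothetical package name and a single ignore rule kept for illustration:)

```go
package sometest // hypothetical package; mirrors main_test.go above

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain runs the package's tests and fails if any goroutine is still
// alive at exit. IgnoreTopFunction whitelists long-lived goroutines owned
// by shared infrastructure (loggers, keep-alive loops), which are expected
// and not leaks.
func TestMain(m *testing.M) {
	opts := []goleak.Option{
		goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
	}
	goleak.VerifyTestMain(m, opts...)
}
```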
-package ddl_test +package indexmergetest import ( "testing" @@ -21,15 +21,14 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/ddl/ingest" - "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/ddl/internal/callback" + "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/util/logutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) func TestAddIndexMergeProcess(t *testing.T) { @@ -47,12 +46,12 @@ func TestAddIndexMergeProcess(t *testing.T) { var checkErr error var runDML, backfillDone bool originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { if !runDML && job.Type == model.ActionAddIndex && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil || idx.BackfillState != model.BackfillStateRunning { return } @@ -94,13 +93,13 @@ func TestAddPrimaryKeyMergeProcess(t *testing.T) { var checkErr error var runDML, backfillDone bool originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: nil, // We'll reload the schema manually. } onJobUpdatedExportedFunc := func(job *model.Job) { if !runDML && job.Type == model.ActionAddPrimaryKey && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "primary") + idx := testutil.FindIdxInfo(dom, "test", "t", "primary") if idx == nil || idx.BackfillState != model.BackfillStateRunning || job.SnapshotVer == 0 { return } @@ -143,12 +142,12 @@ func TestAddIndexMergeVersionIndexValue(t *testing.T) { var runDML bool var tblID, idxID int64 originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { if !runDML && job.Type == model.ActionAddIndex && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil || idx.BackfillState != model.BackfillStateReadyToMerge { return } @@ -198,14 +197,14 @@ func TestAddIndexMergeIndexUntouchedValue(t *testing.T) { var runInsert bool var runUpdate bool originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { if job.Type != model.ActionAddIndex || job.SchemaState != model.StateWriteReorganization { return } - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil { return } @@ -243,15 +242,6 @@ func TestAddIndexMergeIndexUntouchedValue(t *testing.T) { tk.MustQuery("select * from t ignore index (idx);").Check(testkit.Rows("1 1 a a", "100 2 a a")) } -func findIdxInfo(dom *domain.Domain, dbName, tbName, idxName string) *model.IndexInfo { - tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) - if err != nil { - logutil.BgLogger().Warn("cannot find table", zap.String("dbName", dbName), zap.String("tbName", tbName)) - return nil - } - return tbl.Meta().FindIndexByName(idxName) -} - // 
TestCreateUniqueIndexKeyExist this case will test below things: // Create one unique index idx((a*b+1)); // insert (0, 6) and delete it; @@ -279,7 +269,7 @@ func TestCreateUniqueIndexKeyExist(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if t.Failed() { return @@ -346,7 +336,7 @@ func TestAddIndexMergeIndexUpdateOnDeleteOnly(t *testing.T) { var checkErrs []error originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedBefore := func(job *model.Job) { @@ -383,7 +373,7 @@ func TestAddIndexMergeDeleteUniqueOnWriteOnly(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if t.Failed() { return @@ -441,7 +431,7 @@ func TestAddIndexMergeDoubleDelete(t *testing.T) { d := dom.DDL() originalCallback := d.GetHook() defer d.SetHook(originalCallback) - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} onJobUpdatedExportedFunc := func(job *model.Job) { if t.Failed() { return @@ -485,7 +475,7 @@ func TestAddIndexMergeConflictWithPessimistic(t *testing.T) { tk.MustExec("set @@global.tidb_enable_metadata_lock = 0;") originHook := dom.DDL().GetHook() - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} runPessimisticTxn := false callback.OnJobRunBeforeExported = func(job *model.Job) { @@ -498,7 +488,7 @@ func TestAddIndexMergeConflictWithPessimistic(t *testing.T) { assert.NoError(t, err) } if !runPessimisticTxn && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "test", "t", "idx") + idx := testutil.FindIdxInfo(dom, "test", "t", "idx") if idx == nil { return } diff --git a/ddl/integration_test.go b/ddl/integration_test.go index 29e69aa855274..264e755889899 100644 --- a/ddl/integration_test.go +++ b/ddl/integration_test.go @@ -18,7 +18,7 @@ import ( "fmt" "testing" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" @@ -86,7 +86,7 @@ func TestDDLStatementsBackFill(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test;") needReorg := false - callback := &ddl.TestDDLCallback{ + callback := &callback.TestDDLCallback{ Do: dom, } onJobUpdatedExportedFunc := func(job *model.Job) { diff --git a/ddl/internal/callback/BUILD.bazel b/ddl/internal/callback/BUILD.bazel new file mode 100644 index 0000000000000..e0bb07c3b2c21 --- /dev/null +++ b/ddl/internal/callback/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "callback", + srcs = ["callback.go"], + importpath = "github.com/pingcap/tidb/ddl/internal/callback", + visibility = ["//ddl:__subpackages__"], + deps = [ + "//ddl", + "//infoschema", + "//parser/model", + "//sessionctx", + "//util/logutil", + "@org_uber_go_zap//:zap", + ], +) + +go_test( + name = "callback_test", + srcs = ["callback_test.go"], + embed = [":callback"], + deps = [ + "//ddl", + "@com_github_stretchr_testify//require", + ], +)
diff --git a/ddl/callback_test.go b/ddl/internal/callback/callback.go similarity index 93% rename from ddl/callback_test.go rename to ddl/internal/callback/callback.go index 5a97e8212689e..a3c84d774dd5c 100644 --- a/ddl/callback_test.go +++ b/ddl/internal/callback/callback.go @@ -1,4 +1,4 @@ -// Copyright 2015 PingCAP, Inc. +// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,27 +12,28 @@ // See the License for the specific language governing permissions and // limitations under the License. -package ddl +package callback import ( "context" "sync/atomic" - "testing" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/logutil" - "github.com/stretchr/testify/require" "go.uber.org/zap" ) +// TestInterceptor is a test interceptor in the ddl type TestInterceptor struct { - *BaseInterceptor + *ddl.BaseInterceptor OnGetInfoSchemaExported func(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema } +// OnGetInfoSchema is to run when to call GetInfoSchema func (ti *TestInterceptor) OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema { if ti.OnGetInfoSchemaExported != nil { return ti.OnGetInfoSchemaExported(ctx, is) @@ -43,10 +44,10 @@ func (ti *TestInterceptor) OnGetInfoSchema(ctx sessionctx.Context, is infoschema // TestDDLCallback is used to customize user callback themselves. type TestDDLCallback struct { - *BaseCallback + *ddl.BaseCallback // We recommended to pass the domain parameter to the test ddl callback, it will ensure // domain to reload schema before your ddl stepping into the next state change. - Do DomainReloader + Do ddl.DomainReloader onJobRunBefore func(*model.Job) OnJobRunBeforeExported func(*model.Job) @@ -149,11 +150,3 @@ func (tc *TestDDLCallback) OnGetJobAfter(jobType string, job *model.Job) { func (tc *TestDDLCallback) Clone() *TestDDLCallback { return &*tc } - -func TestCallback(t *testing.T) { - cb := &BaseCallback{} - require.Nil(t, cb.OnChanged(nil)) - cb.OnJobRunBefore(nil) - cb.OnJobUpdated(nil) - cb.OnWatched(context.TODO()) -} diff --git a/ddl/internal/callback/callback_test.go b/ddl/internal/callback/callback_test.go new file mode 100644 index 0000000000000..f611394909e48 --- /dev/null +++ b/ddl/internal/callback/callback_test.go @@ -0,0 +1,31 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package callback + +import ( + "context" + "testing" + + "github.com/pingcap/tidb/ddl" + "github.com/stretchr/testify/require" +) + +func TestCallback(t *testing.T) { + cb := &ddl.BaseCallback{} + require.Nil(t, cb.OnChanged(nil)) + cb.OnJobRunBefore(nil) + cb.OnJobUpdated(nil) + cb.OnWatched(context.TODO()) +} diff --git a/ddl/job_table_test.go b/ddl/job_table_test.go index d869dcecc2c0e..9f1150241bbd1 100644 --- a/ddl/job_table_test.go +++ b/ddl/job_table_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -62,7 +63,7 @@ func TestDDLScheduling(t *testing.T) { "ALTER TABLE e EXCHANGE PARTITION p1 WITH TABLE e3;", } - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} var wg util.WaitGroupWrapper wg.Add(1) var once sync.Once diff --git a/ddl/modify_column_test.go b/ddl/modify_column_test.go index 6eb8e633be007..583c0a435b4ec 100644 --- a/ddl/modify_column_test.go +++ b/ddl/modify_column_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/ast" @@ -72,7 +73,7 @@ func TestModifyColumnReorgInfo(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t1") // Check insert null before job first update. - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error var currJob *model.Job var elements []*meta.Element @@ -198,7 +199,7 @@ func TestModifyColumnNullToNotNull(t *testing.T) { tbl := external.GetTableByName(t, tk1, "test", "t1") // Check insert null before job first update. - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} tk1.MustExec("delete from t1") once := sync.Once{} var checkErr error @@ -253,7 +254,7 @@ func TestModifyColumnNullToNotNullWithChangingVal(t *testing.T) { tbl := external.GetTableByName(t, tk1, "test", "t1") // Check insert null before job first update. 
- hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} tk1.MustExec("delete from t1") once := sync.Once{} var checkErr error diff --git a/ddl/multi_schema_change_test.go b/ddl/multi_schema_change_test.go index d9facec4642cf..1f6a52bcce244 100644 --- a/ddl/multi_schema_change_test.go +++ b/ddl/multi_schema_change_test.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" @@ -386,7 +387,7 @@ func TestMultiSchemaChangeRenameColumns(t *testing.T) { tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int default 1, b int default 2)") tk.MustExec("insert into t values ()") - hook1 := &ddl.TestDDLCallback{Do: dom} + hook1 := &callback.TestDDLCallback{Do: dom} hook1.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionMultiSchemaChange, job.Type) if job.MultiSchemaInfo.SubJobs[0].SchemaState == model.StateWriteReorganization { @@ -457,7 +458,7 @@ func TestMultiSchemaChangeAlterColumns(t *testing.T) { // Test dml stmts when do alter tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int default 1, b int default 2)") - hook1 := &ddl.TestDDLCallback{Do: dom} + hook1 := &callback.TestDDLCallback{Do: dom} hook1.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionMultiSchemaChange, job.Type) if job.MultiSchemaInfo.SubJobs[0].SchemaState == model.StateWriteOnly { @@ -972,7 +973,7 @@ func TestMultiSchemaChangeAlterIndex(t *testing.T) { tk.MustExec("insert into t values (1, 2);") originHook := dom.DDL().GetHook() var checked bool - callback := &ddl.TestDDLCallback{Do: dom} + callback := &callback.TestDDLCallback{Do: dom} onJobUpdatedExportedFunc := func(job *model.Job) { assert.NotNil(t, job.MultiSchemaInfo) // "modify column a tinyint" in write-reorg. 
@@ -1042,7 +1043,7 @@ func TestMultiSchemaChangeAdminShowDDLJobs(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { assert.Equal(t, model.ActionMultiSchemaChange, job.Type) if job.MultiSchemaInfo.SubJobs[0].SchemaState == model.StateDeleteOnly { @@ -1129,7 +1130,7 @@ func TestMultiSchemaChangeWithExpressionIndex(t *testing.T) { tk.MustQuery("select * from t;").Check(testkit.Rows("1 2", "2 1")) originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -1191,7 +1192,7 @@ func TestMultiSchemaChangeSchemaVersion(t *testing.T) { schemaVerMap := map[int64]struct{}{} originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobSchemaStateChanged = func(schemaVer int64) { if schemaVer != 0 { // No same return schemaVer during multi-schema change @@ -1231,7 +1232,7 @@ func TestMultiSchemaChangeMixedWithUpdate(t *testing.T) { "'2020-01-01 10:00:00', 'wer', '10:00:00', 2.1, 12, 'qwer', 12, 'asdf');") originHook := dom.DDL().GetHook() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var checkErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if checkErr != nil { @@ -1266,7 +1267,7 @@ type cancelOnceHook struct { pred func(job *model.Job) bool s sessionctx.Context - ddl.TestDDLCallback + callback.TestDDLCallback } func (c *cancelOnceHook) OnJobUpdated(job *model.Job) { @@ -1299,7 +1300,7 @@ func newCancelJobHook(t *testing.T, store kv.Storage, dom *domain.Domain, return &cancelOnceHook{ store: store, pred: pred, - TestDDLCallback: ddl.TestDDLCallback{Do: dom}, + TestDDLCallback: callback.TestDDLCallback{Do: dom}, s: tk.Session(), } } diff --git a/ddl/mv_index_test.go b/ddl/mv_index_test.go index 964211ad76740..10fbe2971377a 100644 --- a/ddl/mv_index_test.go +++ b/ddl/mv_index_test.go @@ -19,7 +19,7 @@ import ( "strings" "testing" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" @@ -45,7 +45,7 @@ func TestMultiValuedIndexOnlineDDL(t *testing.T) { internalTK := testkit.NewTestKit(t, store) internalTK.MustExec("use test") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} n := 100 hook.OnJobRunBeforeExported = func(job *model.Job) { internalTK.MustExec(fmt.Sprintf("insert into t values (%d, '[%d, %d, %d]')", n, n, n+1, n+2)) diff --git a/ddl/placement_policy_test.go b/ddl/placement_policy_test.go index 559cc0ff59a46..327c7e02cf0b6 100644 --- a/ddl/placement_policy_test.go +++ b/ddl/placement_policy_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" @@ -135,7 +136,7 @@ func TestPlacementPolicy(t *testing.T) { tk.MustExec("use test") tk.MustExec("drop placement policy if exists x") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var policyID int64 onJobUpdatedExportedFunc := func(job *model.Job) { if policyID 
!= 0 { diff --git a/ddl/repair_table_test.go b/ddl/repair_table_test.go index 6881c6ce5f019..8b16f9bfbc69d 100644 --- a/ddl/repair_table_test.go +++ b/ddl/repair_table_test.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -101,7 +101,7 @@ func TestRepairTable(t *testing.T) { // Repaired tableInfo has been filtered by `domain.InfoSchema()`, so get it in repairInfo. originTableInfo, _ := domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin") - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var repairErr error hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type != model.ActionRepairTable { diff --git a/ddl/resource_group_test.go b/ddl/resource_group_test.go index 789e81f99f0fb..3bf33b04d9012 100644 --- a/ddl/resource_group_test.go +++ b/ddl/resource_group_test.go @@ -19,7 +19,7 @@ import ( "strconv" "testing" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/resourcegroup" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/domain/infosync" @@ -36,7 +36,7 @@ func TestResourceGroupBasic(t *testing.T) { tk.MustExec("use test") re := require.New(t) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var groupID int64 onJobUpdatedExportedFunc := func(job *model.Job) { // job.SchemaID will be assigned when the group is created. diff --git a/ddl/rollingback_test.go b/ddl/rollingback_test.go index f1850eb80dcbc..ee5894441175f 100644 --- a/ddl/rollingback_test.go +++ b/ddl/rollingback_test.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/external" @@ -50,7 +51,7 @@ func TestCancelAddIndexJobError(t *testing.T) { require.NotNil(t, tbl) d := dom.DDL() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var ( checkErr error jobID int64 diff --git a/ddl/serial_test.go b/ddl/serial_test.go index 970f60a95ff96..668b675a0b185 100644 --- a/ddl/serial_test.go +++ b/ddl/serial_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" @@ -445,7 +446,7 @@ func TestCancelAddIndexPanic(t *testing.T) { oldReorgWaitTimeout := ddl.ReorgWaitTimeout ddl.ReorgWaitTimeout = 50 * time.Millisecond defer func() { ddl.ReorgWaitTimeout = oldReorgWaitTimeout }() - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type == model.ActionAddIndex && job.State == model.JobStateRunning && job.SchemaState == model.StateWriteReorganization && job.SnapshotVer != 0 { tkCancel.MustQuery(fmt.Sprintf("admin cancel ddl jobs %d", job.ID)) @@ -684,7 +685,7 @@ func TestRecoverTableByJobIDFail(t *testing.T) { tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) // set hook - hook := &ddl.TestDDLCallback{} + hook := 
&callback.TestDDLCallback{} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type == model.ActionRecoverTable { require.NoError(t, failpoint.Enable("tikvclient/mockCommitError", `return(true)`)) @@ -743,7 +744,7 @@ func TestRecoverTableByTableNameFail(t *testing.T) { tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) // set hook - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type == model.ActionRecoverTable { require.NoError(t, failpoint.Enable("tikvclient/mockCommitError", `return(true)`)) @@ -816,7 +817,7 @@ func TestCanceledJobTakeTime(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t_cjtt(a int)") - hook := &ddl.TestDDLCallback{} + hook := &callback.TestDDLCallback{} once := sync.Once{} hook.OnJobRunBeforeExported = func(job *model.Job) { once.Do(func() { diff --git a/ddl/table_modify_test.go b/ddl/table_modify_test.go index 590fea8ad973d..7f0b23e3fd894 100644 --- a/ddl/table_modify_test.go +++ b/ddl/table_modify_test.go @@ -20,6 +20,7 @@ import ( "time" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/infoschema" @@ -205,7 +206,7 @@ func TestConcurrentLockTables(t *testing.T) { } func testParallelExecSQL(t *testing.T, store kv.Storage, dom *domain.Domain, sql1, sql2 string, se1, se2 session.Session, f func(t *testing.T, err1, err2 error)) { - callback := &ddl.TestDDLCallback{} + callback := &callback.TestDDLCallback{} times := 0 callback.OnJobRunBeforeExported = func(job *model.Job) { if times != 0 { diff --git a/ddl/testutil/BUILD.bazel b/ddl/testutil/BUILD.bazel index 3562ca3b34571..052a747f427ba 100644 --- a/ddl/testutil/BUILD.bazel +++ b/ddl/testutil/BUILD.bazel @@ -14,6 +14,8 @@ go_library( "//table", "//table/tables", "//types", + "//util/logutil", "@com_github_pingcap_errors//:errors", + "@org_uber_go_zap//:zap", ], ) diff --git a/ddl/testutil/testutil.go b/ddl/testutil/testutil.go index 642579ba00ea7..52adf6b750f73 100644 --- a/ddl/testutil/testutil.go +++ b/ddl/testutil/testutil.go @@ -26,6 +26,8 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" ) // SessionExecInGoroutine export for testing. @@ -82,3 +84,13 @@ func ExtractAllTableHandles(se session.Session, dbName, tbName string) ([]int64, }) return allHandles, err } + +// FindIdxInfo is to get IndexInfo by index name. 
+func FindIdxInfo(dom *domain.Domain, dbName, tbName, idxName string) *model.IndexInfo { + tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) + if err != nil { + logutil.BgLogger().Warn("cannot find table", zap.String("dbName", dbName), zap.String("tbName", tbName)) + return nil + } + return tbl.Meta().FindIndexByName(idxName) +} diff --git a/ddl/tiflash_replica_test.go b/ddl/tiflash_replica_test.go index c66061ad64b3b..abd7275e4669b 100644 --- a/ddl/tiflash_replica_test.go +++ b/ddl/tiflash_replica_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/internal/callback" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" @@ -286,7 +286,7 @@ func TestCreateTableWithLike2(t *testing.T) { tbl1 := external.GetTableByName(t, tk, "test", "t1") doneCh := make(chan error, 2) - hook := &ddl.TestDDLCallback{Do: dom} + hook := &callback.TestDDLCallback{Do: dom} var onceChecker sync.Map hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type != model.ActionAddColumn && job.Type != model.ActionDropColumn && diff --git a/ddl/tiflashtest/ddl_tiflash_test.go b/ddl/tiflashtest/ddl_tiflash_test.go index d1d0368138b18..c3ec3a1d2b0fb 100644 --- a/ddl/tiflashtest/ddl_tiflash_test.go +++ b/ddl/tiflashtest/ddl_tiflash_test.go @@ -1334,3 +1334,23 @@ func TestTiFlashAvailableAfterAddPartition(t *testing.T) { require.NotNil(t, pi) require.Equal(t, len(pi.Definitions), 2) } + +func TestTiFlashAvailableAfterDownOneStore(t *testing.T) { + s, teardown := createTiFlashContext(t) + defer teardown() + tk := testkit.NewTestKit(t, s.store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists ddltiflash") + tk.MustExec("create table ddltiflash(z int) PARTITION BY RANGE(z) (PARTITION p0 VALUES LESS THAN (10))") + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/OneTiFlashStoreDown", `return`)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/domain/infosync/OneTiFlashStoreDown", `return`)) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/OneTiFlashStoreDown")) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/domain/infosync/OneTiFlashStoreDown")) + }() + + tk.MustExec("alter table ddltiflash set tiflash replica 1") + time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) + CheckTableAvailable(s.dom, t, 1, []string{}) +} diff --git a/domain/BUILD.bazel b/domain/BUILD.bazel index 859943b6c6672..ccbf75dd48ee6 100644 --- a/domain/BUILD.bazel +++ b/domain/BUILD.bazel @@ -45,7 +45,6 @@ go_library( "//privilege/privileges", "//sessionctx", "//sessionctx/sessionstates", - "//sessionctx/stmtctx", "//sessionctx/variable", "//statistics/handle", "//telemetry", diff --git a/domain/historical_stats.go b/domain/historical_stats.go index 07e82bafeb58c..6d4125b75f5d7 100644 --- a/domain/historical_stats.go +++ b/domain/historical_stats.go @@ -16,10 +16,13 @@ package domain import ( "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics/handle" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" ) var ( @@ -35,7 +38,21 @@ type HistoricalStatsWorker struct { // SendTblToDumpHistoricalStats send tableID to worker to dump historical stats func 
(w *HistoricalStatsWorker) SendTblToDumpHistoricalStats(tableID int64) {
-	w.tblCH <- tableID
+	send := enableDumpHistoricalStats.Load()
+	failpoint.Inject("sendHistoricalStats", func(val failpoint.Value) {
+		if val.(bool) {
+			send = true
+		}
+	})
+	if !send {
+		return
+	}
+	select {
+	case w.tblCH <- tableID:
+		return
+	default:
+		logutil.BgLogger().Warn("discard dump historical stats task", zap.Int64("table-id", tableID))
+	}
 }
 
 // DumpHistoricalStats dump stats by given tableID
diff --git a/domain/infosync/tiflash_manager.go b/domain/infosync/tiflash_manager.go
index 4d01c64de002d..d5cc46f95db95 100644
--- a/domain/infosync/tiflash_manager.go
+++ b/domain/infosync/tiflash_manager.go
@@ -31,6 +31,7 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/ddl/placement"
 	"github.com/pingcap/tidb/store/helper"
 	"github.com/pingcap/tidb/tablecodec"
@@ -89,10 +90,19 @@ func getTiFlashPeerWithoutLagCount(tiFlashStores map[int64]helper.StoreStat, tab
 	for _, store := range tiFlashStores {
 		regionReplica := make(map[int64]int)
 		err := helper.CollectTiFlashStatus(store.Store.StatusAddress, tableID, &regionReplica)
+		failpoint.Inject("OneTiFlashStoreDown", func() {
+			if store.Store.StateName == "Down" {
+				err = errors.New("mock TiFlash down")
+			}
+		})
 		if err != nil {
 			logutil.BgLogger().Error("Fail to get peer status from TiFlash.",
 				zap.Int64("tableID", tableID))
-			return 0, err
+			// Just skip down, offline, or tombstone stores, because PD will migrate regions from these stores.
+			if store.Store.StateName == "Up" || store.Store.StateName == "Disconnected" {
+				return 0, err
+			}
+			continue
 		}
 		flashPeerCount += len(regionReplica)
 	}
diff --git a/domain/plan_replayer.go b/domain/plan_replayer.go
index 0f72a1ac8a575..8bbc26cf79ec2 100644
--- a/domain/plan_replayer.go
+++ b/domain/plan_replayer.go
@@ -34,8 +34,8 @@ import (
 	"github.com/pingcap/tidb/parser/ast"
 	"github.com/pingcap/tidb/parser/terror"
 	"github.com/pingcap/tidb/sessionctx"
-	"github.com/pingcap/tidb/sessionctx/stmtctx"
 	"github.com/pingcap/tidb/sessionctx/variable"
+	"github.com/pingcap/tidb/util"
 	"github.com/pingcap/tidb/util/chunk"
 	"github.com/pingcap/tidb/util/logutil"
 	"github.com/pingcap/tidb/util/replayer"
@@ -164,7 +164,18 @@ func insertPlanReplayerSuccessStatusRecord(ctx context.Context, sctx sessionctx.
record.SQLDigest, record.PlanDigest, record.OriginSQL, record.Token, instance)) if err != nil { logutil.BgLogger().Warn("insert mysql.plan_replayer_status record failed", + zap.String("sql", record.OriginSQL), zap.Error(err)) + // try insert record without original sql + _, err = exec.ExecuteInternal(ctx, fmt.Sprintf( + "insert into mysql.plan_replayer_status (sql_digest, plan_digest, token, instance) values ('%s','%s','%s','%s')", + record.SQLDigest, record.PlanDigest, record.Token, instance)) + if err != nil { + logutil.BgLogger().Warn("insert mysql.plan_replayer_status record failed", + zap.String("sqlDigest", record.SQLDigest), + zap.String("planDigest", record.PlanDigest), + zap.Error(err)) + } } } @@ -379,6 +390,7 @@ func (w *planReplayerTaskDumpWorker) handleTask(task *PlanReplayerDumpTask) { occupy := true handleTask := true defer func() { + util.Recover(metrics.LabelDomain, "PlanReplayerTaskDumpWorker", nil, false) logutil.BgLogger().Debug("[plan-replayer-capture] handle task", zap.String("sql-digest", sqlDigest), zap.String("plan-digest", planDigest), @@ -431,7 +443,6 @@ func (w *planReplayerTaskDumpWorker) HandleTask(task *PlanReplayerDumpTask) (suc } task.Zf = file task.FileName = fileName - task.EncodedPlan, _ = task.EncodePlan(task.SessionVars.StmtCtx, false) if task.InExecute && len(task.NormalizedSQL) > 0 { p := parser.New() stmts, _, err := p.ParseSQL(task.NormalizedSQL) @@ -538,7 +549,6 @@ type PlanReplayerDumpTask struct { replayer.PlanReplayerTaskKey // tmp variables stored during the query - EncodePlan func(*stmtctx.StatementContext, bool) (string, string) TblStats map[int64]interface{} InExecute bool NormalizedSQL string diff --git a/domain/plan_replayer_dump.go b/domain/plan_replayer_dump.go index 01ab473e16a90..5559dd3915b52 100644 --- a/domain/plan_replayer_dump.go +++ b/domain/plan_replayer_dump.go @@ -282,7 +282,14 @@ func DumpPlanReplayerInfo(ctx context.Context, sctx sessionctx.Context, // For capture task, we dump stats in storage only if EnableHistoricalStatsForCapture is disabled. // For manual plan replayer dump command, we directly dump stats in storage - if !variable.EnableHistoricalStatsForCapture.Load() || !task.IsCapture { + if task.IsCapture { + if !task.IsContinuesCapture && variable.EnableHistoricalStatsForCapture.Load() { + // Dump stats + if err = dumpStats(zw, pairs, do); err != nil { + return err + } + } + } else { // Dump stats if err = dumpStats(zw, pairs, do); err != nil { return err diff --git a/executor/BUILD.bazel b/executor/BUILD.bazel index 35703034b1214..c8881bec70917 100644 --- a/executor/BUILD.bazel +++ b/executor/BUILD.bazel @@ -192,7 +192,6 @@ go_library( "//util/servermemorylimit", "//util/set", "//util/size", - "//util/slice", "//util/sqlexec", "//util/stmtsummary", "//util/stringutil", @@ -239,6 +238,7 @@ go_library( "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//status", "@org_golang_x_exp//slices", "@org_golang_x_sync//errgroup", diff --git a/executor/adapter.go b/executor/adapter.go index 46d925ff82ac2..fe62c57efe04b 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -1413,17 +1413,7 @@ func (a *ExecStmt) observePhaseDurations(internal bool, commitDetails *util.Comm // 4. update the `PrevStmt` in session variable. // 5. reset `DurationParse` in session variable. 
func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults bool) { - se := a.Ctx - if !se.GetSessionVars().InRestrictedSQL && se.GetSessionVars().IsPlanReplayerCaptureEnabled() { - stmtNode := a.GetStmtNode() - if se.GetSessionVars().EnablePlanReplayedContinuesCapture { - if checkPlanReplayerContinuesCaptureValidStmt(stmtNode) { - checkPlanReplayerContinuesCapture(se, stmtNode, txnTS) - } - } else { - checkPlanReplayerCaptureTask(se, stmtNode, txnTS) - } - } + a.checkPlanReplayerCapture(txnTS) sessVars := a.Ctx.GetSessionVars() execDetail := sessVars.StmtCtx.GetExecDetails() @@ -1486,6 +1476,23 @@ func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults boo } } +func (a *ExecStmt) checkPlanReplayerCapture(txnTS uint64) { + if kv.GetInternalSourceType(a.GoCtx) == kv.InternalTxnStats { + return + } + se := a.Ctx + if !se.GetSessionVars().InRestrictedSQL && se.GetSessionVars().IsPlanReplayerCaptureEnabled() { + stmtNode := a.GetStmtNode() + if se.GetSessionVars().EnablePlanReplayedContinuesCapture { + if checkPlanReplayerContinuesCaptureValidStmt(stmtNode) { + checkPlanReplayerContinuesCapture(se, stmtNode, txnTS) + } + } else { + checkPlanReplayerCaptureTask(se, stmtNode, txnTS) + } + } +} + // CloseRecordSet will finish the execution of current statement and do some record work func (a *ExecStmt) CloseRecordSet(txnStartTS uint64, lastErr error) { a.FinishExecuteStmt(txnStartTS, lastErr, false) @@ -2113,7 +2120,6 @@ func sendPlanReplayerDumpTask(key replayer.PlanReplayerTaskKey, sctx sessionctx. dumpTask := &domain.PlanReplayerDumpTask{ PlanReplayerTaskKey: key, StartTS: startTS, - EncodePlan: GetEncodedPlan, TblStats: stmtCtx.TableStats, SessionBindings: handle.GetAllBindRecord(), SessionVars: sctx.GetSessionVars(), @@ -2122,6 +2128,7 @@ func sendPlanReplayerDumpTask(key replayer.PlanReplayerTaskKey, sctx sessionctx. 
IsCapture: true, IsContinuesCapture: isContinuesCapture, } + dumpTask.EncodedPlan, _ = GetEncodedPlan(stmtCtx, false) if _, ok := stmtNode.(*ast.ExecuteStmt); ok { nsql, _ := sctx.GetSessionVars().StmtCtx.SQLDigest() dumpTask.InExecute = true diff --git a/executor/executor_test.go b/executor/executor_test.go index 7e6a51799d778..85d0dad290bb0 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -204,6 +204,15 @@ func TestPlanReplayerCapture(t *testing.T) { func TestPlanReplayerContinuesCapture(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) + + tk.MustExec("set @@global.tidb_enable_historical_stats='OFF'") + _, err := tk.Exec("set @@global.tidb_enable_plan_replayer_continues_capture='ON'") + require.Error(t, err) + require.Equal(t, err.Error(), "tidb_enable_historical_stats should be enabled before enabling tidb_enable_plan_replayer_continues_capture") + + tk.MustExec("set @@global.tidb_enable_historical_stats='ON'") + tk.MustExec("set @@global.tidb_enable_plan_replayer_continues_capture='ON'") + prHandle := dom.GetPlanReplayerHandle() tk.MustExec("delete from mysql.plan_replayer_status;") tk.MustExec("use test") diff --git a/executor/fktest/foreign_key_test.go b/executor/fktest/foreign_key_test.go index fb29d391aaf09..6e2e1d83662f1 100644 --- a/executor/fktest/foreign_key_test.go +++ b/executor/fktest/foreign_key_test.go @@ -2839,3 +2839,19 @@ func TestForeignKeyAndMultiValuedIndex(t *testing.T) { tk.MustExec("admin check table t1") tk.MustExec("admin check table t2") } + +func TestForeignKeyAndSessionVariable(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("set @@foreign_key_checks=1") + tk.MustExec("use test") + tk.MustExec("create table t1 (t timestamp, index(t));") + tk.MustExec("create table t2 (t timestamp, foreign key (t) references t1(t) on delete cascade);") + tk.MustExec("set @@time_zone='+8:00';") + tk.MustExec("insert into t1 values ('2023-01-28 10:29:16');") + tk.MustExec("insert into t2 values ('2023-01-28 10:29:16');") + tk.MustExec("set @@time_zone='+6:00';") + tk.MustExec("delete from t1;") + tk.MustQuery("select * from t1").Check(testkit.Rows()) + tk.MustQuery("select * from t2").Check(testkit.Rows()) +} diff --git a/executor/historical_stats_test.go b/executor/historical_stats_test.go index 0b00d3182f019..becb1e82212f8 100644 --- a/executor/historical_stats_test.go +++ b/executor/historical_stats_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics/handle" @@ -30,6 +31,8 @@ import ( ) func TestRecordHistoryStatsAfterAnalyze(t *testing.T) { + failpoint.Enable("github.com/pingcap/tidb/domain/sendHistoricalStats", "return(true)") + defer failpoint.Disable("github.com/pingcap/tidb/domain/sendHistoricalStats") store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) @@ -150,6 +153,8 @@ func TestRecordHistoryStatsMetaAfterAnalyze(t *testing.T) { } func TestGCHistoryStatsAfterDropTable(t *testing.T) { + failpoint.Enable("github.com/pingcap/tidb/domain/sendHistoricalStats", "return(true)") + defer failpoint.Disable("github.com/pingcap/tidb/domain/sendHistoricalStats") store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) tk.MustExec("set global tidb_enable_historical_stats = 1") @@ -174,6 +179,7 @@ func 
TestGCHistoryStatsAfterDropTable(t *testing.T) { tableInfo.Meta().ID)).Check(testkit.Rows("1")) // drop the table and gc stats tk.MustExec("drop table t") + is = dom.InfoSchema() h.GCStats(is, 0) // assert stats_history tables delete the record of dropped table @@ -183,7 +189,56 @@ func TestGCHistoryStatsAfterDropTable(t *testing.T) { tableInfo.Meta().ID)).Check(testkit.Rows("0")) } +func TestAssertHistoricalStatsAfterAlterTable(t *testing.T) { + failpoint.Enable("github.com/pingcap/tidb/domain/sendHistoricalStats", "return(true)") + defer failpoint.Disable("github.com/pingcap/tidb/domain/sendHistoricalStats") + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("set global tidb_enable_historical_stats = 1") + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b varchar(10),c int, KEY `idx` (`c`))") + tk.MustExec("analyze table test.t") + is := dom.InfoSchema() + tableInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + require.NoError(t, err) + // dump historical stats + h := dom.StatsHandle() + hsWorker := dom.GetHistoricalStatsWorker() + tblID := hsWorker.GetOneHistoricalStatsTable() + err = hsWorker.DumpHistoricalStats(tblID, h) + require.Nil(t, err) + + time.Sleep(1 * time.Second) + snapshot := oracle.GoTimeToTS(time.Now()) + jsTable, err := h.DumpHistoricalStatsBySnapshot("test", tableInfo.Meta(), snapshot) + require.NoError(t, err) + require.NotNil(t, jsTable) + require.NotEqual(t, jsTable.Version, uint64(0)) + originVersion := jsTable.Version + + // assert historical stats non-change after drop column + tk.MustExec("alter table t drop column b") + h.GCStats(is, 0) + snapshot = oracle.GoTimeToTS(time.Now()) + jsTable, err = h.DumpHistoricalStatsBySnapshot("test", tableInfo.Meta(), snapshot) + require.NoError(t, err) + require.NotNil(t, jsTable) + require.Equal(t, jsTable.Version, originVersion) + + // assert historical stats non-change after drop index + tk.MustExec("alter table t drop index idx") + h.GCStats(is, 0) + snapshot = oracle.GoTimeToTS(time.Now()) + jsTable, err = h.DumpHistoricalStatsBySnapshot("test", tableInfo.Meta(), snapshot) + require.NoError(t, err) + require.NotNil(t, jsTable) + require.Equal(t, jsTable.Version, originVersion) +} + func TestGCOutdatedHistoryStats(t *testing.T) { + failpoint.Enable("github.com/pingcap/tidb/domain/sendHistoricalStats", "return(true)") + defer failpoint.Disable("github.com/pingcap/tidb/domain/sendHistoricalStats") store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) tk.MustExec("set global tidb_enable_historical_stats = 1") @@ -219,6 +274,8 @@ func TestGCOutdatedHistoryStats(t *testing.T) { } func TestPartitionTableHistoricalStats(t *testing.T) { + failpoint.Enable("github.com/pingcap/tidb/domain/sendHistoricalStats", "return(true)") + defer failpoint.Disable("github.com/pingcap/tidb/domain/sendHistoricalStats") store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) tk.MustExec("set global tidb_enable_historical_stats = 1") @@ -246,6 +303,8 @@ PARTITION p0 VALUES LESS THAN (6) } func TestDumpHistoricalStatsByTable(t *testing.T) { + failpoint.Enable("github.com/pingcap/tidb/domain/sendHistoricalStats", "return(true)") + defer failpoint.Disable("github.com/pingcap/tidb/domain/sendHistoricalStats") store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) tk.MustExec("set global 
tidb_enable_historical_stats = 1") diff --git a/executor/infoschema_cluster_table_test.go b/executor/infoschema_cluster_table_test.go index be2f04cb5c6ac..b1a6d4c57f4f8 100644 --- a/executor/infoschema_cluster_table_test.go +++ b/executor/infoschema_cluster_table_test.go @@ -290,7 +290,7 @@ func TestTableStorageStats(t *testing.T) { "test 2", )) rows := tk.MustQuery("select TABLE_NAME from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql';").Rows() - result := 45 + result := 46 require.Len(t, rows, result) // More tests about the privileges. diff --git a/executor/load_stats.go b/executor/load_stats.go index d59651e29200a..e292eb817519d 100644 --- a/executor/load_stats.go +++ b/executor/load_stats.go @@ -52,7 +52,7 @@ func (k loadStatsVarKeyType) String() string { const LoadStatsVarKey loadStatsVarKeyType = 0 // Next implements the Executor Next interface. -func (e *LoadStatsExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *LoadStatsExec) Next(_ context.Context, req *chunk.Chunk) error { req.GrowAndReset(e.maxChunkSize) if len(e.info.Path) == 0 { return errors.New("Load Stats: file path is empty") @@ -72,7 +72,7 @@ func (e *LoadStatsExec) Close() error { } // Open implements the Executor Open interface. -func (e *LoadStatsExec) Open(ctx context.Context) error { +func (e *LoadStatsExec) Open(_ context.Context) error { return nil } diff --git a/executor/lock_stats.go b/executor/lock_stats.go index 540670e8119fc..ccf6df123db07 100644 --- a/executor/lock_stats.go +++ b/executor/lock_stats.go @@ -44,7 +44,7 @@ func (k lockStatsVarKeyType) String() string { const LockStatsVarKey lockStatsVarKeyType = 0 // Next implements the Executor Next interface. -func (e *LockStatsExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *LockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { do := domain.GetDomain(e.ctx) is := do.InfoSchema() h := do.StatsHandle() @@ -87,7 +87,7 @@ func (e *LockStatsExec) Close() error { } // Open implements the Executor Open interface. -func (e *LockStatsExec) Open(ctx context.Context) error { +func (e *LockStatsExec) Open(_ context.Context) error { return nil } @@ -109,7 +109,7 @@ func (k unlockStatsVarKeyType) String() string { const UnlockStatsVarKey unlockStatsVarKeyType = 0 // Next implements the Executor Next interface. -func (e *UnlockStatsExec) Next(ctx context.Context, req *chunk.Chunk) error { +func (e *UnlockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { do := domain.GetDomain(e.ctx) is := do.InfoSchema() h := do.StatsHandle() @@ -152,6 +152,6 @@ func (e *UnlockStatsExec) Close() error { } // Open implements the Executor Open interface. 
-func (e *UnlockStatsExec) Open(ctx context.Context) error { +func (e *UnlockStatsExec) Open(_ context.Context) error { return nil } diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go index 3c1530ed714cf..a299438da0777 100644 --- a/executor/memtable_reader.go +++ b/executor/memtable_reader.go @@ -49,6 +49,7 @@ import ( "golang.org/x/exp/slices" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) const clusterLogBatchSize = 256 @@ -431,7 +432,7 @@ func (e *clusterLogRetriever) startRetrieving( serversInfo []infoschema.ServerInfo, req *diagnosticspb.SearchLogRequest) ([]chan logStreamResult, error) { // gRPC options - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) security := config.GetGlobalConfig().Security if len(security.ClusterSSLCA) != 0 { clusterSecurity := security.ClusterSecurity() diff --git a/executor/memtable_reader_test.go b/executor/memtable_reader_test.go index 870a4193fb3b2..f6d98d4ec24fc 100644 --- a/executor/memtable_reader_test.go +++ b/executor/memtable_reader_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/fn" "github.com/pingcap/sysutil" + "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/util/pdapi" pmodel "github.com/prometheus/common/model" @@ -56,7 +57,7 @@ func TestMetricTableData(t *testing.T) { } matrix = append(matrix, &pmodel.SampleStream{Metric: metric, Values: []pmodel.SamplePair{v1}}) - ctx := context.WithValue(context.Background(), "__mockMetricsPromData", matrix) + ctx := context.WithValue(context.Background(), executor.MockMetricsPromDataKey{}, matrix) ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool { return fpname == fpName }) diff --git a/executor/metrics_reader.go b/executor/metrics_reader.go index 314616785d60f..d9e0bd39f1128 100644 --- a/executor/metrics_reader.go +++ b/executor/metrics_reader.go @@ -89,9 +89,12 @@ func (e *MetricRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) return totalRows, nil } +// MockMetricsPromDataKey is for test +type MockMetricsPromDataKey struct{} + func (e *MetricRetriever) queryMetric(ctx context.Context, sctx sessionctx.Context, queryRange promv1.Range, quantile float64) (result pmodel.Value, err error) { failpoint.InjectContext(ctx, "mockMetricsPromData", func() { - failpoint.Return(ctx.Value("__mockMetricsPromData").(pmodel.Matrix), nil) + failpoint.Return(ctx.Value(MockMetricsPromDataKey{}).(pmodel.Matrix), nil) }) // Add retry to avoid network error. 
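The `MockMetricsPromDataKey` change just above, like the `signalsKey` change in slow_query.go below, replaces a plain-string key in `context.WithValue` with a dedicated empty-struct key type: a string key can collide with an identical string defined anywhere else, while an unexported struct type cannot. A self-contained sketch of the pattern:

```go
package main

import (
	"context"
	"fmt"
)

// signalsKey is an unexported empty struct type: no other package can
// construct the same key, so the stored value cannot collide the way a
// plain string key like "signals" can.
type signalsKey struct{}

func main() {
	signal1, signal2 := make(chan int, 1), make(chan int, 1)
	ctx := context.WithValue(context.Background(), signalsKey{}, []chan int{signal1, signal2})

	// Retrieval uses the same key type; the type assertion documents
	// exactly what was stored, as the slow_query.go hunks below do.
	signals := ctx.Value(signalsKey{}).([]chan int)
	fmt.Println(len(signals)) // prints 2
}
```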
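Further up, `SendTblToDumpHistoricalStats` switches from an unconditional `w.tblCH <- tableID` to a `select` with a `default` branch, so a full worker queue drops the task with a warning instead of blocking the caller. A minimal sketch of that back-pressure choice; the channel capacity and the logging stand-in here are hypothetical:

```go
package main

import "fmt"

// trySend mimics the non-blocking enqueue added to
// SendTblToDumpHistoricalStats: if the worker's channel is full, the
// task is discarded rather than stalling the caller.
func trySend(tblCh chan int64, tableID int64) bool {
	select {
	case tblCh <- tableID:
		return true
	default:
		fmt.Printf("discard dump historical stats task, table-id=%d\n", tableID)
		return false
	}
}

func main() {
	tblCh := make(chan int64, 1)     // hypothetical capacity of 1
	fmt.Println(trySend(tblCh, 100)) // true: buffered slot is free
	fmt.Println(trySend(tblCh, 101)) // false: channel full, task dropped
}
```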
diff --git a/executor/seqtest/prepared_test.go b/executor/seqtest/prepared_test.go index 12a99a7b90ca7..5811a386f137d 100644 --- a/executor/seqtest/prepared_test.go +++ b/executor/seqtest/prepared_test.go @@ -334,7 +334,7 @@ func TestPrepareWithAggregation(t *testing.T) { tk.MustExec(fmt.Sprintf(`set @@tidb_enable_prepared_plan_cache=%v`, flag)) se, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk.SetSession(se) @@ -599,7 +599,7 @@ func TestPrepareDealloc(t *testing.T) { tk.MustExec(`set @@tidb_enable_prepared_plan_cache=true`) se, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: plannercore.NewLRUPlanCache(3, 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: plannercore.NewLRUPlanCache(3, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk.SetSession(se) @@ -806,3 +806,63 @@ func TestIssue38323(t *testing.T) { tk.MustExec("set @a = 1;") tk.MustQuery("execute stmt using @a, @a").Check(tk.MustQuery("explain select * from t where 1 = id and 1 = k group by id, k").Rows()) } + +func TestSetPlanCacheLimitSwitch(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustQuery("select @@session.tidb_enable_plan_cache_for_param_limit").Check(testkit.Rows("1")) + tk.MustQuery("select @@global.tidb_enable_plan_cache_for_param_limit").Check(testkit.Rows("1")) + + tk.MustExec("set @@session.tidb_enable_plan_cache_for_param_limit = OFF;") + tk.MustQuery("select @@session.tidb_enable_plan_cache_for_param_limit").Check(testkit.Rows("0")) + + tk.MustExec("set @@session.tidb_enable_plan_cache_for_param_limit = 1;") + tk.MustQuery("select @@session.tidb_enable_plan_cache_for_param_limit").Check(testkit.Rows("1")) + + tk.MustExec("set @@global.tidb_enable_plan_cache_for_param_limit = off;") + tk.MustQuery("select @@global.tidb_enable_plan_cache_for_param_limit").Check(testkit.Rows("0")) + + tk.MustExec("set @@global.tidb_enable_plan_cache_for_param_limit = ON;") + tk.MustQuery("select @@global.tidb_enable_plan_cache_for_param_limit").Check(testkit.Rows("1")) +} + +func TestPlanCacheLimitSwitchEffective(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, key(a))") + + checkIfCached := func(res string) { + tk.MustExec("set @a = 1") + tk.MustExec("execute stmt using @a") + tk.MustExec("execute stmt using @a") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows(res)) + } + + // before prepare + tk.MustExec("set @@session.tidb_enable_plan_cache_for_param_limit = OFF") + tk.MustExec("prepare stmt from 'select * from t limit ?'") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip plan-cache: query has 'limit ?' 
is un-cacheable")) + checkIfCached("0") + tk.MustExec("deallocate prepare stmt") + + // after prepare + tk.MustExec("set @@session.tidb_enable_plan_cache_for_param_limit = ON") + tk.MustExec("prepare stmt from 'select * from t limit ?'") + tk.MustExec("set @@session.tidb_enable_plan_cache_for_param_limit = OFF") + checkIfCached("0") + tk.MustExec("execute stmt using @a") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip plan-cache: the switch 'tidb_enable_plan_cache_for_param_limit' is off")) + tk.MustExec("deallocate prepare stmt") + + // after execute + tk.MustExec("set @@session.tidb_enable_plan_cache_for_param_limit = ON") + tk.MustExec("prepare stmt from 'select * from t limit ?'") + checkIfCached("1") + tk.MustExec("set @@session.tidb_enable_plan_cache_for_param_limit = OFF") + checkIfCached("0") + tk.MustExec("deallocate prepare stmt") +} diff --git a/executor/set_test.go b/executor/set_test.go index 1b2b4186bb4a3..01a2fc7979efc 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -645,7 +645,7 @@ func TestSetVar(t *testing.T) { tk.MustQuery("select @@tidb_enable_tso_follower_proxy").Check(testkit.Rows("0")) require.Error(t, tk.ExecToErr("set tidb_enable_tso_follower_proxy = 1")) - tk.MustQuery("select @@tidb_enable_historical_stats").Check(testkit.Rows("0")) + tk.MustQuery("select @@tidb_enable_historical_stats").Check(testkit.Rows("1")) tk.MustExec("set global tidb_enable_historical_stats = 1") tk.MustQuery("select @@tidb_enable_historical_stats").Check(testkit.Rows("1")) tk.MustExec("set global tidb_enable_historical_stats = 0") diff --git a/executor/show.go b/executor/show.go index 36a9b8485822b..130f743d914bb 100644 --- a/executor/show.go +++ b/executor/show.go @@ -69,7 +69,6 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/sem" "github.com/pingcap/tidb/util/set" - "github.com/pingcap/tidb/util/slice" "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/stringutil" "golang.org/x/exp/slices" @@ -317,7 +316,10 @@ func (e *ShowExec) fetchShowBind() error { } else { tmp = domain.GetDomain(e.ctx).BindHandle().GetAllBindRecord() } - bindRecords := slice.Copy(tmp) + bindRecords := make([]*bindinfo.BindRecord, 0) + for _, bindRecord := range tmp { + bindRecords = append(bindRecords, bindRecord.Copy()) + } // Remove the invalid bindRecord. ind := 0 for _, bindData := range bindRecords { diff --git a/executor/slow_query.go b/executor/slow_query.go index b83a480f85857..e8a2731a476e7 100644 --- a/executor/slow_query.go +++ b/executor/slow_query.go @@ -52,6 +52,8 @@ import ( "golang.org/x/exp/slices" ) +type signalsKey struct{} + // ParseSlowLogBatchSize is the batch size of slow-log lines for a worker to parse, exported for testing. 
var ParseSlowLogBatchSize = 64 @@ -474,7 +476,7 @@ func (e *slowQueryRetriever) parseSlowLog(ctx context.Context, sctx sessionctx.C } failpoint.Inject("mockReadSlowLogSlow", func(val failpoint.Value) { if val.(bool) { - signals := ctx.Value("signals").([]chan int) + signals := ctx.Value(signalsKey{}).([]chan int) signals[0] <- 1 <-signals[1] } diff --git a/executor/slow_query_test.go b/executor/slow_query_test.go index d696afa3c945d..fe2a5b68a329a 100644 --- a/executor/slow_query_test.go +++ b/executor/slow_query_test.go @@ -666,7 +666,7 @@ select * from t;` retriever, err := newSlowQueryRetriever() require.NoError(t, err) var signal1, signal2 = make(chan int, 1), make(chan int, 1) - ctx := context.WithValue(context.Background(), "signals", []chan int{signal1, signal2}) + ctx := context.WithValue(context.Background(), signalsKey{}, []chan int{signal1, signal2}) ctx, cancel := context.WithCancel(ctx) err = failpoint.Enable("github.com/pingcap/tidb/executor/mockReadSlowLogSlow", "return(true)") require.NoError(t, err) diff --git a/executor/tiflashtest/tiflash_test.go b/executor/tiflashtest/tiflash_test.go index e8cd94d889188..fec246a5c5057 100644 --- a/executor/tiflashtest/tiflash_test.go +++ b/executor/tiflashtest/tiflash_test.go @@ -1270,7 +1270,7 @@ func TestDisaggregatedTiFlash(t *testing.T) { tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") err = tk.ExecToErr("select * from t;") - require.Contains(t, err.Error(), "Please check tiflash_compute node is available") + require.Contains(t, err.Error(), "tiflash_compute node is unavailable") config.UpdateGlobal(func(conf *config.Config) { conf.DisaggregatedTiFlash = false @@ -1304,9 +1304,6 @@ func TestDisaggregatedTiFlashQuery(t *testing.T) { require.NoError(t, err) tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") - needCheckTiFlashComputeNode := "false" - failpoint.Enable("github.com/pingcap/tidb/planner/core/testDisaggregatedTiFlashQuery", fmt.Sprintf("return(%s)", needCheckTiFlashComputeNode)) - defer failpoint.Disable("github.com/pingcap/tidb/planner/core/testDisaggregatedTiFlashQuery") tk.MustExec("explain select max( tbl_1.col_1 ) as r0 , sum( tbl_1.col_1 ) as r1 , sum( tbl_1.col_8 ) as r2 from tbl_1 where tbl_1.col_8 != 68 or tbl_1.col_3 between null and 939 order by r0,r1,r2;") tk.MustExec("set @@tidb_partition_prune_mode = 'static';") diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go index dec5d06983679..bcb27a1233da7 100644 --- a/expression/builtin_compare.go +++ b/expression/builtin_compare.go @@ -1565,33 +1565,17 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express arg0Type, arg1Type := args[0].GetType(), args[1].GetType() arg0IsInt := arg0Type.EvalType() == types.ETInt arg1IsInt := arg1Type.EvalType() == types.ETInt - arg0IsString := arg0Type.EvalType() == types.ETString - arg1IsString := arg1Type.EvalType() == types.ETString arg0, arg0IsCon := args[0].(*Constant) arg1, arg1IsCon := args[1].(*Constant) isExceptional, finalArg0, finalArg1 := false, args[0], args[1] isPositiveInfinite, isNegativeInfinite := false, false - if MaybeOverOptimized4PlanCache(ctx, args) { - // To keep the result be compatible with MySQL, refine `int non-constant str constant` - // here and skip this refine operation in all other cases for safety. 
- if (arg0IsInt && !arg0IsCon && arg1IsString && arg1IsCon) || (arg1IsInt && !arg1IsCon && arg0IsString && arg0IsCon) { - var reason error - if arg1IsString { - reason = errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg1.String()) - } else { // arg0IsString - reason = errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg0.String()) - } - ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(reason) - RemoveMutableConst(ctx, args) - } else { - return args - } - } else if !ctx.GetSessionVars().StmtCtx.UseCache { - // We should remove the mutable constant for correctness, because its value may be changed. - RemoveMutableConst(ctx, args) - } // int non-constant [cmp] non-int constant if arg0IsInt && !arg0IsCon && !arg1IsInt && arg1IsCon { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg1}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg1.String())) + RemoveMutableConst(ctx, args) + } + arg1, isExceptional = RefineComparedConstant(ctx, *arg0Type, arg1, c.op) // Why check not null flag // eg: int_col > const_val(which is less than min_int32) @@ -1619,6 +1603,11 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express } // non-int constant [cmp] int non-constant if arg1IsInt && !arg1IsCon && !arg0IsInt && arg0IsCon { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg0}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg0.String())) + RemoveMutableConst(ctx, args) + } + arg0, isExceptional = RefineComparedConstant(ctx, *arg1Type, arg0, symmetricOp[c.op]) if !isExceptional || (isExceptional && mysql.HasNotNullFlag(arg1Type.GetFlag())) { finalArg0 = arg0 @@ -1636,6 +1625,11 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express } // int constant [cmp] year type if arg0IsCon && arg0IsInt && arg1Type.GetType() == mysql.TypeYear && !arg0.Value.IsNull() { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg0}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to YEAR", arg0.String())) + RemoveMutableConst(ctx, args) + } + adjusted, failed := types.AdjustYear(arg0.Value.GetInt64(), false) if failed == nil { arg0.Value.SetInt64(adjusted) @@ -1644,6 +1638,11 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express } // year type [cmp] int constant if arg1IsCon && arg1IsInt && arg0Type.GetType() == mysql.TypeYear && !arg1.Value.IsNull() { + if MaybeOverOptimized4PlanCache(ctx, []Expression{arg1}) { + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to YEAR", arg1.String())) + RemoveMutableConst(ctx, args) + } + adjusted, failed := types.AdjustYear(arg1.Value.GetInt64(), false) if failed == nil { arg1.Value.SetInt64(adjusted) diff --git a/expression/collation.go b/expression/collation.go index eebab0aa5bc1f..8b11d3198a40e 100644 --- a/expression/collation.go +++ b/expression/collation.go @@ -299,14 +299,6 @@ func deriveCollation(ctx sessionctx.Context, funcName string, args []Expression, return ec, nil } -// DeriveCollationFromExprs derives collation information from these expressions. -// Deprecated, use CheckAndDeriveCollationFromExprs instead. 
-// TODO: remove this function after the all usage is replaced by CheckAndDeriveCollationFromExprs -func DeriveCollationFromExprs(ctx sessionctx.Context, exprs ...Expression) (dstCharset, dstCollation string) { - collation := inferCollation(exprs...) - return collation.Charset, collation.Collation -} - // CheckAndDeriveCollationFromExprs derives collation information from these expressions, return error if derives collation error. func CheckAndDeriveCollationFromExprs(ctx sessionctx.Context, funcName string, evalType types.EvalType, args ...Expression) (et *ExprCollation, err error) { ec := inferCollation(args...) diff --git a/expression/integration_serial_test.go b/expression/integration_serial_test.go index b70b7be4a5070..c50aa687659a9 100644 --- a/expression/integration_serial_test.go +++ b/expression/integration_serial_test.go @@ -3790,7 +3790,7 @@ func TestPreparePlanCacheOnCachedTable(t *testing.T) { var err error se, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk.SetSession(se) diff --git a/expression/integration_test.go b/expression/integration_test.go index a49df593d201c..5555de7e0aa62 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -7893,3 +7893,12 @@ func TestIssue39146(t *testing.T) { tk.MustExec("set @@tidb_enable_vectorized_expression = off;") tk.MustQuery(`select str_to_date(substr(dest,1,6),'%H%i%s') from sun;`).Check(testkit.Rows("20:23:10")) } + +func TestIssue40536(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("CREATE TABLE `6bf9e76d-ab44-4031-8a07-418b10741580` (\n `e0b5f703-6cfe-49b4-bc21-16a6455e43a7` set('7','va','ung60','ow','1g','gxwz5','uhnh','k','5la1','q8d9c','1f') NOT NULL DEFAULT '7,1g,uhnh,5la1,q8d9c',\n `fbc3527f-9617-4b9d-a5dc-4be31c00d8a5` datetime DEFAULT '6449-09-28 14:39:04',\n PRIMARY KEY (`e0b5f703-6cfe-49b4-bc21-16a6455e43a7`) /*T![clustered_index] CLUSTERED */\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;") + tk.MustExec("CREATE TABLE `8919f3f4-25be-4a1a-904a-bb5e863d8fc8` (\n `9804d5f2-cbc7-43b7-b241-ea2656dc941a` enum('s951','36d','ua65','49yru','6l2em','4ea','jf2d2','vprsc','3yl7n','hz','ov') DEFAULT '4ea',\n `323cdbcb-0c14-4362-90ab-ea42caaed6a5` year(4) NOT NULL DEFAULT '1983',\n `b9b70f39-1a02-4114-9d7d-fa6259c1b691` time DEFAULT '20:18:04',\n PRIMARY KEY (`323cdbcb-0c14-4362-90ab-ea42caaed6a5`) /*T![clustered_index] CLUSTERED */,\n KEY `a704d6bb-772b-44ea-8cb0-6f7491c1aaa6` (`323cdbcb-0c14-4362-90ab-ea42caaed6a5`,`9804d5f2-cbc7-43b7-b241-ea2656dc941a`)\n) ENGINE=InnoDB DEFAULT CHARSET=ascii COLLATE=ascii_bin;") + tk.MustExec("delete from `6bf9e76d-ab44-4031-8a07-418b10741580` where not( `6bf9e76d-ab44-4031-8a07-418b10741580`.`e0b5f703-6cfe-49b4-bc21-16a6455e43a7` in ( select `9804d5f2-cbc7-43b7-b241-ea2656dc941a` from `8919f3f4-25be-4a1a-904a-bb5e863d8fc8` where `6bf9e76d-ab44-4031-8a07-418b10741580`.`e0b5f703-6cfe-49b4-bc21-16a6455e43a7` in ( '1f' ) and `6bf9e76d-ab44-4031-8a07-418b10741580`.`e0b5f703-6cfe-49b4-bc21-16a6455e43a7` in ( '1g' ,'va' ,'uhnh' ) ) ) and not( IsNull( `6bf9e76d-ab44-4031-8a07-418b10741580`.`e0b5f703-6cfe-49b4-bc21-16a6455e43a7` ) );\n") +} diff --git a/expression/util.go b/expression/util.go index 3f4b826239a1d..929dce489791d 100644 
--- a/expression/util.go +++ b/expression/util.go @@ -415,7 +415,6 @@ func ColumnSubstituteImpl(expr Expression, schema *Schema, newExprs []Expression if v.InOperand { newExpr = SetExprColumnInOperand(newExpr) } - newExpr.SetCoercibility(v.Coercibility()) return true, false, newExpr case *ScalarFunction: substituted := false @@ -438,7 +437,11 @@ func ColumnSubstituteImpl(expr Expression, schema *Schema, newExprs []Expression // cowExprRef is a copy-on-write util, args array allocation happens only // when expr in args is changed refExprArr := cowExprRef{v.GetArgs(), nil} - _, coll := DeriveCollationFromExprs(v.GetCtx(), v.GetArgs()...) + oldCollEt, err := CheckAndDeriveCollationFromExprs(v.GetCtx(), v.FuncName.L, v.RetType.EvalType(), v.GetArgs()...) + if err != nil { + logutil.BgLogger().Error("Unexpected error happened during ColumnSubstitution", zap.Stack("stack")) + return false, false, v + } var tmpArgForCollCheck []Expression if collate.NewCollationEnabled() { tmpArgForCollCheck = make([]Expression, len(v.GetArgs())) @@ -454,9 +457,18 @@ func ColumnSubstituteImpl(expr Expression, schema *Schema, newExprs []Expression changed = false copy(tmpArgForCollCheck, refExprArr.Result()) tmpArgForCollCheck[idx] = newFuncExpr - _, newColl := DeriveCollationFromExprs(v.GetCtx(), tmpArgForCollCheck...) - if coll == newColl { - changed = checkCollationStrictness(coll, newFuncExpr.GetType().GetCollate()) + newCollEt, err := CheckAndDeriveCollationFromExprs(v.GetCtx(), v.FuncName.L, v.RetType.EvalType(), tmpArgForCollCheck...) + if err != nil { + logutil.BgLogger().Error("Unexpected error happened during ColumnSubstitution", zap.Stack("stack")) + return false, failed, v + } + if oldCollEt.Collation == newCollEt.Collation { + if newFuncExpr.GetType().GetCollate() == arg.GetType().GetCollate() && newFuncExpr.Coercibility() == arg.Coercibility() { + // It's safe to use the new expression, otherwise some cases in projection push-down will be wrong. 
+ changed = true + } else { + changed = checkCollationStrictness(oldCollEt.Collation, newFuncExpr.GetType().GetCollate()) + } } } hasFail = hasFail || failed || oldChanged != changed diff --git a/go.mod b/go.mod index a11045833f165..88bc1f0fd6e53 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/Shopify/sarama v1.29.0 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 + github.com/apache/skywalking-eyes v0.4.0 github.com/ashanbrown/makezero v1.1.1 github.com/aws/aws-sdk-go v1.44.48 github.com/blacktear23/go-proxyprotocol v1.0.2 @@ -86,6 +87,7 @@ require ( github.com/soheilhy/cmux v0.1.5 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 + github.com/spkg/bom v1.0.0 github.com/stathat/consistent v1.0.0 github.com/stretchr/testify v1.8.1 github.com/tdakkota/asciicheck v0.1.1 @@ -119,7 +121,7 @@ require ( golang.org/x/term v0.4.0 golang.org/x/text v0.6.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.2.0 + golang.org/x/tools v0.5.0 google.golang.org/api v0.103.0 google.golang.org/grpc v1.51.0 gopkg.in/yaml.v2 v2.4.0 @@ -137,12 +139,16 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1 // indirect github.com/DataDog/zstd v1.4.5 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.2 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bmatcuk/doublestar/v2 v2.0.4 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect @@ -169,6 +175,7 @@ require ( github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/licensecheck v0.3.1 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect @@ -179,6 +186,8 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/huandu/xstrings v1.3.1 // indirect + github.com/imdario/mergo v0.3.11 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect @@ -201,6 +210,8 @@ require ( github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // 
indirect @@ -221,10 +232,12 @@ require ( github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/rivo/uniseg v0.4.2 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect + github.com/shopspring/decimal v1.2.0 // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect diff --git a/go.sum b/go.sum index 8bfacd2bddec7..d74e95aeb8783 100644 --- a/go.sum +++ b/go.sum @@ -409,8 +409,14 @@ github.com/Jeffail/gabs/v2 v2.5.1 h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk github.com/Jeffail/gabs/v2 v2.5.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= @@ -436,6 +442,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 h1:Q/yk4z/cHUVZfgTqtD09qeYBxHwshQAjVRX73qs8UH0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/skywalking-eyes v0.4.0 h1:O13kdRU6FCEZevfD01mdhTgCZLLfPZIQ0GXZrLl7FpQ= +github.com/apache/skywalking-eyes v0.4.0/go.mod h1:WblDbBgOLsLN0FJEBa9xj6PhuUA/J6spKYVTG4/F8Ls= github.com/apache/thrift v0.0.0-20181112125854-24918abba929/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 h1:Jz3KVLYY5+JO7rDiX0sAuRGtuv2vG01r17Y9nLMWNUw= github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -463,6 +471,8 @@ github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blacktear23/go-proxyprotocol v1.0.2 h1:zR7PZeoU0wAkElcIXenFiy3R56WB6A+UEVi4c6RH8wo= 
github.com/blacktear23/go-proxyprotocol v1.0.2/go.mod h1:FSCbgnRZrQXazBLL5snfBbrcFSMtcmUDhSRb9OfFA1o= +github.com/bmatcuk/doublestar/v2 v2.0.4 h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI= +github.com/bmatcuk/doublestar/v2 v2.0.4/go.mod h1:QMmcs3H2AUQICWhfzLXz+IYln8lRQmTZRptLie8RgRw= github.com/carlmjohnson/flagext v0.21.0 h1:/c4uK3ie786Z7caXLcIMvePNSSiH3bQVGDvmGLMme60= github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac= github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= @@ -543,6 +553,7 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -618,8 +629,8 @@ github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -753,10 +764,14 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/licensecheck v0.3.1 h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs= +github.com/google/licensecheck v0.3.1/go.mod h1:ORkR35t/JjW+emNKtfJDII0zlciG9JgbT7SmsohlHmY= github.com/google/martian v2.1.0+incompatible 
h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -781,6 +796,7 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20211122183932-1daafda22083 h1:c8EUapQFi+kjzedr4c6WqbwMdmB95+oDBWZ5XFHFYxY= github.com/google/pprof v0.0.0-20211122183932-1daafda22083/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -866,12 +882,16 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= @@ -1023,6 +1043,8 @@ github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/le github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.10/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod 
h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -1031,6 +1053,8 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -1210,6 +1234,8 @@ github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8 github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4c5eA= github.com/shirou/gopsutil/v3 v3.22.9/go.mod h1:bBYl1kjgEJpWpxeHmLI+dVHWtyAwfcmSBLDsp2TNT8A= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= @@ -1223,6 +1249,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1237,8 +1264,12 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= 
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -1247,6 +1278,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spkg/bom v1.0.0 h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64= +github.com/spkg/bom v1.0.0/go.mod h1:lAz2VbTuYNcvs7iaFF8WW0ufXrHShJ7ck1fYFFbVXJs= github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U= github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1423,6 +1456,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1430,6 +1464,7 @@ golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1539,6 +1574,7 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220517181318-183a9ca12b87/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1680,6 +1716,7 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1796,8 +1833,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2080,6 +2117,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/infoschema/BUILD.bazel b/infoschema/BUILD.bazel index a92608a8df1b3..f827394be7904 100644 --- a/infoschema/BUILD.bazel +++ b/infoschema/BUILD.bazel @@ -56,6 +56,7 @@ go_library( "@com_github_tikv_client_go_v2//tikv", "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], diff --git a/infoschema/cluster_tables_test.go b/infoschema/cluster_tables_test.go index 781f52bebe519..ce73bb726ae4c 100644 --- a/infoschema/cluster_tables_test.go +++ b/infoschema/cluster_tables_test.go @@ -1053,6 +1053,8 @@ func TestSetBindingStatusBySQLDigest(t *testing.T) { tk.MustExec(sql) tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1")) 
tk.MustGetErrMsg("set binding enabled for sql digest '2'", "can't find any binding for '2'") + tk.MustGetErrMsg("set binding enabled for sql digest ''", "sql digest is empty") + tk.MustGetErrMsg("set binding disabled for sql digest ''", "sql digest is empty") } func TestCreateBindingWhenCloseStmtSummaryTable(t *testing.T) { diff --git a/infoschema/tables.go b/infoschema/tables.go index 35b199be043cd..d10410d01840a 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -58,6 +58,7 @@ import ( "golang.org/x/exp/slices" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) const ( @@ -2338,7 +2339,7 @@ func serverInfoItemToRows(items []*diagnosticspb.ServerInfoItem, tp, addr string } func getServerInfoByGRPC(ctx context.Context, address string, tp diagnosticspb.ServerInfoType) ([]*diagnosticspb.ServerInfoItem, error) { - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) security := config.GetGlobalConfig().Security if len(security.ClusterSSLCA) != 0 { clusterSecurity := security.ClusterSecurity() diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index e2f04758dda6e..f796345bbd8e7 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -56,8 +56,7 @@ func newTestKitWithRoot(t *testing.T, store kv.Storage) *testkit.TestKit { func newTestKitWithPlanCache(t *testing.T, store kv.Storage) *testkit.TestKit { tk := testkit.NewTestKit(t, store) - se, err := session.CreateSession4TestWithOpt(store, &session.Opt{PreparedPlanCache: plannercore.NewLRUPlanCache(100, - 0.1, math.MaxUint64, plannercore.PickPlanFromBucket, tk.Session())}) + se, err := session.CreateSession4TestWithOpt(store, &session.Opt{PreparedPlanCache: plannercore.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session())}) require.NoError(t, err) tk.SetSession(se) tk.RefreshConnectionID() diff --git a/kv/option.go b/kv/option.go index 80d0f7792f172..d779ff61ac215 100644 --- a/kv/option.go +++ b/kv/option.go @@ -15,6 +15,8 @@ package kv import ( + "context" + "github.com/tikv/client-go/v2/util" ) @@ -136,6 +138,15 @@ type RequestSource = util.RequestSource // WithInternalSourceType create context with internal source. var WithInternalSourceType = util.WithInternalSourceType +// GetInternalSourceType get internal source +func GetInternalSourceType(ctx context.Context) string { + v := ctx.Value(util.RequestSourceKey) + if v == nil { + return "" + } + return v.(util.RequestSource).RequestSourceType +} + const ( // InternalTxnOthers is the type of requests that consume low resources. // This reduces the size of metrics. 
diff --git a/meta/autoid/BUILD.bazel b/meta/autoid/BUILD.bazel index b67f7f7c223c7..d6bcc1ef94689 100644 --- a/meta/autoid/BUILD.bazel +++ b/meta/autoid/BUILD.bazel @@ -32,6 +32,7 @@ go_library( "@io_etcd_go_etcd_client_v3//:client", "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_uber_go_zap//:zap", ], ) diff --git a/meta/autoid/autoid_service.go b/meta/autoid/autoid_service.go index 0b0f4946b3d0c..314ac3beef679 100644 --- a/meta/autoid/autoid_service.go +++ b/meta/autoid/autoid_service.go @@ -30,6 +30,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) var _ Allocator = &singlePointAlloc{} @@ -83,7 +84,7 @@ func (d *clientDiscover) GetClient(ctx context.Context) (autoid.AutoIDAllocClien } addr := string(resp.Kvs[0].Value) - opt := grpc.WithInsecure() + opt := grpc.WithTransportCredentials(insecure.NewCredentials()) security := config.GetGlobalConfig().Security if len(security.ClusterSSLCA) != 0 { clusterSecurity := security.ClusterSecurity() diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index 25042b63a8bab..f8b125746a804 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -18049,6 +18049,108 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "The TTL task statuses in each worker", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 100 + }, + "hiddenSeries": false, + "id": 294, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "running", + "color": "#5794F2" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(tidb_server_ttl_task_status{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}) by (type, instance)", + "interval": "", + "legendFormat": "{{ instance }} {{ type }}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "TTL Task Count By Status", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "title": "TTL", diff --git a/metrics/grafana/tidb_runtime.json b/metrics/grafana/tidb_runtime.json index 0324f6851de98..d17b1d90a104e 100644 --- a/metrics/grafana/tidb_runtime.json +++ b/metrics/grafana/tidb_runtime.json @@ -1384,6 +1384,131 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": 
false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "the time goroutines have spent in the scheduler", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 57 + }, + "id": 30, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.999, sum(rate(go_sched_latencies_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[30s])) by (le, instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "999", + "refId": "A", + "step": 10 + }, + { + "expr": "histogram_quantile(0.9999, sum(rate(go_sched_latencies_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[30s])) by (le, instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "9999", + "refId": "B", + "step": 10 + }, + { + "expr": "histogram_quantile(0.99999, sum(rate(go_sched_latencies_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[30s])) by (le, instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "99999", + "refId": "C", + "step": 10 + }, + { + "expr": "histogram_quantile(0.999999, sum(rate(go_sched_latencies_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[30s])) by (le, instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "999999", + "refId": "D", + "step": 10 + }, + { + "expr": "histogram_quantile(1, sum(rate(go_sched_latencies_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[30s])) by (le, instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "max", + "refId": "E", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "goroutine scheduler lattency", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "repeat": "instance", diff --git a/metrics/metrics.go b/metrics/metrics.go index 633aa551564bc..68a2729f3483c 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -214,6 +214,7 @@ func RegisterMetrics() { prometheus.MustRegister(TTLQueryDuration) prometheus.MustRegister(TTLProcessedExpiredRowsCounter) prometheus.MustRegister(TTLJobStatus) + prometheus.MustRegister(TTLTaskStatus) prometheus.MustRegister(TTLPhaseTime) prometheus.MustRegister(EMACPUUsageGauge) diff --git a/metrics/ttl.go b/metrics/ttl.go index ab7e47e615e28..754744e93d1d8 100644 --- a/metrics/ttl.go +++ b/metrics/ttl.go @@ -43,6 +43,14 @@ var ( Help: "The jobs count in the 
specified status", }, []string{LblType}) + TTLTaskStatus = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "tidb", + Subsystem: "server", + Name: "ttl_task_status", + Help: "The tasks count in the specified status", + }, []string{LblType}) + TTLPhaseTime = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "tidb", diff --git a/parser/parser.go b/parser/parser.go index 0412b37bce71c..5aab37d65a5ef 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -15582,19 +15582,22 @@ yynewstate: { field := yyS[yypt-0].item.(*ast.SelectField) field.Offset = parser.startOffset(&yyS[yypt]) + if field.Expr != nil && field.AsName.O == "" { + endOffset := parser.yylval.offset + field.SetText(parser.lexer.client, strings.TrimSpace(parser.src[field.Offset:endOffset])) + } parser.yyVAL.item = []*ast.SelectField{field} } case 689: { fl := yyS[yypt-2].item.([]*ast.SelectField) - last := fl[len(fl)-1] - if last.Expr != nil && last.AsName.O == "" { - lastEnd := parser.endOffset(&yyS[yypt-1]) - last.SetText(parser.lexer.client, parser.src[last.Offset:lastEnd]) + field := yyS[yypt-0].item.(*ast.SelectField) + field.Offset = parser.startOffset(&yyS[yypt]) + if field.Expr != nil && field.AsName.O == "" { + endOffset := parser.yylval.offset + field.SetText(parser.lexer.client, strings.TrimSpace(parser.src[field.Offset:endOffset])) } - newField := yyS[yypt-0].item.(*ast.SelectField) - newField.Offset = parser.startOffset(&yyS[yypt]) - parser.yyVAL.item = append(fl, newField) + parser.yyVAL.item = append(fl, field) } case 690: { @@ -17543,30 +17546,6 @@ yynewstate: if yyS[yypt-1].item != nil { st.LockInfo = yyS[yypt-1].item.(*ast.SelectLockInfo) } - lastField := st.Fields.Fields[len(st.Fields.Fields)-1] - if lastField.Expr != nil && lastField.AsName.O == "" { - src := parser.src - var lastEnd int - if yyS[yypt-5].item != nil { - lastEnd = yyS[yypt-5].offset - 1 - } else if yyS[yypt-4].item != nil { - lastEnd = yyS[yypt-4].offset - 1 - } else if yyS[yypt-3].item != nil { - lastEnd = yyS[yypt-3].offset - 1 - } else if yyS[yypt-2].item != nil { - lastEnd = yyS[yypt-2].offset - 1 - } else if st.LockInfo != nil && st.LockInfo.LockType != ast.SelectLockNone { - lastEnd = yyS[yypt-1].offset - 1 - } else if yyS[yypt-0].item != nil { - lastEnd = yyS[yypt].offset - 1 - } else { - lastEnd = len(src) - if src[lastEnd-1] == ';' { - lastEnd-- - } - } - lastField.SetText(parser.lexer.client, src[lastField.Offset:lastEnd]) - } if yyS[yypt-5].item != nil { st.Where = yyS[yypt-5].item.(ast.ExprNode) } diff --git a/parser/parser.y b/parser/parser.y index 51015c919f364..11b53153ed9e7 100644 --- a/parser/parser.y +++ b/parser/parser.y @@ -5903,19 +5903,22 @@ FieldList: { field := $1.(*ast.SelectField) field.Offset = parser.startOffset(&yyS[yypt]) + if field.Expr != nil && field.AsName.O == "" { + endOffset := parser.yylval.offset + field.SetText(parser.lexer.client, strings.TrimSpace(parser.src[field.Offset:endOffset])) + } $$ = []*ast.SelectField{field} } | FieldList ',' Field { fl := $1.([]*ast.SelectField) - last := fl[len(fl)-1] - if last.Expr != nil && last.AsName.O == "" { - lastEnd := parser.endOffset(&yyS[yypt-1]) - last.SetText(parser.lexer.client, parser.src[last.Offset:lastEnd]) + field := $3.(*ast.SelectField) + field.Offset = parser.startOffset(&yyS[yypt]) + if field.Expr != nil && field.AsName.O == "" { + endOffset := parser.yylval.offset + field.SetText(parser.lexer.client, strings.TrimSpace(parser.src[field.Offset:endOffset])) } - newField := $3.(*ast.SelectField) - newField.Offset = 
parser.startOffset(&yyS[yypt]) - $$ = append(fl, newField) + $$ = append(fl, field) } GroupByClause: @@ -8677,30 +8680,6 @@ SelectStmt: if $6 != nil { st.LockInfo = $6.(*ast.SelectLockInfo) } - lastField := st.Fields.Fields[len(st.Fields.Fields)-1] - if lastField.Expr != nil && lastField.AsName.O == "" { - src := parser.src - var lastEnd int - if $2 != nil { - lastEnd = yyS[yypt-5].offset - 1 - } else if $3 != nil { - lastEnd = yyS[yypt-4].offset - 1 - } else if $4 != nil { - lastEnd = yyS[yypt-3].offset - 1 - } else if $5 != nil { - lastEnd = yyS[yypt-2].offset - 1 - } else if st.LockInfo != nil && st.LockInfo.LockType != ast.SelectLockNone { - lastEnd = yyS[yypt-1].offset - 1 - } else if $7 != nil { - lastEnd = yyS[yypt].offset - 1 - } else { - lastEnd = len(src) - if src[lastEnd-1] == ';' { - lastEnd-- - } - } - lastField.SetText(parser.lexer.client, src[lastField.Offset:lastEnd]) - } if $2 != nil { st.Where = $2.(ast.ExprNode) } @@ -10795,7 +10774,7 @@ ShowStmt: | "SHOW" "CREATE" "RESOURCE" "GROUP" ResourceGroupName { $$ = &ast.ShowStmt{ - Tp: ast.ShowCreateResourceGroup, + Tp: ast.ShowCreateResourceGroup, ResourceGroupName: $5, } } diff --git a/parser/parser_test.go b/parser/parser_test.go index 2fb9b073fd398..a5dbb70d9cf67 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -7142,3 +7142,21 @@ func TestTTLTableOption(t *testing.T) { RunTest(t, table, false) } + +func TestMultiStmt(t *testing.T) { + p := parser.New() + stmts, _, err := p.Parse("SELECT 'foo'; SELECT 'foo;bar','baz'; select 'foo' , 'bar' , 'baz' ;select 1", "", "") + require.NoError(t, err) + require.Equal(t, len(stmts), 4) + stmt1 := stmts[0].(*ast.SelectStmt) + stmt2 := stmts[1].(*ast.SelectStmt) + stmt3 := stmts[2].(*ast.SelectStmt) + stmt4 := stmts[3].(*ast.SelectStmt) + require.Equal(t, "'foo'", stmt1.Fields.Fields[0].Text()) + require.Equal(t, "'foo;bar'", stmt2.Fields.Fields[0].Text()) + require.Equal(t, "'baz'", stmt2.Fields.Fields[1].Text()) + require.Equal(t, "'foo'", stmt3.Fields.Fields[0].Text()) + require.Equal(t, "'bar'", stmt3.Fields.Fields[1].Text()) + require.Equal(t, "'baz'", stmt3.Fields.Fields[2].Text()) + require.Equal(t, "1", stmt4.Fields.Fields[0].Text()) +} diff --git a/planner/core/BUILD.bazel b/planner/core/BUILD.bazel index 3afbdf3b8a0bc..12c2730c59364 100644 --- a/planner/core/BUILD.bazel +++ b/planner/core/BUILD.bazel @@ -107,7 +107,6 @@ go_library( "//sessiontxn/staleread", "//statistics", "//statistics/handle", - "//store/driver/backoff", "//table", "//table/tables", "//table/temptable", diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 205a2d8242b4b..61575810da1fb 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -1325,8 +1325,11 @@ func (ijHelper *indexJoinBuildHelper) resetContextForIndex(innerKeys []*expressi if ijHelper.curIdxOff2KeyOff[i] >= 0 { // Don't use the join columns if their collations are unmatched and the new collation is enabled. 
if collate.NewCollationEnabled() && types.IsString(idxCol.RetType.GetType()) && types.IsString(outerKeys[ijHelper.curIdxOff2KeyOff[i]].RetType.GetType()) { - _, coll := expression.DeriveCollationFromExprs(nil, idxCol, outerKeys[ijHelper.curIdxOff2KeyOff[i]]) - if !collate.CompatibleCollate(idxCol.GetType().GetCollate(), coll) { + et, err := expression.CheckAndDeriveCollationFromExprs(ijHelper.innerPlan.ctx, "equal", types.ETInt, idxCol, outerKeys[ijHelper.curIdxOff2KeyOff[i]]) + if err != nil { + logutil.BgLogger().Error("Unexpected error happened during constructing index join", zap.Stack("stack")) + } + if !collate.CompatibleCollate(idxCol.GetType().GetCollate(), et.Collation) { ijHelper.curIdxOff2KeyOff[i] = -1 } } diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index a2d1f242a0ff6..c7af8385d556c 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -1563,7 +1563,7 @@ func (er *expressionRewriter) inToExpression(lLen int, not bool, tp *types.Field continue // no need to refine it } er.sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to INT", c.String())) - expression.RemoveMutableConst(er.sctx, []expression.Expression{c}) + expression.RemoveMutableConst(er.sctx, args) } args[i], isExceptional = expression.RefineComparedConstant(er.sctx, *leftFt, c, opcode.EQ) if isExceptional { diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index c2d3e1a62d7bc..1ecc9f995243c 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -944,6 +944,9 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if canConvertPointGet && expression.MaybeOverOptimized4PlanCache(ds.ctx, path.AccessConds) { canConvertPointGet = ds.canConvertToPointGetForPlanCache(path) } + if canConvertPointGet && path.Index != nil && path.Index.MVIndex { + canConvertPointGet = false // cannot use PointGet upon MVIndex + } if canConvertPointGet && !path.IsIntHandlePath { // We simply do not build [batch] point get for prefix indexes. This can be optimized. @@ -2012,15 +2015,16 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid } // In disaggregated tiflash mode, only MPP is allowed, cop and batchCop is deprecated. // So if prop.TaskTp is RootTaskType, have to use mppTask then convert to rootTask. - isDisaggregatedTiFlashPath := config.GetGlobalConfig().DisaggregatedTiFlash && ts.StoreType == kv.TiFlash + isDisaggregatedTiFlash := config.GetGlobalConfig().DisaggregatedTiFlash + isDisaggregatedTiFlashPath := isDisaggregatedTiFlash && ts.StoreType == kv.TiFlash canMppConvertToRootForDisaggregatedTiFlash := isDisaggregatedTiFlashPath && prop.TaskTp == property.RootTaskType && ds.SCtx().GetSessionVars().IsMPPAllowed() if prop.TaskTp == property.MppTaskType || canMppConvertToRootForDisaggregatedTiFlash { if ts.KeepOrder { return invalidTask, nil } - if prop.MPPPartitionTp != property.AnyType || (ts.isPartition && !canMppConvertToRootForDisaggregatedTiFlash) { + if prop.MPPPartitionTp != property.AnyType || (ts.isPartition && !isDisaggregatedTiFlash) { // If ts is a single partition, then this partition table is in static-only prune, then we should not choose mpp execution. - // But in disaggregated tiflash mode, we can only use mpp, so we add ExchangeSender and ExchangeReceiver above TableScan for static pruning partition table. 
+ // But in disaggregated tiflash mode, we enable using mpp for static pruning partition table, because cop and batchCop are deprecated. ds.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because table `" + ds.tableInfo.Name.O + "`is a partition table which is not supported when `@@tidb_partition_prune_mode=static`.") return invalidTask, nil } @@ -2049,7 +2053,11 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid // So have to return a rootTask, but prop requires mppTask, cannot meet this requirement. task = invalidTask } else if prop.TaskTp == property.RootTaskType { - // when got here, canMppConvertToRootForDisaggregatedTiFlash is true. + // When we get here, canMppConvertToRootForDisaggregatedTiFlash is true. + // This is for situations where an mppTask cannot be generated for some operators, + // such as when the build side of HashJoin is a Projection + // that cannot be pushed down to tiflash (because TiFlash doesn't support some expressions in Proj). + // So HashJoin cannot be pushed down to tiflash, but we still want TableScan to run on tiflash. task = mppTask task = task.convertToRootTask(ds.ctx) if !task.invalid() { diff --git a/planner/core/indexmerge_path.go b/planner/core/indexmerge_path.go index 8a71e5d5d4f5a..27500e1d8d816 100644 --- a/planner/core/indexmerge_path.go +++ b/planner/core/indexmerge_path.go @@ -15,7 +15,6 @@ package core import ( - "fmt" "math" "strings" @@ -37,6 +36,16 @@ import ( // generateIndexMergePath generates IndexMerge AccessPaths on this DataSource. func (ds *DataSource) generateIndexMergePath() error { + var warningMsg string + stmtCtx := ds.ctx.GetSessionVars().StmtCtx + defer func() { + if len(ds.indexMergeHints) > 0 && warningMsg != "" { + ds.indexMergeHints = nil + stmtCtx.AppendWarning(errors.Errorf(warningMsg)) + logutil.BgLogger().Debug(warningMsg) + } + }() + // Consider the IndexMergePath. Now, we just generate `IndexMergePath` in DNF case. // Use allConds instread of pushedDownConds, // because we want to use IndexMerge even if some expr cannot be pushed to TiKV. @@ -46,67 +55,49 @@ func (ds *DataSource) generateIndexMergePath() error { indexMergeConds = append(indexMergeConds, expression.PushDownNot(ds.ctx, expr)) } - stmtCtx := ds.ctx.GetSessionVars().StmtCtx - isPossibleIdxMerge := len(indexMergeConds) > 0 && // have corresponding access conditions, and - (len(ds.possibleAccessPaths) > 1 || // (have multiple index paths, or - (len(ds.possibleAccessPaths) == 1 && isMVIndexPath(ds.possibleAccessPaths[0]))) // have a MVIndex) sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || len(ds.indexMergeHints) > 0) && !stmtCtx.NoIndexMergeHint - // We current do not consider `IndexMergePath`: - // 1. If there is an index path. - // 2. TODO: If there exists exprs that cannot be pushed down. This is to avoid wrongly estRow of Selection added by rule_predicate_push_down. - needConsiderIndexMerge := true - if len(ds.indexMergeHints) == 0 { - for i := 1; i < len(ds.possibleAccessPaths); i++ { - if len(ds.possibleAccessPaths[i].AccessConds) != 0 { - needConsiderIndexMerge = false - break - } - } - if needConsiderIndexMerge { - // PushDownExprs() will append extra warnings, which is annoying. So we reset warnings here.
- warnings := stmtCtx.GetWarnings() - extraWarnings := stmtCtx.GetExtraWarnings() - _, remaining := expression.PushDownExprs(stmtCtx, indexMergeConds, ds.ctx.GetClient(), kv.UnSpecified) - stmtCtx.SetWarnings(warnings) - stmtCtx.SetExtraWarnings(extraWarnings) + if !sessionAndStmtPermission { + warningMsg = "IndexMerge is inapplicable or disabled. Got no_index_merge hint or tidb_enable_index_merge is off." + return nil + } - remainingExpr := 0 - for _, expr := range remaining { - // Handle these 3 functions specially since they can be used to access MVIndex. - if sf, ok := expr.(*expression.ScalarFunction); ok { - if sf.FuncName.L == ast.JSONMemberOf || sf.FuncName.L == ast.JSONOverlaps || - sf.FuncName.L == ast.JSONContains { - continue - } - } - remainingExpr++ - } - if remainingExpr > 0 { - needConsiderIndexMerge = false - } - } + if ds.tableInfo.TempTableType == model.TempTableLocal { + warningMsg = "IndexMerge is inapplicable or disabled. Cannot use IndexMerge on temporary table." + return nil + } - if isPossibleIdxMerge && sessionAndStmtPermission && needConsiderIndexMerge && ds.tableInfo.TempTableType != model.TempTableLocal { - err := ds.generateAndPruneIndexMergePath(indexMergeConds, ds.indexMergeHints != nil) - if err != nil { - return err - } - } else if len(ds.indexMergeHints) > 0 { - ds.indexMergeHints = nil - var msg string - if !isPossibleIdxMerge { - msg = "No available filter or available index." - } else if !sessionAndStmtPermission { - msg = "Got no_index_merge hint or tidb_enable_index_merge is off." - } else if ds.tableInfo.TempTableType == model.TempTableLocal { - msg = "Cannot use IndexMerge on temporary table." + regularPathCount := len(ds.possibleAccessPaths) + var err error + if warningMsg, err = ds.generateIndexMerge4NormalIndex(regularPathCount, indexMergeConds); err != nil { + return err + } + if err := ds.generateIndexMerge4MVIndex(regularPathCount, indexMergeConds); err != nil { + return err + } + + // If there are no hints, it means that `enableIndexMerge` is true + if len(ds.indexMergeHints) == 0 { + return nil + } + // If len(indexMergeHints) > 0, then add warnings if index-merge hints cannot work. + if regularPathCount == len(ds.possibleAccessPaths) { + if warningMsg == "" { + warningMsg = "IndexMerge is inapplicable" } - msg = fmt.Sprintf("IndexMerge is inapplicable or disabled. %s", msg) - stmtCtx.AppendWarning(errors.Errorf(msg)) - logutil.BgLogger().Debug(msg) + return nil } + // If len(indexMergeHints) > 0 and some index-merge paths were added, then prune all other non-index-merge paths.
+ ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] + minRowCount := ds.possibleAccessPaths[0].CountAfterAccess + for _, path := range ds.possibleAccessPaths { + if minRowCount < path.CountAfterAccess { + minRowCount = path.CountAfterAccess + } + } + if ds.stats.RowCount > minRowCount { + ds.stats = ds.tableStats.ScaleByExpectCnt(minRowCount) + } return nil } @@ -441,57 +432,53 @@ func (ds *DataSource) generateIndexMergeAndPaths(normalPathCnt int) *util.Access return indexMergePath } -func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expression.Expression, needPrune bool) error { - regularPathCount := len(ds.possibleAccessPaths) +func (ds *DataSource) generateIndexMerge4NormalIndex(regularPathCount int, indexMergeConds []expression.Expression) (string, error) { + isPossibleIdxMerge := len(indexMergeConds) > 0 && // have corresponding access conditions, and + len(ds.possibleAccessPaths) > 1 // have multiple index paths + if !isPossibleIdxMerge { + return "IndexMerge is inapplicable or disabled. No available filter or available index.", nil + } + + // We currently do not consider `IndexMergePath`: + // 1. If there is an index path. + // 2. TODO: If there exists exprs that cannot be pushed down. This is to avoid wrongly estRow of Selection added by rule_predicate_push_down. + stmtCtx := ds.ctx.GetSessionVars().StmtCtx + needConsiderIndexMerge := true + if len(ds.indexMergeHints) == 0 { + for i := 1; i < len(ds.possibleAccessPaths); i++ { + if len(ds.possibleAccessPaths[i].AccessConds) != 0 { + needConsiderIndexMerge = false + break + } + } + if needConsiderIndexMerge { + // PushDownExprs() will append extra warnings, which is annoying. So we reset warnings here. + warnings := stmtCtx.GetWarnings() + extraWarnings := stmtCtx.GetExtraWarnings() + _, remaining := expression.PushDownExprs(stmtCtx, indexMergeConds, ds.ctx.GetClient(), kv.UnSpecified) + stmtCtx.SetWarnings(warnings) + stmtCtx.SetExtraWarnings(extraWarnings) + if len(remaining) > 0 { + needConsiderIndexMerge = false + } + } + } + + if !needConsiderIndexMerge { + return "IndexMerge is inapplicable or disabled. ", nil // IndexMerge is inapplicable + } + + // 1. Generate possible IndexMerge paths for `OR`. err := ds.generateIndexMergeOrPaths(indexMergeConds) if err != nil { - return err + return "", err } // 2. Generate possible IndexMerge paths for `AND`. indexMergeAndPath := ds.generateIndexMergeAndPaths(regularPathCount) if indexMergeAndPath != nil { ds.possibleAccessPaths = append(ds.possibleAccessPaths, indexMergeAndPath) } - // 3. Generate possible IndexMerge paths for MVIndex. - mvIndexMergePath, err := ds.generateIndexMerge4MVIndex(regularPathCount, indexMergeConds) - if err != nil { - return err - } - if mvIndexMergePath != nil { - ds.possibleAccessPaths = append(ds.possibleAccessPaths, mvIndexMergePath...) - } - - // 4. If needed, append a warning if no IndexMerge is generated. - - // If without hints, it means that `enableIndexMerge` is true - if len(ds.indexMergeHints) == 0 { - return nil - } - // With hints and without generated IndexMerge paths - if regularPathCount == len(ds.possibleAccessPaths) { - ds.indexMergeHints = nil - ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable")) - return nil - } - - // 4. If needPrune is true, prune non-IndexMerge paths. - - // Do not need to consider the regular paths in find_best_task(). - // So we can use index merge's row count as DataSource's row count.
- if needPrune { - ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] - minRowCount := ds.possibleAccessPaths[0].CountAfterAccess - for _, path := range ds.possibleAccessPaths { - if minRowCount < path.CountAfterAccess { - minRowCount = path.CountAfterAccess - } - } - if ds.stats.RowCount > minRowCount { - ds.stats = ds.tableStats.ScaleByExpectCnt(minRowCount) - } - } - return nil + return "", nil } // generateIndexMergeOnDNF4MVIndex generates IndexMerge paths for MVIndex upon DNF filters. @@ -582,12 +569,12 @@ func (ds *DataSource) generateIndexMergeOnDNF4MVIndex(normalPathCnt int, filters IndexRangeScan(a, [3,3]) TableRowIdScan(t) */ -func (ds *DataSource) generateIndexMerge4MVIndex(normalPathCnt int, filters []expression.Expression) (mvIndexPaths []*util.AccessPath, err error) { +func (ds *DataSource) generateIndexMerge4MVIndex(normalPathCnt int, filters []expression.Expression) error { dnfMVIndexPaths, err := ds.generateIndexMergeOnDNF4MVIndex(normalPathCnt, filters) if err != nil { - return nil, err + return err } - mvIndexPaths = append(mvIndexPaths, dnfMVIndexPaths...) + ds.possibleAccessPaths = append(ds.possibleAccessPaths, dnfMVIndexPaths...) for idx := 0; idx < normalPathCnt; idx++ { if !isMVIndexPath(ds.possibleAccessPaths[idx]) { @@ -606,15 +593,15 @@ func (ds *DataSource) generateIndexMerge4MVIndex(normalPathCnt int, filters []ex partialPaths, isIntersection, ok, err := ds.buildPartialPaths4MVIndex(accessFilters, idxCols, ds.possibleAccessPaths[idx].Index) if err != nil { - return nil, err + return err } if !ok { continue } - mvIndexPaths = append(mvIndexPaths, ds.buildPartialPathUp4MVIndex(partialPaths, isIntersection, remainingFilters)) + ds.possibleAccessPaths = append(ds.possibleAccessPaths, ds.buildPartialPathUp4MVIndex(partialPaths, isIntersection, remainingFilters)) } - return + return nil } // buildPartialPathUp4MVIndex builds these partial paths up to a complete index merge path. 
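Taken together, the indexmerge_path.go hunks above replace scattered per-branch warnings with a single reason string: helpers such as `generateIndexMerge4NormalIndex` return why index merge was rejected, and one deferred hook in `generateIndexMergePath` emits at most one warning when hints were given. A stripped-down, self-contained sketch of that pattern (hypothetical names, not TiDB's actual API):

```go
package main

import "fmt"

// planIndexMerge mimics the refactored control flow: helper logic records why
// index merge was rejected instead of warning directly, and a single deferred
// hook decides whether the reason should surface.
func planIndexMerge(hinted bool, paths []string) []string {
	var warningMsg string
	defer func() {
		// Mirrors the `len(ds.indexMergeHints) > 0 && warningMsg != ""` gate:
		// warn only when the user asked for index merge and it was not applied.
		if hinted && warningMsg != "" {
			fmt.Println("warning:", warningMsg)
		}
	}()

	if len(paths) < 2 {
		warningMsg = "IndexMerge is inapplicable or disabled. No available filter or available index."
		return nil
	}
	return paths
}

func main() {
	planIndexMerge(true, nil)                               // emits the warning
	planIndexMerge(false, nil)                              // silent without a hint
	fmt.Println(planIndexMerge(true, []string{"ia", "ib"})) // [ia ib]
}
```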
diff --git a/planner/core/indexmerge_path_test.go b/planner/core/indexmerge_path_test.go index 1119cfb5c666e..b1487249b0902 100644 --- a/planner/core/indexmerge_path_test.go +++ b/planner/core/indexmerge_path_test.go @@ -15,13 +15,56 @@ package core_test import ( + "context" + "fmt" + "math/rand" + "strings" "testing" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/testdata" + "github.com/stretchr/testify/require" ) +func TestAnalyzeMVIndex(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table t(a int, b int, c int, j json, +index(a), index(b), +index idx(a, b, (cast(j as signed array)), c), +index idx2(a, b, (cast(j->'$.str' as char(10) array)), c))`) + + tk.MustExec("set tidb_analyze_version=2") + tk.MustExec("analyze table t") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) + tk.MustExec("analyze table t index idx") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t", + "Warning 1105 The version 2 would collect all statistics not only the selected indexes", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) + + tk.MustExec("set tidb_analyze_version=1") + tk.MustExec("analyze table t") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) + tk.MustExec("analyze table t index idx") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx")) + tk.MustExec("analyze table t index a") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows()) + tk.MustExec("analyze table t index a, idx, idx2") + tk.MustQuery("show warnings").Sort().Check(testkit.Rows( + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx", + "Warning 1105 analyzing multi-valued indexes is not supported, skip idx2")) +} + func TestIndexMergeJSONMemberOf(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -149,3 +192,202 @@ func TestMVIndexIndexMergePlanCache(t *testing.T) { tk.MustExec("execute st") tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) } + +func TestMVIndexPointGet(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table t(j json, unique kj((cast(j as signed array))))`) + + for _, sql := range []string{ + "select j from t where j=1", + "select j from t where j=1 or j=2", + "select j from t where j in (1, 2)", + } { + plan := tk.MustQuery("explain " + sql).Rows() + hasPointGet := false + for _, line := range plan { + if strings.Contains(strings.ToLower(line[0].(string)), "point") { + hasPointGet = true + } + } + require.True(t, !hasPointGet) // no point-get plan + } +} + +func TestEnforceMVIndex(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table 
t(a int, j json, index kj((cast(j as signed array))))`) + + var input []string + var output []struct { + SQL string + Plan []string + Err string + } + planSuiteData := core.GetIndexMergeSuiteData() + planSuiteData.LoadTestCases(t, &input, &output) + + for i, query := range input { + testdata.OnRecord(func() { + output[i].SQL = query + }) + rs, err := tk.Exec("explain format = 'brief' " + query) + if err != nil { + testdata.OnRecord(func() { + output[i].Err = err.Error() + output[i].Plan = nil + }) + require.Equal(t, output[i].Err, err.Error()) + } else { + result := tk.ResultSetToResultWithCtx(context.Background(), rs, "") + testdata.OnRecord(func() { + output[i].Err = "" + output[i].Plan = testdata.ConvertRowsToStrings(result.Rows()) + }) + result.Check(testkit.Rows(output[i].Plan...)) + } + } +} + +func TestMVIndexInvisible(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec(`create table t(a int, j json, index kj((cast(j as signed array))))`) + tk.MustQuery(`explain format='brief' select /*+ use_index(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows( + `Selection 8000.00 root json_memberof(cast(1, json BINARY), test.t.j)`, + `└─IndexMerge 10.00 root type: union`, + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo", + ` └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo`)) + + tk.MustExec(`ALTER TABLE t ALTER INDEX kj INVISIBLE`) + tk.MustQuery(`explain format='brief' select /*+ use_index(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows( + "Selection 8000.00 root json_memberof(cast(1, json BINARY), test.t.j)", + "└─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo")) + tk.MustQuery(`explain format='brief' select /*+ use_index_merge(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows( + "Selection 8000.00 root json_memberof(cast(1, json BINARY), test.t.j)", + "└─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo")) + + tk.MustExec(`ALTER TABLE t ALTER INDEX kj VISIBLE`) + tk.MustQuery(`explain format='brief' select /*+ use_index(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows( + `Selection 8000.00 root json_memberof(cast(1, json BINARY), test.t.j)`, + `└─IndexMerge 10.00 root type: union`, + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo", + ` └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo`)) +} + +func TestMVIndexFullScan(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec(`create table t(j json, index kj((cast(j as signed array))))`) + tk.MustExec(`insert into t values ('[1]')`) + tk.MustExec(`insert into t values ('[1, 2]')`) + tk.MustExec(`insert into t values ('[]')`) + tk.MustExec(`insert into t values (NULL)`) + + tk.MustQuery(`select /*+ use_index_merge(t, kj) */ count(*) from t`).Check(testkit.Rows("4")) + tk.MustQuery(`select /*+ use_index_merge(t, kj) */ count(*) from t where (1 member of (j))`).Check(testkit.Rows("2")) + tk.MustQuery(`select /*+ use_index_merge(t, kj) */ count(*) from t where json_contains((j), '[1]')`).Check(testkit.Rows("2")) + tk.MustQuery(`select /*+ use_index_merge(t, kj) 
*/ count(*) from t where json_overlaps((j), '[1]')`).Check(testkit.Rows("2")) + + // Forbid IndexMerge+IndexFullScan since IndexFullScan on MVIndex cannot read all rows in some cases. + tk.MustGetErrMsg(`select /*+ use_index(t, kj) */ count(*) from t`, "[planner:1815]Internal : Can't find a proper physical plan for this query") +} + +func TestMVIndexRandom(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + for _, testCase := range []struct { + indexType string + insertValOpts randMVIndexValOpts + queryValsOpts randMVIndexValOpts + }{ + {"signed", randMVIndexValOpts{"signed", 0, 3}, randMVIndexValOpts{"signed", 0, 3}}, + {"unsigned", randMVIndexValOpts{"unsigned", 0, 3}, randMVIndexValOpts{"unsigned", 0, 3}}, // unsigned-index + unsigned-values + {"char(3)", randMVIndexValOpts{"string", 3, 3}, randMVIndexValOpts{"string", 3, 3}}, + {"char(3)", randMVIndexValOpts{"string", 3, 3}, randMVIndexValOpts{"string", 1, 3}}, + {"char(3)", randMVIndexValOpts{"string", 3, 3}, randMVIndexValOpts{"string", 5, 3}}, + {"date", randMVIndexValOpts{"date", 0, 3}, randMVIndexValOpts{"date", 0, 3}}, + } { + tk.MustExec("drop table if exists t") + tk.MustExec(fmt.Sprintf(`create table t(a int, j json, index kj((cast(j as %v array))))`, testCase.indexType)) + + nRows := 20 + rows := make([]string, 0, nRows) + for i := 0; i < nRows; i++ { + va, v1, v2 := rand.Intn(testCase.insertValOpts.distinct), randMVIndexValue(testCase.insertValOpts), randMVIndexValue(testCase.insertValOpts) + if testCase.indexType == "date" { + rows = append(rows, fmt.Sprintf(`(%v, json_array(cast(%v as date), cast(%v as date)))`, va, v1, v2)) + } else { + rows = append(rows, fmt.Sprintf(`(%v, '[%v, %v]')`, va, v1, v2)) + } + } + tk.MustExec(fmt.Sprintf("insert into t values %v", strings.Join(rows, ", "))) + + nQueries := 20 + for i := 0; i < nQueries; i++ { + conds := randMVIndexConds(rand.Intn(3)+1, testCase.queryValsOpts) + r1 := tk.MustQuery("select /*+ ignore_index(t, kj) */ * from t where " + conds).Sort() + tk.MustQuery("select /*+ use_index_merge(t, kj) */ * from t where " + conds).Sort().Check(r1.Rows()) + } + } +} + +func randMVIndexConds(nConds int, valOpts randMVIndexValOpts) string { + var conds string + for i := 0; i < nConds; i++ { + if i > 0 { + if rand.Intn(5) < 1 { // OR + conds += " OR " + } else { // AND + conds += " AND " + } + } + cond := randMVIndexCond(rand.Intn(4), valOpts) + conds += cond + } + return conds +} + +func randMVIndexCond(condType int, valOpts randMVIndexValOpts) string { + switch condType { + case 0: // member_of + return fmt.Sprintf(`(%v member of (j))`, randMVIndexValue(valOpts)) + case 1: // json_contains + return fmt.Sprintf(`json_contains(j, '[%v, %v]')`, randMVIndexValue(valOpts), randMVIndexValue(valOpts)) + case 2: // json_overlaps + return fmt.Sprintf(`json_overlaps(j, '[%v, %v]')`, randMVIndexValue(valOpts), randMVIndexValue(valOpts)) + default: // others + return fmt.Sprintf(`a < %v`, rand.Intn(valOpts.distinct)) + } +} + +type randMVIndexValOpts struct { + valType string // signed, unsigned, string, date + maxStrLen int + distinct int +} + +func randMVIndexValue(opts randMVIndexValOpts) string { + switch strings.ToLower(opts.valType) { + case "signed": + return fmt.Sprintf("%v", rand.Intn(opts.distinct)-(opts.distinct/2)) + case "unsigned": + return fmt.Sprintf("%v", rand.Intn(opts.distinct)) + case "string": + return fmt.Sprintf(`"%v"`, strings.Repeat(fmt.Sprintf("%v", rand.Intn(opts.distinct)), rand.Intn(opts.maxStrLen)+1)) + case 
"date": + return fmt.Sprintf(`"2000-01-%v"`, rand.Intn(opts.distinct)+1) + } + return "" +} diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 2d73534fc2e1e..f6e566bac43a3 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -27,7 +27,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" @@ -50,7 +49,6 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/statistics/handle" - "github.com/pingcap/tidb/store/driver/backoff" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/table/temptable" @@ -67,7 +65,6 @@ import ( "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/set" "github.com/pingcap/tidb/util/size" - "github.com/tikv/client-go/v2/tikv" ) const ( @@ -692,13 +689,6 @@ func (ds *DataSource) setPreferredStoreType(hintInfo *tableHintInfo) { ds.preferStoreType = 0 return } - if config.GetGlobalConfig().DisaggregatedTiFlash && !isTiFlashComputeNodeAvailable(ds.ctx) { - // TiFlash is in disaggregated mode, need to make sure tiflash_compute node is available. - errMsg := "No available tiflash_compute node" - warning := ErrInternal.GenWithStack(errMsg) - ds.ctx.GetSessionVars().StmtCtx.AppendWarning(warning) - return - } for _, path := range ds.possibleAccessPaths { if path.StoreType == kv.TiFlash { ds.preferStoreType |= preferTiFlash @@ -716,15 +706,6 @@ func (ds *DataSource) setPreferredStoreType(hintInfo *tableHintInfo) { } } -func isTiFlashComputeNodeAvailable(ctx sessionctx.Context) bool { - bo := backoff.NewBackofferWithVars(context.Background(), 5000, nil) - stores, err := ctx.GetStore().(tikv.Storage).GetRegionCache().GetTiFlashComputeStores(bo.TiKVBackoffer()) - if err != nil || len(stores) == 0 { - return false - } - return true -} - func resetNotNullFlag(schema *expression.Schema, start, end int) { for i := start; i < end; i++ { col := *schema.Columns[i] diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go index 640c0f04630c1..a8a7a1918cfc0 100644 --- a/planner/core/physical_plan_test.go +++ b/planner/core/physical_plan_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/infoschema" @@ -2626,3 +2627,75 @@ func TestCountStarForTiFlash(t *testing.T) { tk.MustQuery("explain format = 'brief' " + ts).Check(testkit.Rows(output[i].Plan...)) } } + +func TestHashAggPushdownToTiFlashCompute(t *testing.T) { + var ( + input []string + output []struct { + SQL string + Plan []string + Warning []string + } + ) + planSuiteData := core.GetPlanSuiteData() + planSuiteData.LoadTestCases(t, &input, &output) + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists tbl_15;") + tk.MustExec(`create table tbl_15 (col_89 text (473) collate utf8mb4_bin , + col_90 timestamp default '1976-04-03' , + col_91 tinyint unsigned not null , + col_92 tinyint , + col_93 double not null , + col_94 datetime not null default '1970-06-08' , + col_95 datetime default '2028-02-13' , + 
col_96 int unsigned not null default 2532480521 , + col_97 char (168) default '') partition by hash (col_91) partitions 4;`) + + tk.MustExec("drop table if exists tbl_16;") + tk.MustExec(`create table tbl_16 (col_98 text (246) not null , + col_99 decimal (30 ,19) , + col_100 mediumint unsigned , + col_101 text (410) collate utf8mb4_bin , + col_102 date not null , + col_103 timestamp not null default '2003-08-27' , + col_104 text (391) not null , + col_105 date default '2010-10-24' , + col_106 text (9) not null,primary key (col_100, col_98(5), col_103), + unique key idx_23 (col_100, col_106 (3), col_101 (3))) partition by hash (col_100) partitions 2;`) + + config.UpdateGlobal(func(conf *config.Config) { + conf.DisaggregatedTiFlash = true + }) + defer config.UpdateGlobal(func(conf *config.Config) { + conf.DisaggregatedTiFlash = false + }) + + dom := domain.GetDomain(tk.Session()) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + require.True(t, exists) + for _, tblInfo := range db.Tables { + tableName := tblInfo.Name.L + if tableName == "tbl_15" || tableName == "tbl_16" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: true, + } + } + } + + tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1;") + tk.MustExec("set @@tidb_partition_prune_mode = 'static';") + tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash';") + + for i, ts := range input { + testdata.OnRecord(func() { + output[i].SQL = ts + output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + ts).Rows()) + }) + tk.MustQuery("explain format = 'brief' " + ts).Check(testkit.Rows(output[i].Plan...)) + } +} diff --git a/planner/core/plan_cache.go b/planner/core/plan_cache.go index 98b49c1bcc452..dbbed82a32e8f 100644 --- a/planner/core/plan_cache.go +++ b/planner/core/plan_cache.go @@ -287,7 +287,7 @@ func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isNonPrepared // check whether this plan is cacheable. if stmtCtx.UseCache { - checkPlanCacheability(sctx, p, len(paramTypes)) + checkPlanCacheability(sctx, p, len(paramTypes), len(limitParams)) } // put this plan into the plan cache. @@ -312,7 +312,7 @@ func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isNonPrepared } // checkPlanCacheability checks whether this plan is cacheable and set to skip plan cache if it's uncacheable. -func checkPlanCacheability(sctx sessionctx.Context, p Plan, paramNum int) { +func checkPlanCacheability(sctx sessionctx.Context, p Plan, paramNum int, limitParamNum int) { stmtCtx := sctx.GetSessionVars().StmtCtx var pp PhysicalPlan switch x := p.(type) { @@ -347,6 +347,11 @@ func checkPlanCacheability(sctx sessionctx.Context, p Plan, paramNum int) { stmtCtx.SetSkipPlanCache(errors.New("skip plan-cache: the plan with IndexMerge accessing Multi-Valued Index is un-cacheable")) return } + + // Before caching a plan with a parameterized limit, check whether the switch is enabled. + if limitParamNum != 0 && !sctx.GetSessionVars().EnablePlanCacheForParamLimit { + stmtCtx.SetSkipPlanCache(errors.New("skip plan-cache: the switch 'tidb_enable_plan_cache_for_param_limit' is off")) + } } // RebuildPlan4CachedPlan will rebuild this plan under current user parameters. 
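As a minimal sketch of the behavior the new check above enables (illustrative only, not part of the patch: it assumes the session variable tidb_enable_plan_cache_for_param_limit named in the skip reason is registered elsewhere in this change, and it reuses the testkit style of the tests above):

    // With the switch off, a prepared statement with "limit ?" should skip the plan cache.
    tk.MustExec(`create table t (a int)`)
    tk.MustExec(`set @@tidb_enable_plan_cache_for_param_limit = 0`)
    tk.MustExec(`prepare st from 'select * from t limit ?'`)
    tk.MustExec(`set @n = 5`)
    tk.MustExec(`execute st using @n`)
    tk.MustExec(`execute st using @n`)
    tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0"))
    // With the switch on, re-preparing and re-executing the same statement should hit the cache.
    tk.MustExec(`set @@tidb_enable_plan_cache_for_param_limit = 1`)
    tk.MustExec(`prepare st from 'select * from t limit ?'`)
    tk.MustExec(`execute st using @n`)
    tk.MustExec(`execute st using @n`)
    tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1"))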
diff --git a/planner/core/plan_cache_lru.go b/planner/core/plan_cache_lru.go index 062ed6cc13735..7379ec5411a37 100644 --- a/planner/core/plan_cache_lru.go +++ b/planner/core/plan_cache_lru.go @@ -51,9 +51,6 @@ type LRUPlanCache struct { lruList *list.List // lock make cache thread safe lock sync.Mutex - - // pickFromBucket get one element from bucket. The LRUPlanCache can not work if it is nil - pickFromBucket func(map[*list.Element]struct{}, *planCacheMatchOpts) (*list.Element, bool) // onEvict will be called if any eviction happened, only for test use now onEvict func(kvcache.Key, kvcache.Value) @@ -67,21 +64,19 @@ type LRUPlanCache struct { // NewLRUPlanCache creates a PCLRUCache object, whose capacity is "capacity". // NOTE: "capacity" should be a positive value. -func NewLRUPlanCache(capacity uint, guard float64, quota uint64, - pickFromBucket func(map[*list.Element]struct{}, *planCacheMatchOpts) (*list.Element, bool), sctx sessionctx.Context) *LRUPlanCache { +func NewLRUPlanCache(capacity uint, guard float64, quota uint64, sctx sessionctx.Context) *LRUPlanCache { if capacity < 1 { capacity = 100 logutil.BgLogger().Info("capacity of LRU cache is less than 1, will use default value(100) init cache") } return &LRUPlanCache{ - capacity: capacity, - size: 0, - buckets: make(map[string]map[*list.Element]struct{}, 1), //Generally one query has one plan - lruList: list.New(), - pickFromBucket: pickFromBucket, - quota: quota, - guard: guard, - sctx: sctx, + capacity: capacity, + size: 0, + buckets: make(map[string]map[*list.Element]struct{}, 1), // Generally one query has one plan + lruList: list.New(), + quota: quota, + guard: guard, + sctx: sctx, } } @@ -260,17 +255,25 @@ func (l *LRUPlanCache) memoryControl() { } -// PickPlanFromBucket pick one plan from bucket -func PickPlanFromBucket(bucket map[*list.Element]struct{}, matchOpts *planCacheMatchOpts) (*list.Element, bool) { +// pickFromBucket picks one plan from the bucket +func (l *LRUPlanCache) pickFromBucket(bucket map[*list.Element]struct{}, matchOpts *planCacheMatchOpts) (*list.Element, bool) { for k := range bucket { plan := k.Value.(*planCacheEntry).PlanValue.(*PlanCacheValue) + // check param types' compatibility ok1 := plan.matchOpts.paramTypes.CheckTypesCompatibility4PC(matchOpts.paramTypes) if !ok1 { continue } + + // check whether the limit offset and count match, and whether the switch allows caching plans with a parameterized limit ok2 := checkUint64SliceIfEqual(plan.matchOpts.limitOffsetAndCount, matchOpts.limitOffsetAndCount) - if ok2 { - return k, true + if !ok2 { + continue + } + if len(plan.matchOpts.limitOffsetAndCount) > 0 && !l.sctx.GetSessionVars().EnablePlanCacheForParamLimit { + // offset and count matched, but this is a plan with a parameterized limit and the switch is disabled + continue } + return k, true } return nil, false } diff --git a/planner/core/plan_cache_lru_test.go b/planner/core/plan_cache_lru_test.go index 72e4549b337a9..e3d6c43c9310b 100644 --- a/planner/core/plan_cache_lru_test.go +++ b/planner/core/plan_cache_lru_test.go @@ -47,11 +47,13 @@ func randomPlanCacheValue(types []*types.FieldType) *PlanCacheValue { func TestLRUPCPut(t *testing.T) { // test initialize - lruA := NewLRUPlanCache(0, 0, 0, PickPlanFromBucket, MockContext()) + mockCtx := MockContext() + mockCtx.GetSessionVars().EnablePlanCacheForParamLimit = true + lruA := NewLRUPlanCache(0, 0, 0, mockCtx) require.Equal(t, lruA.capacity, uint(100)) maxMemDroppedKv := make(map[kvcache.Key]kvcache.Value) - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, mockCtx) lru.onEvict = func(key kvcache.Key, value 
kvcache.Value) { maxMemDroppedKv[key] = value } @@ -131,7 +133,9 @@ func TestLRUPCPut(t *testing.T) { } func TestLRUPCGet(t *testing.T) { - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + mockCtx := MockContext() + mockCtx.GetSessionVars().EnablePlanCacheForParamLimit = true + lru := NewLRUPlanCache(3, 0, 0, mockCtx) keys := make([]*planCacheKey, 5) vals := make([]*PlanCacheValue, 5) @@ -185,7 +189,9 @@ func TestLRUPCGet(t *testing.T) { } func TestLRUPCDelete(t *testing.T) { - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + mockCtx := MockContext() + mockCtx.GetSessionVars().EnablePlanCacheForParamLimit = true + lru := NewLRUPlanCache(3, 0, 0, mockCtx) keys := make([]*planCacheKey, 3) vals := make([]*PlanCacheValue, 3) @@ -222,7 +228,7 @@ func TestLRUPCDelete(t *testing.T) { } func TestLRUPCDeleteAll(t *testing.T) { - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, MockContext()) keys := make([]*planCacheKey, 3) vals := make([]*PlanCacheValue, 3) @@ -253,7 +259,7 @@ func TestLRUPCDeleteAll(t *testing.T) { func TestLRUPCSetCapacity(t *testing.T) { maxMemDroppedKv := make(map[kvcache.Key]kvcache.Value) - lru := NewLRUPlanCache(5, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(5, 0, 0, MockContext()) lru.onEvict = func(key kvcache.Key, value kvcache.Value) { maxMemDroppedKv[key] = value } @@ -318,7 +324,7 @@ func TestLRUPCSetCapacity(t *testing.T) { } func TestIssue37914(t *testing.T) { - lru := NewLRUPlanCache(3, 0.1, 1, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0.1, 1, MockContext()) pTypes := []*types.FieldType{types.NewFieldType(mysql.TypeFloat), types.NewFieldType(mysql.TypeDouble)} key := &planCacheKey{database: strconv.FormatInt(int64(1), 10)} @@ -330,7 +336,7 @@ func TestIssue37914(t *testing.T) { } func TestIssue38244(t *testing.T) { - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, MockContext()) + lru := NewLRUPlanCache(3, 0, 0, MockContext()) require.Equal(t, uint(3), lru.capacity) keys := make([]*planCacheKey, 5) @@ -357,7 +363,7 @@ func TestLRUPlanCacheMemoryUsage(t *testing.T) { pTypes := []*types.FieldType{types.NewFieldType(mysql.TypeFloat), types.NewFieldType(mysql.TypeDouble)} ctx := MockContext() ctx.GetSessionVars().EnablePreparedPlanCacheMemoryMonitor = true - lru := NewLRUPlanCache(3, 0, 0, PickPlanFromBucket, ctx) + lru := NewLRUPlanCache(3, 0, 0, ctx) evict := make(map[kvcache.Key]kvcache.Value) lru.onEvict = func(key kvcache.Key, value kvcache.Value) { evict[key] = value diff --git a/planner/core/plan_cache_test.go b/planner/core/plan_cache_test.go index 8acc28b7b0062..9f8ff161fd658 100644 --- a/planner/core/plan_cache_test.go +++ b/planner/core/plan_cache_test.go @@ -78,7 +78,7 @@ func TestInitLRUWithSystemVar(t *testing.T) { tk.MustQuery("select @@session.tidb_prepared_plan_cache_size").Check(testkit.Rows("1")) sessionVar := tk.Session().GetSessionVars() - lru := plannercore.NewLRUPlanCache(uint(sessionVar.PreparedPlanCacheSize), 0, 0, plannercore.PickPlanFromBucket, tk.Session()) + lru := plannercore.NewLRUPlanCache(uint(sessionVar.PreparedPlanCacheSize), 0, 0, tk.Session()) require.NotNil(t, lru) } @@ -503,3 +503,22 @@ func TestPlanCacheWithLimit(t *testing.T) { tk.MustExec("execute stmt using @a") tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip plan-cache: limit count more than 10000")) } + +func TestIssue40679(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + 
tk.MustExec("use test") + tk.MustExec("create table t (a int, key(a));") + tk.MustExec("prepare st from 'select * from t use index(a) where a < ?'") + tk.MustExec("set @a1=1.1") + tk.MustExec("execute st using @a1") + + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.True(t, strings.Contains(rows[1][0].(string), "RangeScan")) // RangeScan not FullScan + + tk.MustExec("execute st using @a1") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip plan-cache: '1.1' may be converted to INT")) +} diff --git a/planner/core/plan_cacheable_checker.go b/planner/core/plan_cacheable_checker.go index 2ff9e51823ee2..3da0f285cd9bf 100644 --- a/planner/core/plan_cacheable_checker.go +++ b/planner/core/plan_cacheable_checker.go @@ -135,22 +135,21 @@ func (checker *cacheableChecker) Enter(in ast.Node) (out ast.Node, skipChildren return in, true } } - // todo: these comment is used to add switch in the later pr - //case *ast.Limit: - // if node.Count != nil { - // if _, isParamMarker := node.Count.(*driver.ParamMarkerExpr); isParamMarker { - // checker.cacheable = false - // checker.reason = "query has 'limit ?' is un-cacheable" - // return in, true - // } - // } - // if node.Offset != nil { - // if _, isParamMarker := node.Offset.(*driver.ParamMarkerExpr); isParamMarker { - // checker.cacheable = false - // checker.reason = "query has 'limit ?, 10' is un-cacheable" - // return in, true - // } - // } + case *ast.Limit: + if node.Count != nil { + if _, isParamMarker := node.Count.(*driver.ParamMarkerExpr); isParamMarker && !checker.sctx.GetSessionVars().EnablePlanCacheForParamLimit { + checker.cacheable = false + checker.reason = "query has 'limit ?' 
is un-cacheable" + return in, true + } + } + if node.Offset != nil { + if _, isParamMarker := node.Offset.(*driver.ParamMarkerExpr); isParamMarker && !checker.sctx.GetSessionVars().EnablePlanCacheForParamLimit { + checker.cacheable = false + checker.reason = "query has 'limit ?, 10' is un-cacheable" + return in, true + } + } case *ast.FrameBound: if _, ok := node.Expr.(*driver.ParamMarkerExpr); ok { checker.cacheable = false diff --git a/planner/core/plan_cacheable_checker_test.go b/planner/core/plan_cacheable_checker_test.go index 7d417e377888f..fc09b7b536530 100644 --- a/planner/core/plan_cacheable_checker_test.go +++ b/planner/core/plan_cacheable_checker_test.go @@ -26,11 +26,14 @@ import ( "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/testkit" driver "github.com/pingcap/tidb/types/parser_driver" + "github.com/pingcap/tidb/util/mock" "github.com/stretchr/testify/require" ) func TestCacheable(t *testing.T) { store := testkit.CreateMockStore(t) + mockCtx := mock.NewContext() + mockCtx.GetSessionVars().EnablePlanCacheForParamLimit = true tk := testkit.NewTestKit(t, store) @@ -87,7 +90,8 @@ func TestCacheable(t *testing.T) { TableRefs: tableRefsClause, Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ := core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) limitStmt = &ast.Limit{ Offset: &driver.ParamMarkerExpr{}, @@ -96,14 +100,16 @@ func TestCacheable(t *testing.T) { TableRefs: tableRefsClause, Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) limitStmt = &ast.Limit{} stmt = &ast.DeleteStmt{ TableRefs: tableRefsClause, Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) stmt.(*ast.DeleteStmt).TableHints = append(stmt.(*ast.DeleteStmt).TableHints, &ast.TableOptimizerHint{ HintName: model.NewCIStr(core.HintIgnorePlanCache), @@ -139,7 +145,8 @@ func TestCacheable(t *testing.T) { TableRefs: tableRefsClause, Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) limitStmt = &ast.Limit{ Offset: &driver.ParamMarkerExpr{}, @@ -148,14 +155,16 @@ func TestCacheable(t *testing.T) { TableRefs: tableRefsClause, Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) limitStmt = &ast.Limit{} stmt = &ast.UpdateStmt{ TableRefs: tableRefsClause, Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) stmt.(*ast.UpdateStmt).TableHints = append(stmt.(*ast.UpdateStmt).TableHints, &ast.TableOptimizerHint{ HintName: model.NewCIStr(core.HintIgnorePlanCache), @@ -188,7 +197,8 @@ func TestCacheable(t *testing.T) { stmt = &ast.SelectStmt{ Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) limitStmt = &ast.Limit{ Offset: &driver.ParamMarkerExpr{}, @@ -196,13 +206,15 @@ func TestCacheable(t *testing.T) { stmt = &ast.SelectStmt{ Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) limitStmt = &ast.Limit{} stmt = &ast.SelectStmt{ Limit: limitStmt, } - require.True(t, core.Cacheable(stmt, is)) + c, _ = core.CacheableWithCtx(mockCtx, stmt, is) + require.True(t, c) paramExpr := 
&driver.ParamMarkerExpr{} orderByClause := &ast.OrderByClause{Items: []*ast.ByItem{{Expr: paramExpr}}} diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 5af0bb004d6c9..5535faa97ab92 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -26,7 +26,6 @@ import ( "unsafe" "github.com/pingcap/errors" - "github.com/pingcap/failpoint" "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" @@ -1453,8 +1452,6 @@ func filterPathByIsolationRead(ctx sessionctx.Context, paths []*util.AccessPath, isolationReadEngines := ctx.GetSessionVars().GetIsolationReadEngines() availableEngine := map[kv.StoreType]struct{}{} var availableEngineStr string - var outputComputeNodeErrMsg bool - noTiFlashComputeNode := config.GetGlobalConfig().DisaggregatedTiFlash && !isTiFlashComputeNodeAvailable(ctx) for i := len(paths) - 1; i >= 0; i-- { // availableEngineStr is for warning message. if _, ok := availableEngine[paths[i].StoreType]; !ok { @@ -1464,20 +1461,7 @@ func filterPathByIsolationRead(ctx sessionctx.Context, paths []*util.AccessPath, } availableEngineStr += paths[i].StoreType.Name() } - _, exists := isolationReadEngines[paths[i].StoreType] - // Prune this path if: - // 1. path.StoreType doesn't exists in isolationReadEngines or - // 2. TiFlash is disaggregated and the number of tiflash_compute node is zero. - shouldPruneTiFlashCompute := noTiFlashComputeNode && exists && paths[i].StoreType == kv.TiFlash - failpoint.Inject("testDisaggregatedTiFlashQuery", func(val failpoint.Value) { - // Ignore check if tiflash_compute node number. - // After we support disaggregated tiflash in test framework, can delete this failpoint. - shouldPruneTiFlashCompute = val.(bool) - }) - if shouldPruneTiFlashCompute { - outputComputeNodeErrMsg = true - } - if (!exists && paths[i].StoreType != kv.TiDB) || shouldPruneTiFlashCompute { + if _, ok := isolationReadEngines[paths[i].StoreType]; !ok && paths[i].StoreType != kv.TiDB { paths = append(paths[:i], paths[i+1:]...) } } @@ -1486,11 +1470,7 @@ func filterPathByIsolationRead(ctx sessionctx.Context, paths []*util.AccessPath, if len(paths) == 0 { helpMsg := "" if engineVals == "tiflash" { - if outputComputeNodeErrMsg { - helpMsg = ". Please check tiflash_compute node is available" - } else { - helpMsg = ". Please check tiflash replica or ensure the query is readonly" - } + helpMsg = ". Please check tiflash replica or ensure the query is readonly" } err = ErrInternal.GenWithStackByArgs(fmt.Sprintf("No access path for table '%s' is found with '%v' = '%v', valid values can be '%s'%s.", tblName.String(), variable.TiDBIsolationReadEngines, engineVals, availableEngineStr, helpMsg)) @@ -2320,12 +2300,16 @@ func getColOffsetForAnalyze(colsInfo []*model.ColumnInfo, colID int64) int { // in tblInfo.Indices, index.Columns[i].Offset is set according to tblInfo.Columns. Since we decode row samples according to colsInfo rather than tbl.Columns // in the execution phase of ANALYZE, we need to modify index.Columns[i].Offset according to colInfos. 
// TODO: find a better way to find indexed columns in ANALYZE rather than use IndexColumn.Offset -func getModifiedIndexesInfoForAnalyze(tblInfo *model.TableInfo, allColumns bool, colsInfo []*model.ColumnInfo) []*model.IndexInfo { +func getModifiedIndexesInfoForAnalyze(sctx sessionctx.Context, tblInfo *model.TableInfo, allColumns bool, colsInfo []*model.ColumnInfo) []*model.IndexInfo { idxsInfo := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) for _, originIdx := range tblInfo.Indices { if originIdx.State != model.StatePublic { continue } + if originIdx.MVIndex { + sctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", originIdx.Name.L)) + continue + } if allColumns { // If all the columns need to be analyzed, we don't need to modify IndexColumn.Offset. idxsInfo = append(idxsInfo, originIdx) @@ -2401,7 +2385,7 @@ func (b *PlanBuilder) buildAnalyzeFullSamplingTask( execColsInfo = colsInfo } allColumns := len(tbl.TableInfo.Columns) == len(execColsInfo) - indexes := getModifiedIndexesInfoForAnalyze(tbl.TableInfo, allColumns, execColsInfo) + indexes := getModifiedIndexesInfoForAnalyze(b.ctx, tbl.TableInfo, allColumns, execColsInfo) handleCols := BuildHandleColsForAnalyze(b.ctx, tbl.TableInfo, allColumns, execColsInfo) newTask := AnalyzeColumnsTask{ HandleCols: handleCols, @@ -2631,6 +2615,10 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A commonHandleInfo = idx continue } + if idx.MVIndex { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", idx.Name.L)) + continue + } for i, id := range physicalIDs { if id == tbl.TableInfo.ID { id = -1 @@ -2724,6 +2712,10 @@ func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.A if idx == nil || idx.State != model.StatePublic { return nil, ErrAnalyzeMissIndex.GenWithStackByArgs(idxName.O, tblInfo.Name.O) } + if idx.MVIndex { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", idx.Name.L)) + continue + } for i, id := range physicalIDs { if id == tblInfo.ID { id = -1 @@ -2766,6 +2758,11 @@ func (b *PlanBuilder) buildAnalyzeAllIndex(as *ast.AnalyzeTableStmt, opts map[as } for _, idx := range tblInfo.Indices { if idx.State == model.StatePublic { + if idx.MVIndex { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("analyzing multi-valued indexes is not supported, skip %s", idx.Name.L)) + continue + } + for i, id := range physicalIDs { if id == tblInfo.ID { id = -1 diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 23bc05d2f117f..e80fd32fc3414 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -731,7 +731,7 @@ func newBatchPointGetPlan( } } for _, idxInfo := range tbl.Indices { - if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || + if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || idxInfo.MVIndex || !indexIsAvailableByHints(idxInfo, indexHints) { continue } @@ -1099,7 +1099,7 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt, check bool var err error for _, idxInfo := range tbl.Indices { - if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || + if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible || idxInfo.MVIndex || !indexIsAvailableByHints(idxInfo, tblName.IndexHints) { continue } diff --git 
a/planner/core/prepare_test.go b/planner/core/prepare_test.go index 656aed73ca189..c217cafbdb242 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -1339,7 +1339,7 @@ func TestPlanCacheSwitchDB(t *testing.T) { // DB is not specified se2, err := session.CreateSession4TestWithOpt(store, &session.Opt{ - PreparedPlanCache: core.NewLRUPlanCache(100, 0.1, math.MaxUint64, core.PickPlanFromBucket, tk.Session()), + PreparedPlanCache: core.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session()), }) require.NoError(t, err) tk2 := testkit.NewTestKitWithSession(t, store, se2) diff --git a/planner/core/rule_aggregation_push_down.go b/planner/core/rule_aggregation_push_down.go index c9326929b550f..24aef4161a8ec 100644 --- a/planner/core/rule_aggregation_push_down.go +++ b/planner/core/rule_aggregation_push_down.go @@ -405,6 +405,12 @@ func (a *aggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, u if err != nil { return nil, err } + // Set the mode of the newly generated firstRow to match the other agg funcs. + if len(agg.AggFuncs) != 0 { + firstRow.Mode = agg.AggFuncs[0].Mode + } else { + firstRow.Mode = aggregation.Partial1Mode + } newAgg.AggFuncs = append(newAgg.AggFuncs, firstRow) } tmpSchema := expression.NewSchema(newAgg.GetGroupByCols()...) diff --git a/planner/core/rule_generate_column_substitute.go b/planner/core/rule_generate_column_substitute.go index c796f99af62c5..88039392bf1a3 100644 --- a/planner/core/rule_generate_column_substitute.go +++ b/planner/core/rule_generate_column_substitute.go @@ -15,12 +15,11 @@ package core import ( + "bytes" "context" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/ast" - "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" ) @@ -37,13 +36,13 @@ type ExprColumnMap map[expression.Expression]*expression.Column // For example: select a+1 from t order by a+1, with a virtual generate column c as (a+1) and // an index on c. We need to replace a+1 with c so that we can use the index on c. // See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html -func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, _ *logicalOptimizeOp) (LogicalPlan, error) { +func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { exprToColumn := make(ExprColumnMap) collectGenerateColumn(lp, exprToColumn) if len(exprToColumn) == 0 { return lp, nil } - return gc.substitute(ctx, lp, exprToColumn), nil + return gc.substitute(ctx, lp, exprToColumn, opt), nil } // collectGenerateColumn collect the generate column and save them to a map from their expressions to themselves. 
@@ -74,18 +73,33 @@ func collectGenerateColumn(lp LogicalPlan, exprToColumn ExprColumnMap) { } } -func tryToSubstituteExpr(expr *expression.Expression, sctx sessionctx.Context, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column) { - if (*expr).Equal(sctx, candidateExpr) && candidateExpr.GetType().EvalType() == tp && +func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column, opt *logicalOptimizeOp) { + if (*expr).Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp && schema.ColumnIndex(col) != -1 { *expr = col + appendSubstituteColumnStep(lp, candidateExpr, col, opt) } } -func substituteExpression(cond expression.Expression, sctx *stmtctx.StatementContext, sessionCtx sessionctx.Context, exprToColumn ExprColumnMap, schema *expression.Schema) { +func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Expression, col *expression.Column, opt *logicalOptimizeOp) { + reason := func() string { return "" } + action := func() string { + buffer := bytes.NewBufferString("expression:") + buffer.WriteString(candidateExpr.String()) + buffer.WriteString(" substituted by") + buffer.WriteString(" column:") + buffer.WriteString(col.String()) + return buffer.String() + } + opt.appendStepToCurrent(lp.ID(), lp.TP(), reason, action) +} + +func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *logicalOptimizeOp) { sf, ok := cond.(*expression.ScalarFunction) if !ok { return } + sctx := lp.SCtx().GetSessionVars().StmtCtx defer func() { // If the argument is not changed, hash code doesn't need to recount again. // But we always do it to keep the code simple and stupid. 
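To make the rewrite concrete, a minimal sketch of the substitution this rule performs (illustrative only, not part of the patch; the table, column, and index names are hypothetical):

    // With virtual column c defined as (a + 1) and indexed, a predicate on a + 1
    // should be substituted with column c, so the plan can use idx_c instead of a
    // full table scan; with the opt tracing added above, the step should be recorded
    // along the lines of "expression:plus(test.tg.a, 1) substituted by column:test.tg.c".
    tk.MustExec(`create table tg (a int, c int as (a + 1) virtual, key idx_c (c))`)
    tk.MustQuery(`explain format = 'brief' select a from tg where a + 1 = 3`)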
@@ -96,10 +110,10 @@ func substituteExpression(cond expression.Expression, sctx *stmtctx.StatementCon switch sf.FuncName.L { case ast.EQ, ast.LT, ast.LE, ast.GT, ast.GE: for candidateExpr, column := range exprToColumn { - tryToSubstituteExpr(&sf.GetArgs()[1], sessionCtx, candidateExpr, sf.GetArgs()[0].GetType().EvalType(), schema, column) + tryToSubstituteExpr(&sf.GetArgs()[1], lp, candidateExpr, sf.GetArgs()[0].GetType().EvalType(), schema, column, opt) } for candidateExpr, column := range exprToColumn { - tryToSubstituteExpr(&sf.GetArgs()[0], sessionCtx, candidateExpr, sf.GetArgs()[1].GetType().EvalType(), schema, column) + tryToSubstituteExpr(&sf.GetArgs()[0], lp, candidateExpr, sf.GetArgs()[1].GetType().EvalType(), schema, column, opt) } case ast.In: expr = &sf.GetArgs()[0] @@ -115,43 +129,42 @@ func substituteExpression(cond expression.Expression, sctx *stmtctx.StatementCon } if canSubstitute { for candidateExpr, column := range exprToColumn { - tryToSubstituteExpr(expr, sessionCtx, candidateExpr, tp, schema, column) + tryToSubstituteExpr(expr, lp, candidateExpr, tp, schema, column, opt) } } case ast.Like: expr = &sf.GetArgs()[0] tp = sf.GetArgs()[1].GetType().EvalType() for candidateExpr, column := range exprToColumn { - tryToSubstituteExpr(expr, sessionCtx, candidateExpr, tp, schema, column) + tryToSubstituteExpr(expr, lp, candidateExpr, tp, schema, column, opt) } case ast.LogicOr, ast.LogicAnd: - substituteExpression(sf.GetArgs()[0], sctx, sessionCtx, exprToColumn, schema) - substituteExpression(sf.GetArgs()[1], sctx, sessionCtx, exprToColumn, schema) + substituteExpression(sf.GetArgs()[0], lp, exprToColumn, schema, opt) + substituteExpression(sf.GetArgs()[1], lp, exprToColumn, schema, opt) case ast.UnaryNot: - substituteExpression(sf.GetArgs()[0], sctx, sessionCtx, exprToColumn, schema) + substituteExpression(sf.GetArgs()[0], lp, exprToColumn, schema, opt) } } -func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap) LogicalPlan { - sctx := lp.SCtx().GetSessionVars().StmtCtx +func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap, opt *logicalOptimizeOp) LogicalPlan { var tp types.EvalType switch x := lp.(type) { case *LogicalSelection: for _, cond := range x.Conditions { - substituteExpression(cond, sctx, lp.SCtx(), exprToColumn, x.Schema()) + substituteExpression(cond, lp, exprToColumn, x.Schema(), opt) } case *LogicalProjection: for i := range x.Exprs { tp = x.Exprs[i].GetType().EvalType() for candidateExpr, column := range exprToColumn { - tryToSubstituteExpr(&x.Exprs[i], lp.SCtx(), candidateExpr, tp, x.children[0].Schema(), column) + tryToSubstituteExpr(&x.Exprs[i], lp, candidateExpr, tp, x.children[0].Schema(), column, opt) } } case *LogicalSort: for i := range x.ByItems { tp = x.ByItems[i].Expr.GetType().EvalType() for candidateExpr, column := range exprToColumn { - tryToSubstituteExpr(&x.ByItems[i].Expr, lp.SCtx(), candidateExpr, tp, x.Schema(), column) + tryToSubstituteExpr(&x.ByItems[i].Expr, lp, candidateExpr, tp, x.Schema(), column, opt) } } case *LogicalAggregation: @@ -162,6 +175,7 @@ func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToC if aggFunc.Args[i].Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp && x.Schema().ColumnIndex(column) != -1 { aggFunc.Args[i] = column + appendSubstituteColumnStep(lp, candidateExpr, column, opt) } } } @@ -172,12 +186,13 @@ func (gc *gcSubstituter) substitute(ctx context.Context, lp 
LogicalPlan, exprToC if x.GroupByItems[i].Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp && x.Schema().ColumnIndex(column) != -1 { x.GroupByItems[i] = column + appendSubstituteColumnStep(lp, candidateExpr, column, opt) } } } } for _, child := range lp.Children() { - gc.substitute(ctx, child, exprToColumn) + gc.substitute(ctx, child, exprToColumn, opt) } return lp } diff --git a/planner/core/task.go b/planner/core/task.go index 5d7ca6e5fd424..ff4e22756f15a 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1163,6 +1163,12 @@ func (p *PhysicalUnionAll) attach2MppTasks(tasks ...task) task { func (p *PhysicalUnionAll) attach2Task(tasks ...task) task { for _, t := range tasks { if _, ok := t.(*mppTask); ok { + if p.TP() == plancodec.TypePartitionUnion { + // In attach2MppTasks(), the PhysicalUnionAll would be attached to the mppTask directly. + // But PartitionUnion cannot be pushed down to TiFlash, so disable PartitionUnion pushdown explicitly here. + // For now, return invalidTask immediately; this can be refined later by converting the childTasks of PartitionUnion to rootTasks. + return invalidTask + } return p.attach2MppTasks(tasks...) } } diff --git a/planner/core/testdata/index_merge_suite_in.json b/planner/core/testdata/index_merge_suite_in.json index 22e595e22a8ce..d660364305397 100644 --- a/planner/core/testdata/index_merge_suite_in.json +++ b/planner/core/testdata/index_merge_suite_in.json @@ -1,4 +1,19 @@ [ + { + "name": "TestEnforceMVIndex", + "cases": [ + "select /*+ use_index(t, kj) */ * from t", + "select /*+ use_index(t, kj) */ a from t", + "select /*+ use_index(t, kj) */ * from t where a<10", + "select /*+ use_index(t, kj) */ * from t where (1 member of (j))", + "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) and a=10", + "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) or a=10", + "select /*+ use_index_merge(t, kj) */ * from t", + "select /*+ use_index_merge(t, kj) */ a from t", + "select /*+ use_index_merge(t, kj) */ * from t where a<10", + "select /*+ use_index_merge(t, kj) */ * from t where (1 member of (j)) or a=10" + ] + }, { "name": "TestIndexMergeJSONMemberOf", "cases": [ diff --git a/planner/core/testdata/index_merge_suite_out.json b/planner/core/testdata/index_merge_suite_out.json index 5810f67c85e9c..2c66948aca057 100644 --- a/planner/core/testdata/index_merge_suite_out.json +++ b/planner/core/testdata/index_merge_suite_out.json @@ -1,4 +1,84 @@ [ + { + "Name": "TestEnforceMVIndex", + "Cases": [ + { + "SQL": "select /*+ use_index(t, kj) */ * from t", + "Plan": null, + "Err": "[planner:1815]Internal : Can't find a proper physical plan for this query" + }, + { + "SQL": "select /*+ use_index(t, kj) */ a from t", + "Plan": null, + "Err": "[planner:1815]Internal : Can't find a proper physical plan for this query" + }, + { + "SQL": "select /*+ use_index(t, kj) */ * from t where a<10", + "Plan": null, + "Err": "[planner:1815]Internal : Can't find a proper physical plan for this query" + }, + { + "SQL": "select /*+ use_index(t, kj) */ * from t where (1 member of (j))", + "Plan": [ + "Selection 8000.00 root json_memberof(cast(1, json BINARY), test.t.j)", + "└─IndexMerge 10.00 root type: union", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo", + " └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Err": "" + }, + { + "SQL": "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) and a=10", 
"Plan": [ + "Selection 8.00 root json_memberof(cast(1, json BINARY), test.t.j)", + "└─IndexMerge 0.01 root type: union", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo", + " └─Selection(Probe) 0.01 cop[tikv] eq(test.t.a, 10)", + " └─TableRowIDScan 10.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Err": "" + }, + { + "SQL": "select /*+ use_index(t, kj) */ * from t where (1 member of (j)) or a=10", + "Plan": null, + "Err": "[planner:1815]Internal : Can't find a proper physical plan for this query" + }, + { + "SQL": "select /*+ use_index_merge(t, kj) */ * from t", + "Plan": [ + "TableReader 10000.00 root data:TableFullScan", + "└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Err": "" + }, + { + "SQL": "select /*+ use_index_merge(t, kj) */ a from t", + "Plan": [ + "TableReader 10000.00 root data:TableFullScan", + "└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Err": "" + }, + { + "SQL": "select /*+ use_index_merge(t, kj) */ * from t where a<10", + "Plan": [ + "TableReader 3323.33 root data:Selection", + "└─Selection 3323.33 cop[tikv] lt(test.t.a, 10)", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Err": "" + }, + { + "SQL": "select /*+ use_index_merge(t, kj) */ * from t where (1 member of (j)) or a=10", + "Plan": [ + "Selection 8000.00 root or(json_memberof(cast(1, json BINARY), test.t.j), eq(test.t.a, 10))", + "└─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Err": "" + } + ] + }, { "Name": "TestIndexMergeJSONMemberOf", "Cases": [ diff --git a/planner/core/testdata/plan_suite_in.json b/planner/core/testdata/plan_suite_in.json index d433f5dd88dbe..6f6e74fac3cfa 100644 --- a/planner/core/testdata/plan_suite_in.json +++ b/planner/core/testdata/plan_suite_in.json @@ -1186,5 +1186,13 @@ "select a, count(*) from t group by a -- shouldn't be rewritten", "select sum(a) from t -- sum shouldn't be rewritten" ] + }, + { + "name": "TestHashAggPushdownToTiFlashCompute", + "cases": [ + "select /*+ agg_to_cop() hash_agg() */ avg( distinct tbl_15.col_96 ) as r0 , min( tbl_15.col_92 ) as r1 , sum( distinct tbl_15.col_91 ) as r2 , max( tbl_15.col_92 ) as r3 from tbl_15 where tbl_15.col_94 != '2033-01-09' and tbl_15.col_93 > 7623.679908049186 order by r0,r1,r2,r3 limit 79 ;", + "select /*+ agg_to_cop() hash_agg() */ count(1) from tbl_15 ;", + "select /*+ agg_to_cop() stream_agg() */ avg( tbl_16.col_100 ) as r0 from tbl_16 where tbl_16.col_100 in ( 10672141 ) or tbl_16.col_104 in ( 'yfEG1t!*b' ,'C1*bqx_qyO' ,'vQ^yUpKHr&j#~' ) group by tbl_16.col_100 order by r0 limit 20 ;" + ] } ] diff --git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json index 31964823e95f2..14213e2223dab 100644 --- a/planner/core/testdata/plan_suite_out.json +++ b/planner/core/testdata/plan_suite_out.json @@ -7501,5 +7501,104 @@ "Warning": null } ] + }, + { + "Name": "TestHashAggPushdownToTiFlashCompute", + "Cases": [ + { + "SQL": "select /*+ agg_to_cop() hash_agg() */ avg( distinct tbl_15.col_96 ) as r0 , min( tbl_15.col_92 ) as r1 , sum( distinct tbl_15.col_91 ) as r2 , max( tbl_15.col_92 ) as r3 from tbl_15 where tbl_15.col_94 != '2033-01-09' and tbl_15.col_93 > 7623.679908049186 order by r0,r1,r2,r3 limit 79 ;", + "Plan": [ + "Limit 1.00 root offset:0, count:79", + "└─Sort 1.00 root Column#11, Column#12, 
Column#13, Column#14", + " └─HashAgg 1.00 root funcs:avg(distinct Column#89)->Column#11, funcs:min(Column#90)->Column#12, funcs:sum(distinct Column#91)->Column#13, funcs:max(Column#92)->Column#14", + " └─Projection 7100.44 root cast(test.tbl_15.col_96, decimal(10,0) UNSIGNED BINARY)->Column#89, Column#15, cast(test.tbl_15.col_91, decimal(3,0) UNSIGNED BINARY)->Column#91, Column#16", + " └─PartitionUnion 7100.44 root ", + " ├─TableReader 1775.11 root data:ExchangeSender", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#18)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#20)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#18, funcs:max(test.tbl_15.col_92)->Column#20", + " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p0 keep order:false, stats:pseudo", + " ├─TableReader 1775.11 root data:ExchangeSender", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#30)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#32)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#30, funcs:max(test.tbl_15.col_92)->Column#32", + " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p1 keep order:false, stats:pseudo", + " ├─TableReader 1775.11 root data:ExchangeSender", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#42)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#44)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#42, 
funcs:max(test.tbl_15.col_92)->Column#44", + " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p2 keep order:false, stats:pseudo", + " └─TableReader 1775.11 root data:ExchangeSender", + " └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#54)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#56)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#54, funcs:max(test.tbl_15.col_92)->Column#56", + " └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p3 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ agg_to_cop() hash_agg() */ count(1) from tbl_15 ;", + "Plan": [ + "HashAgg 1.00 root funcs:count(Column#12)->Column#11", + "└─PartitionUnion 4.00 root ", + " ├─HashAgg 1.00 root funcs:count(Column#13)->Column#12", + " │ └─TableReader 1.00 root data:ExchangeSender", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#13", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p0 keep order:false, stats:pseudo", + " ├─HashAgg 1.00 root funcs:count(Column#14)->Column#12", + " │ └─TableReader 1.00 root data:ExchangeSender", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#14", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p1 keep order:false, stats:pseudo", + " ├─HashAgg 1.00 root funcs:count(Column#15)->Column#12", + " │ └─TableReader 1.00 root data:ExchangeSender", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#15", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p2 keep order:false, stats:pseudo", + " └─HashAgg 1.00 root funcs:count(Column#16)->Column#12", + " └─TableReader 1.00 root data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#16", + " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p3 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ agg_to_cop() stream_agg() */ avg( tbl_16.col_100 ) as r0 from tbl_16 where tbl_16.col_100 in ( 10672141 ) or tbl_16.col_104 in ( 'yfEG1t!*b' ,'C1*bqx_qyO' ,'vQ^yUpKHr&j#~' ) group by tbl_16.col_100 order by r0 limit 20 ;", + "Plan": [ + "TopN 20.00 root Column#10, offset:0, count:20", + "└─HashAgg 63.95 root group by:test.tbl_16.col_100, funcs:avg(Column#11, Column#12)->Column#10", + " └─PartitionUnion 63.95 root ", + " ├─StreamAgg 31.98 
root group by:Column#22, funcs:count(Column#19)->Column#11, funcs:sum(Column#20)->Column#12, funcs:firstrow(Column#21)->test.tbl_16.col_100", + " │ └─Projection 39.97 root test.tbl_16.col_100, cast(test.tbl_16.col_100, decimal(8,0) UNSIGNED BINARY)->Column#20, test.tbl_16.col_100, test.tbl_16.col_100", + " │ └─Sort 39.97 root test.tbl_16.col_100", + " │ └─TableReader 39.97 root data:ExchangeSender", + " │ └─ExchangeSender 39.97 mpp[tiflash] ExchangeType: PassThrough", + " │ └─Selection 39.97 mpp[tiflash] or(eq(test.tbl_16.col_100, 10672141), in(test.tbl_16.col_104, \"yfEG1t!*b\", \"C1*bqx_qyO\", \"vQ^yUpKHr&j#~\"))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_16, partition:p0 keep order:false, stats:pseudo", + " └─StreamAgg 31.98 root group by:Column#26, funcs:count(Column#23)->Column#11, funcs:sum(Column#24)->Column#12, funcs:firstrow(Column#25)->test.tbl_16.col_100", + " └─Projection 39.97 root test.tbl_16.col_100, cast(test.tbl_16.col_100, decimal(8,0) UNSIGNED BINARY)->Column#24, test.tbl_16.col_100, test.tbl_16.col_100", + " └─Sort 39.97 root test.tbl_16.col_100", + " └─TableReader 39.97 root data:ExchangeSender", + " └─ExchangeSender 39.97 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection 39.97 mpp[tiflash] or(eq(test.tbl_16.col_100, 10672141), in(test.tbl_16.col_104, \"yfEG1t!*b\", \"C1*bqx_qyO\", \"vQ^yUpKHr&j#~\"))", + " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_16, partition:p1 keep order:false, stats:pseudo" + ], + "Warning": null + } + ] } ] diff --git a/resourcemanager/pooltask/BUILD.bazel b/resourcemanager/pooltask/BUILD.bazel index 151a0ddfdec02..c4113b69dd141 100644 --- a/resourcemanager/pooltask/BUILD.bazel +++ b/resourcemanager/pooltask/BUILD.bazel @@ -5,6 +5,8 @@ go_library( srcs = [ "task.go", "task_manager.go", + "task_manager_iterator.go", + "task_manager_scheduler.go", ], importpath = "github.com/pingcap/tidb/resourcemanager/pooltask", visibility = ["//visibility:public"], diff --git a/resourcemanager/pooltask/task_manager.go b/resourcemanager/pooltask/task_manager.go index 25ce9e8ad1b4b..66d6451b163ba 100644 --- a/resourcemanager/pooltask/task_manager.go +++ b/resourcemanager/pooltask/task_manager.go @@ -32,29 +32,29 @@ type tContainer[T any, U any, C any, CT any, TF Context[CT]] struct { task *TaskBox[T, U, C, CT, TF] } -type meta struct { - stats *list.List - createTS time.Time - origin int32 - running int32 +type meta[T any, U any, C any, CT any, TF Context[CT]] struct { + stats *list.List + createTS time.Time + initialConcurrency int32 + running atomic.Int32 } -func newStats(concurrency int32) *meta { - s := &meta{ - createTS: time.Now(), - stats: list.New(), - origin: concurrency, +func newStats[T any, U any, C any, CT any, TF Context[CT]](concurrency int32) *meta[T, U, C, CT, TF] { + s := &meta[T, U, C, CT, TF]{ + createTS: time.Now(), + stats: list.New(), + initialConcurrency: concurrency, } return s } -func (m *meta) getOriginConcurrency() int32 { - return m.origin +func (m *meta[T, U, C, CT, TF]) getOriginConcurrency() int32 { + return m.initialConcurrency } // TaskStatusContainer is a container that can control or watch the pool. 
type TaskStatusContainer[T any, U any, C any, CT any, TF Context[CT]] struct { - stats map[uint64]*meta + stats map[uint64]*meta[T, U, C, CT, TF] rw sync.RWMutex } @@ -70,7 +70,7 @@ func NewTaskManager[T any, U any, C any, CT any, TF Context[CT]](c int32) TaskMa task := make([]TaskStatusContainer[T, U, C, CT, TF], shard) for i := 0; i < shard; i++ { task[i] = TaskStatusContainer[T, U, C, CT, TF]{ - stats: make(map[uint64]*meta), + stats: make(map[uint64]*meta[T, U, C, CT, TF]), } } return TaskManager[T, U, C, CT, TF]{ @@ -83,7 +83,7 @@ func NewTaskManager[T any, U any, C any, CT any, TF Context[CT]](c int32) TaskMa func (t *TaskManager[T, U, C, CT, TF]) RegisterTask(taskID uint64, concurrency int32) { id := getShardID(taskID) t.task[id].rw.Lock() - t.task[id].stats[taskID] = newStats(concurrency) + t.task[id].stats[taskID] = newStats[T, U, C, CT, TF](concurrency) t.task[id].rw.Unlock() } @@ -113,7 +113,7 @@ func (t *TaskManager[T, U, C, CT, TF]) AddSubTask(taskID uint64, task *TaskBox[T t.running.Inc() t.task[shardID].rw.Lock() t.task[shardID].stats[taskID].stats.PushBack(tc) - t.task[shardID].stats[taskID].running++ // running job in this task + t.task[shardID].stats[taskID].running.Inc() // running job in this task t.task[shardID].rw.Unlock() } @@ -122,7 +122,7 @@ func (t *TaskManager[T, U, C, CT, TF]) ExitSubTask(taskID uint64) { shardID := getShardID(taskID) t.running.Dec() // total running tasks t.task[shardID].rw.Lock() - t.task[shardID].stats[taskID].running-- // running job in this task + t.task[shardID].stats[taskID].running.Dec() // running job in this task t.task[shardID].rw.Unlock() } @@ -131,7 +131,7 @@ func (t *TaskManager[T, U, C, CT, TF]) Running(taskID uint64) int32 { shardID := getShardID(taskID) t.task[shardID].rw.Lock() defer t.task[shardID].rw.Unlock() - return t.task[shardID].stats[taskID].running + return t.task[shardID].stats[taskID].running.Load() } // StopTask is to stop a task by TaskID. diff --git a/resourcemanager/pooltask/task_manager_iterator.go b/resourcemanager/pooltask/task_manager_iterator.go new file mode 100644 index 0000000000000..ada5994599ff5 --- /dev/null +++ b/resourcemanager/pooltask/task_manager_iterator.go @@ -0,0 +1,131 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pooltask + +import ( + "container/list" + "time" +) + +func (t *TaskManager[T, U, C, CT, TF]) getBoostTask() (tid uint64, result *TaskBox[T, U, C, CT, TF]) { + // boost task: + // 1. the number of running tasks is less than the concurrency + // 2. the shorter a task has run, the more likely it is to be boosted + tid, element := t.iter(canBoost[T, U, C, CT, TF]) + if element != nil { + return tid, element.Value.(tContainer[T, U, C, CT, TF]).task + } + return 0, nil +} + +func (t *TaskManager[T, U, C, CT, TF]) pauseTask() { + // pause task: + // 1. the longer a task has run, the more likely it is to be paused + // 2. if a task has been boosted, pause it first. 
+ tid, result := t.iter(canPause[T, U, C, CT, TF]) + if result != nil { + result.Value.(tContainer[T, U, C, CT, TF]).task.status.CompareAndSwap(RunningTask, StopTask) + // delete it from list + shardID := getShardID(tid) + t.task[shardID].rw.Lock() + defer t.task[shardID].rw.Unlock() + t.task[shardID].stats[tid].stats.Remove(result) + } +} + +func (t *TaskManager[T, U, C, CT, TF]) iter(fn func(m *meta[T, U, C, CT, TF], max time.Time) (*list.Element, bool)) (tid uint64, result *list.Element) { + var compareTS time.Time + for i := 0; i < shard; i++ { + breakFind := func(index int) (breakFind bool) { + t.task[i].rw.RLock() + defer t.task[i].rw.RUnlock() + for id, stats := range t.task[i].stats { + if result == nil { + result = findTask[T, U, C, CT, TF](stats, RunningTask) + tid = id + compareTS = stats.createTS + continue + } + newResult, pauseFind := fn(stats, compareTS) + if pauseFind { + result = newResult + tid = id + compareTS = stats.createTS + return true + } + if newResult != nil { + result = newResult + tid = id + compareTS = stats.createTS + } + } + return false + }(shard) + if breakFind { + break + } + } + return tid, result +} + +func canPause[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF], max time.Time) (result *list.Element, isBreak bool) { + if m.initialConcurrency < m.running.Load() { + box := findTask[T, U, C, CT, TF](m, RunningTask) + if box != nil { + return box, true + } + } + if m.createTS.Before(max) { + box := findTask[T, U, C, CT, TF](m, RunningTask) + if box != nil { + return box, false + } + } + return nil, false +} + +func canBoost[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF], min time.Time) (result *list.Element, isBreak bool) { + if m.running.Load() < m.initialConcurrency { + box := getTask[T, U, C, CT, TF](m) + if box != nil { + return box, true + } + } + if m.createTS.After(min) { + box := getTask[T, U, C, CT, TF](m) + if box != nil { + return box, false + } + } + return nil, false +} + +func findTask[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF], status int32) *list.Element { + for e := m.stats.Front(); e != nil; e = e.Next() { + box := e.Value.(tContainer[T, U, C, CT, TF]) + if box.task.status.Load() == status { + return e + } + } + return nil +} + +func getTask[T any, U any, C any, CT any, TF Context[CT]](m *meta[T, U, C, CT, TF]) *list.Element { + e := m.stats.Front() + if e != nil { + return e + } + return nil +} diff --git a/resourcemanager/pooltask/task_manager_scheduler.go b/resourcemanager/pooltask/task_manager_scheduler.go new file mode 100644 index 0000000000000..dcc158df06d82 --- /dev/null +++ b/resourcemanager/pooltask/task_manager_scheduler.go @@ -0,0 +1,28 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pooltask + +// Overclock is to increase the concurrency of pool. 
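+//
+// A hedged usage sketch (not from this patch; runBoosted is a hypothetical
+// caller-side helper) of how a pool tuner might drive it:
+//
+//	if tid, box := tm.Overclock(); box != nil {
+//		runBoosted(tid, box) // schedule extra work for the boosted task
+//	}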
+func (t *TaskManager[T, U, C, CT, TF]) Overclock() (tid uint64, task *TaskBox[T, U, C, CT, TF]) { + if t.concurrency > t.running.Load() { + return t.getBoostTask() + } + return 0, nil +} + +// Downclock is to decrease the concurrency of pool. +func (t *TaskManager[T, U, C, CT, TF]) Downclock() { + t.pauseTask() +} diff --git a/resourcemanager/rm.go b/resourcemanager/rm.go index e6e48de2059cd..025eb0fcbc129 100644 --- a/resourcemanager/rm.go +++ b/resourcemanager/rm.go @@ -78,7 +78,7 @@ func (r *ResourceManager) Stop() { } // Register is to register pool into resource manager -func (r *ResourceManager) Register(pool util.GorotinuePool, name string, component util.Component) error { +func (r *ResourceManager) Register(pool util.GoroutinePool, name string, component util.Component) error { p := util.PoolContainer{Pool: pool, Component: component} return r.registerPool(name, &p) } diff --git a/resourcemanager/schedule.go b/resourcemanager/schedule.go index f6ac691e09b15..50a5f54697800 100644 --- a/resourcemanager/schedule.go +++ b/resourcemanager/schedule.go @@ -52,14 +52,14 @@ func (*ResourceManager) exec(pool *util.PoolContainer, cmd scheduler.Command) { switch cmd { case scheduler.Downclock: concurrency := con - 1 - log.Info("downclock goroutine pool", + log.Info("[resource manager] downclock goroutine pool", zap.Int("origin concurrency", con), zap.Int("concurrency", concurrency), zap.String("name", pool.Pool.Name())) pool.Pool.Tune(concurrency) case scheduler.Overclock: concurrency := con + 1 - log.Info("overclock goroutine pool", + log.Info("[resource manager] overclock goroutine pool", zap.Int("origin concurrency", con), zap.Int("concurrency", concurrency), zap.String("name", pool.Pool.Name())) diff --git a/resourcemanager/scheduler/cpu_scheduler.go b/resourcemanager/scheduler/cpu_scheduler.go index c84fcf36fb697..14338d80683d4 100644 --- a/resourcemanager/scheduler/cpu_scheduler.go +++ b/resourcemanager/scheduler/cpu_scheduler.go @@ -30,7 +30,7 @@ func NewCPUScheduler() *CPUScheduler { } // Tune is to tune the goroutine pool -func (*CPUScheduler) Tune(_ util.Component, pool util.GorotinuePool) Command { +func (*CPUScheduler) Tune(_ util.Component, pool util.GoroutinePool) Command { if time.Since(pool.LastTunerTs()) < util.MinSchedulerInterval.Load() { return Hold } diff --git a/resourcemanager/scheduler/scheduler.go b/resourcemanager/scheduler/scheduler.go index 3af8e6aff5b0b..521536a741dee 100644 --- a/resourcemanager/scheduler/scheduler.go +++ b/resourcemanager/scheduler/scheduler.go @@ -32,5 +32,5 @@ const ( // Scheduler is a scheduler interface type Scheduler interface { - Tune(component util.Component, p util.GorotinuePool) Command + Tune(component util.Component, p util.GoroutinePool) Command } diff --git a/resourcemanager/util/util.go b/resourcemanager/util/util.go index 4d433975fabb7..6d1959bd08904 100644 --- a/resourcemanager/util/util.go +++ b/resourcemanager/util/util.go @@ -25,8 +25,8 @@ var ( MinSchedulerInterval = atomic.NewDuration(200 * time.Millisecond) ) -// GorotinuePool is a pool interface -type GorotinuePool interface { +// GoroutinePool is a pool interface +type GoroutinePool interface { ReleaseAndWait() Tune(size int) LastTunerTs() time.Time @@ -37,7 +37,7 @@ type GorotinuePool interface { // PoolContainer is a pool container type PoolContainer struct { - Pool GorotinuePool + Pool GoroutinePool Component Component } diff --git a/server/conn.go b/server/conn.go index 4d4300c099ecb..4cd2aedcb42c1 100644 --- a/server/conn.go +++ b/server/conn.go @@ -668,12 +668,12 
@@ func (cc *clientConn) readOptionalSSLRequestAndHandshakeResponse(ctx context.Con switch resp.AuthPlugin { case mysql.AuthCachingSha2Password: - resp.Auth, err = cc.authSha(ctx) + resp.Auth, err = cc.authSha(ctx, resp) if err != nil { return err } case mysql.AuthTiDBSM3Password: - resp.Auth, err = cc.authSM3(ctx) + resp.Auth, err = cc.authSM3(ctx, resp) if err != nil { return err } @@ -727,7 +727,7 @@ func (cc *clientConn) handleAuthPlugin(ctx context.Context, resp *handshakeRespo } // authSha implements the caching_sha2_password specific part of the protocol. -func (cc *clientConn) authSha(ctx context.Context) ([]byte, error) { +func (cc *clientConn) authSha(ctx context.Context, resp handshakeResponse41) ([]byte, error) { const ( shaCommand = 1 requestRsaPubKey = 2 // Not supported yet, only TLS is supported as secure channel. @@ -735,6 +735,13 @@ func (cc *clientConn) authSha(ctx context.Context) ([]byte, error) { fastAuthFail = 4 ) + // If no password is specified, we don't send the FastAuthFail to do the full authentication + // as that doesn't make sense without a password and confuses the client. + // https://github.com/pingcap/tidb/issues/40831 + if len(resp.Auth) == 0 { + return []byte{}, nil + } + // Currently we always send a "FastAuthFail" as the cached part of the protocol isn't implemented yet. // This triggers the client to send the full response. err := cc.writePacket([]byte{0, 0, 0, 0, shaCommand, fastAuthFail}) @@ -757,8 +764,16 @@ func (cc *clientConn) authSha(ctx context.Context) ([]byte, error) { } // authSM3 implements the tidb_sm3_password specific part of the protocol. -func (cc *clientConn) authSM3(ctx context.Context) ([]byte, error) { - err := cc.writePacket([]byte{0, 0, 0, 0, 1, 4}) +// tidb_sm3_password is very similar to caching_sha2_password. +func (cc *clientConn) authSM3(ctx context.Context, resp handshakeResponse41) ([]byte, error) { + // If no password is specified, we don't send the FastAuthFail to do the full authentication + // as that doesn't make sense without a password and confuses the client. 
+ // https://github.com/pingcap/tidb/issues/40831 + if len(resp.Auth) == 0 { + return []byte{}, nil + } + + err := cc.writePacket([]byte{0, 0, 0, 0, 1, 4}) // fastAuthFail if err != nil { logutil.Logger(ctx).Error("authSM3 packet write failed", zap.Error(err)) return nil, err diff --git a/server/conn_test.go b/server/conn_test.go index 9f8033cd1f98d..fb1cd15102129 100644 --- a/server/conn_test.go +++ b/server/conn_test.go @@ -1806,3 +1806,48 @@ func TestExtensionChangeUser(t *testing.T) { require.Equal(t, expectedConnInfo.Error, logInfo.Error) require.Equal(t, *(expectedConnInfo.ConnectionInfo), *(logInfo.ConnectionInfo)) } + +func TestAuthSha(t *testing.T) { + store := testkit.CreateMockStore(t) + + var outBuffer bytes.Buffer + tidbdrv := NewTiDBDriver(store) + cfg := newTestConfig() + cfg.Port, cfg.Status.StatusPort = 0, 0 + cfg.Status.ReportStatus = false + server, err := NewServer(cfg, tidbdrv) + require.NoError(t, err) + defer server.Close() + + cc := &clientConn{ + connectionID: 1, + salt: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14}, + server: server, + pkt: &packetIO{ + bufWriter: bufio.NewWriter(&outBuffer), + }, + collation: mysql.DefaultCollationID, + peerHost: "localhost", + alloc: arena.NewAllocator(512), + chunkAlloc: chunk.NewAllocator(), + capability: mysql.ClientProtocol41, + } + + tk := testkit.NewTestKit(t, store) + ctx := &TiDBContext{Session: tk.Session()} + cc.setCtx(ctx) + + resp := handshakeResponse41{ + Capability: mysql.ClientProtocol41 | mysql.ClientPluginAuth, + AuthPlugin: mysql.AuthCachingSha2Password, + Auth: []byte{}, // No password + } + + authData, err := cc.authSha(context.Background(), resp) + require.NoError(t, err) + + // If no password is specified authSha() should return an empty byte slice + // which differs from when a password is specified as that should trigger + // fastAuthFail and the rest of the auth process. + require.Equal(t, authData, []byte{}) +} diff --git a/server/http_handler.go b/server/http_handler.go index 6044f82861386..2855186b9cdfa 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -206,7 +206,7 @@ func (t *tikvHandlerTool) getHandle(tb table.PhysicalTable, params map[string]st return handle, nil } -func (t *tikvHandlerTool) getMvccByIdxValue(idx table.Index, values url.Values, idxCols []*model.ColumnInfo, handle kv.Handle) (*helper.MvccKV, error) { +func (t *tikvHandlerTool) getMvccByIdxValue(idx table.Index, values url.Values, idxCols []*model.ColumnInfo, handle kv.Handle) ([]*helper.MvccKV, error) { sc := new(stmtctx.StatementContext) // HTTP request is not a database session, set timezone to UTC directly here. // See https://github.com/pingcap/tidb/blob/master/docs/tidb_http_api.md for more details. 
@@ -227,7 +227,18 @@ func (t *tikvHandlerTool) getMvccByIdxValue(idx table.Index, values url.Values, if err != nil { return nil, err } - return &helper.MvccKV{Key: strings.ToUpper(hex.EncodeToString(encodedKey)), RegionID: regionID, Value: data}, err + idxData := &helper.MvccKV{Key: strings.ToUpper(hex.EncodeToString(encodedKey)), RegionID: regionID, Value: data} + tablecodec.IndexKey2TempIndexKey(idx.Meta().ID, encodedKey) + data, err = t.GetMvccByEncodedKey(encodedKey) + if err != nil { + return nil, err + } + regionID, err = t.getRegionIDByKey(encodedKey) + if err != nil { + return nil, err + } + tempIdxData := &helper.MvccKV{Key: strings.ToUpper(hex.EncodeToString(encodedKey)), RegionID: regionID, Value: data} + return append([]*helper.MvccKV{}, idxData, tempIdxData), err } // formValue2DatumRow converts URL query string to a Datum Row. diff --git a/server/http_handler_test.go b/server/http_handler_test.go index 7d2b8c3867bf5..c28967abf06ee 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -539,16 +539,16 @@ partition by range (a) func decodeKeyMvcc(closer io.ReadCloser, t *testing.T, valid bool) { decoder := json.NewDecoder(closer) - var data helper.MvccKV + var data []helper.MvccKV err := decoder.Decode(&data) require.NoError(t, err) if valid { - require.NotNil(t, data.Value.Info) - require.Greater(t, len(data.Value.Info.Writes), 0) + require.NotNil(t, data[0].Value.Info) + require.Greater(t, len(data[0].Value.Info.Writes), 0) } else { - require.Nil(t, data.Value.Info.Lock) - require.Nil(t, data.Value.Info.Writes) - require.Nil(t, data.Value.Info.Values) + require.Nil(t, data[0].Value.Info.Lock) + require.Nil(t, data[0].Value.Info.Writes) + require.Nil(t, data[0].Value.Info.Values) } } diff --git a/server/plan_replayer.go b/server/plan_replayer.go index 30f7c4ae821c1..d5265a21e7939 100644 --- a/server/plan_replayer.go +++ b/server/plan_replayer.go @@ -276,7 +276,7 @@ func loadSQLMetaFile(z *zip.Reader) (uint64, error) { } //nolint: errcheck,all_revive defer v.Close() - _, err = toml.DecodeReader(v, &varMap) + _, err = toml.NewDecoder(v).Decode(&varMap) if err != nil { return 0, errors.AddStack(err) } diff --git a/session/bootstrap.go b/session/bootstrap.go index 639ce2c5192bb..14c3c2c713cfb 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -516,6 +516,27 @@ const ( created_time timestamp NOT NULL, primary key(job_id, scan_id), key(created_time));` + + // CreateTTLJobHistory is a table that stores ttl job's history + CreateTTLJobHistory = `CREATE TABLE IF NOT EXISTS mysql.tidb_ttl_job_history ( + job_id varchar(64) PRIMARY KEY, + table_id bigint(64) NOT NULL, + parent_table_id bigint(64) NOT NULL, + table_schema varchar(64) NOT NULL, + table_name varchar(64) NOT NULL, + partition_name varchar(64) DEFAULT NULL, + create_time timestamp NOT NULL, + finish_time timestamp NOT NULL, + ttl_expire timestamp NOT NULL, + summary_text text, + expired_rows bigint(64) DEFAULT NULL, + deleted_rows bigint(64) DEFAULT NULL, + error_delete_rows bigint(64) DEFAULT NULL, + status varchar(64) NOT NULL, + key(table_schema, table_name, create_time), + key(parent_table_id, create_time), + key(create_time) + );` ) // bootstrap initiates system DB for a store. @@ -757,7 +778,7 @@ const ( version109 = 109 // version110 sets tidb_enable_gc_aware_memory_track to off when a cluster upgrades from some version lower than v6.5.0. 
version110 = 110 - // version111 adds the table tidb_ttl_task + // version111 adds the table tidb_ttl_task and tidb_ttl_job_history version111 = 111 ) @@ -2239,6 +2260,7 @@ func upgradeToVer111(s Session, ver int64) { return } doReentrantDDL(s, CreateTTLTask) + doReentrantDDL(s, CreateTTLJobHistory) } func writeOOMAction(s Session) { @@ -2349,6 +2371,8 @@ func doDDLWorks(s Session) { mustExecute(s, CreateTTLTableStatus) // Create tidb_ttl_task table mustExecute(s, CreateTTLTask) + // Create tidb_ttl_job_history table + mustExecute(s, CreateTTLJobHistory) } // doBootstrapSQLFile executes SQL commands in a file as the last stage of bootstrap. diff --git a/session/session.go b/session/session.go index 214c56c08cc93..ee7bfab94e325 100644 --- a/session/session.go +++ b/session/session.go @@ -466,8 +466,7 @@ func (s *session) GetPlanCache(isNonPrepared bool) sessionctx.PlanCache { } if s.nonPreparedPlanCache == nil { // lazy construction s.nonPreparedPlanCache = plannercore.NewLRUPlanCache(uint(s.GetSessionVars().NonPreparedPlanCacheSize), - variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), - plannercore.PickPlanFromBucket, s) + variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), s) } return s.nonPreparedPlanCache } @@ -478,8 +477,7 @@ func (s *session) GetPlanCache(isNonPrepared bool) sessionctx.PlanCache { } if s.preparedPlanCache == nil { // lazy construction s.preparedPlanCache = plannercore.NewLRUPlanCache(uint(s.GetSessionVars().PreparedPlanCacheSize), - variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), - plannercore.PickPlanFromBucket, s) + variable.PreparedPlanCacheMemoryGuardRatio.Load(), plannercore.PreparedPlanCacheMaxMemory.Load(), s) } return s.preparedPlanCache } diff --git a/sessionctx/binloginfo/BUILD.bazel b/sessionctx/binloginfo/BUILD.bazel index 7a843495273ea..3c345cd043469 100644 --- a/sessionctx/binloginfo/BUILD.bazel +++ b/sessionctx/binloginfo/BUILD.bazel @@ -54,6 +54,7 @@ go_test( "@com_github_pingcap_tipb//go-binlog", "@com_github_stretchr_testify//require", "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials/insecure", "@org_uber_go_goleak//:goleak", ], ) diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go index 28235b5184b68..f6ca6bc059db8 100644 --- a/sessionctx/binloginfo/binloginfo_test.go +++ b/sessionctx/binloginfo/binloginfo_test.go @@ -45,6 +45,7 @@ import ( "github.com/pingcap/tipb/go-binlog" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) type mockBinlogPump struct { @@ -100,7 +101,7 @@ func createBinlogSuite(t *testing.T) (s *binlogSuite) { opt := grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) }) - clientCon, err := grpc.Dial(unixFile, opt, grpc.WithInsecure()) + clientCon, err := grpc.Dial(unixFile, opt, grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) require.NotNil(t, clientCon) diff --git a/sessionctx/variable/featuretag/distributereorg/BUILD.bazel b/sessionctx/variable/featuretag/distributereorg/BUILD.bazel index 153ce052ecbb2..f31f9ddd6e2d8 100644 --- a/sessionctx/variable/featuretag/distributereorg/BUILD.bazel +++ b/sessionctx/variable/featuretag/distributereorg/BUILD.bazel @@ -4,7 +4,7 @@ go_library( name = "distributereorg", srcs = [ "default.go", - "non_default.go", + 
"non_default.go", #keep ], importpath = "github.com/pingcap/tidb/sessionctx/variable/featuretag/distributereorg", visibility = ["//visibility:public"], diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 0b6e7ef0cb0d0..eb6eeadc72d79 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -1253,6 +1253,9 @@ type SessionVars struct { // PreparedPlanCacheMonitor indicates whether to enable prepared plan cache monitor. EnablePreparedPlanCacheMemoryMonitor bool + // EnablePlanCacheForParamLimit controls whether the prepare statement with parameterized limit can be cached + EnablePlanCacheForParamLimit bool + // EnableNonPreparedPlanCache indicates whether to enable non-prepared plan cache. EnableNonPreparedPlanCache bool diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 909c3b7c7c415..de0e5ffc90bd3 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -731,7 +731,7 @@ var defaultSysVars = []*SysVar{ return nil }}, {Scope: ScopeGlobal, Name: TiDBEnableTelemetry, Value: BoolToOnOff(DefTiDBEnableTelemetry), Type: TypeBool}, - {Scope: ScopeGlobal, Name: TiDBEnableHistoricalStats, Value: Off, Type: TypeBool}, + {Scope: ScopeGlobal, Name: TiDBEnableHistoricalStats, Value: On, Type: TypeBool}, /* tikv gc metrics */ {Scope: ScopeGlobal, Name: TiDBGCEnable, Value: On, Type: TypeBool, GetGlobal: func(_ context.Context, s *SessionVars) (string, error) { return getTiDBTableValue(s, "tikv_gc_enable", On) @@ -1188,12 +1188,29 @@ var defaultSysVars = []*SysVar{ /* The system variables below have GLOBAL and SESSION scope */ {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePlanReplayerContinuesCapture, Value: BoolToOnOff(false), Type: TypeBool, SetSession: func(s *SessionVars, val string) error { + historicalStatsEnabled, err := s.GlobalVarsAccessor.GetGlobalSysVar(TiDBEnableHistoricalStats) + if err != nil { + return err + } + if !TiDBOptOn(historicalStatsEnabled) && TiDBOptOn(val) { + return errors.Errorf("%v should be enabled before enabling %v", TiDBEnableHistoricalStats, TiDBEnablePlanReplayerContinuesCapture) + } s.EnablePlanReplayedContinuesCapture = TiDBOptOn(val) return nil }, GetSession: func(vars *SessionVars) (string, error) { return BoolToOnOff(vars.EnablePlanReplayedContinuesCapture), nil }, + Validation: func(vars *SessionVars, s string, s2 string, flag ScopeFlag) (string, error) { + historicalStatsEnabled, err := vars.GlobalVarsAccessor.GetGlobalSysVar(TiDBEnableHistoricalStats) + if err != nil { + return "", err + } + if !TiDBOptOn(historicalStatsEnabled) && TiDBOptOn(s) { + return "", errors.Errorf("%v should be enabled before enabling %v", TiDBEnableHistoricalStats, TiDBEnablePlanReplayerContinuesCapture) + } + return s, nil + }, }, {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePlanReplayerCapture, Value: BoolToOnOff(true), Type: TypeBool, SetSession: func(s *SessionVars, val string) error { @@ -2267,6 +2284,10 @@ var defaultSysVars = []*SysVar{ s.PessimisticTransactionAggressiveLocking = TiDBOptOn(val) return nil }}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePlanCacheForParamLimit, Value: BoolToOnOff(DefTiDBEnablePlanCacheForParamLimit), Type: TypeBool, SetSession: func(s *SessionVars, val string) error { + s.EnablePlanCacheForParamLimit = TiDBOptOn(val) + return nil + }}, } // FeedbackProbability points to the FeedbackProbability in statistics package. 
diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index c147fdda69ba7..c86795937d544 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -796,6 +796,9 @@ const ( // TiDBPessimisticTransactionAggressiveLocking controls whether aggressive locking for pessimistic transaction // is enabled. TiDBPessimisticTransactionAggressiveLocking = "tidb_pessimistic_txn_aggressive_locking" + + // TiDBEnablePlanCacheForParamLimit controls whether prepare statement with parameterized limit can be cached + TiDBEnablePlanCacheForParamLimit = "tidb_enable_plan_cache_for_param_limit" ) // TiDB vars that have only global scope @@ -1167,6 +1170,7 @@ const ( DefTiDBTTLDeleteWorkerCount = 4 DefTiDBEnableResourceControl = false DefTiDBPessimisticTransactionAggressiveLocking = false + DefTiDBEnablePlanCacheForParamLimit = true ) // Process global variables. diff --git a/statistics/BUILD.bazel b/statistics/BUILD.bazel index e6992020197c3..8dccd523fc887 100644 --- a/statistics/BUILD.bazel +++ b/statistics/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "fmsketch.go", "histogram.go", "index.go", + "interact_with_storage.go", "merge_worker.go", "row_sampler.go", "sample.go", diff --git a/statistics/handle/dump.go b/statistics/handle/dump.go index 81e982881ee83..da8603ea90573 100644 --- a/statistics/handle/dump.go +++ b/statistics/handle/dump.go @@ -263,7 +263,7 @@ func (h *Handle) tableHistoricalStatsToJSON(physicalID int64, snapshot uint64) ( }() // get meta version - rows, _, err := reader.read("select distinct version from mysql.stats_meta_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) + rows, _, err := reader.Read("select distinct version from mysql.stats_meta_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) if err != nil { return nil, errors.AddStack(err) } @@ -272,14 +272,14 @@ func (h *Handle) tableHistoricalStatsToJSON(physicalID int64, snapshot uint64) ( } statsMetaVersion := rows[0].GetInt64(0) // get stats meta - rows, _, err = reader.read("select modify_count, count from mysql.stats_meta_history where table_id = %? and version = %?", physicalID, statsMetaVersion) + rows, _, err = reader.Read("select modify_count, count from mysql.stats_meta_history where table_id = %? and version = %?", physicalID, statsMetaVersion) if err != nil { return nil, errors.AddStack(err) } modifyCount, count := rows[0].GetInt64(0), rows[0].GetInt64(1) // get stats version - rows, _, err = reader.read("select distinct version from mysql.stats_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) + rows, _, err = reader.Read("select distinct version from mysql.stats_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) if err != nil { return nil, errors.AddStack(err) } @@ -289,7 +289,7 @@ func (h *Handle) tableHistoricalStatsToJSON(physicalID int64, snapshot uint64) ( statsVersion := rows[0].GetInt64(0) // get stats - rows, _, err = reader.read("select stats_data from mysql.stats_history where table_id = %? and version = %? order by seq_no", physicalID, statsVersion) + rows, _, err = reader.Read("select stats_data from mysql.stats_history where table_id = %? and version = %? 
order by seq_no", physicalID, statsVersion) if err != nil { return nil, errors.AddStack(err) } diff --git a/statistics/handle/gc.go b/statistics/handle/gc.go index f16e2c9719088..af20eeb35a363 100644 --- a/statistics/handle/gc.go +++ b/statistics/handle/gc.go @@ -53,8 +53,11 @@ func (h *Handle) GCStats(is infoschema.InfoSchema, ddlLease time.Duration) error if err := h.gcTableStats(is, row.GetInt64(0)); err != nil { return errors.Trace(err) } - if err := h.gcHistoryStatsFromKV(row.GetInt64(0)); err != nil { - return errors.Trace(err) + _, existed := is.TableByID(row.GetInt64(0)) + if !existed { + if err := h.gcHistoryStatsFromKV(row.GetInt64(0)); err != nil { + return errors.Trace(err) + } } } if err := h.ClearOutdatedHistoryStats(); err != nil { diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index fc4f86dc54fb8..fd4b32fdb3c10 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -1067,7 +1067,7 @@ func (h *Handle) LoadNeededHistograms() (err error) { return nil } -func (h *Handle) loadNeededColumnHistograms(reader *statsReader, col model.TableItemID, loadFMSketch bool) (err error) { +func (h *Handle) loadNeededColumnHistograms(reader *statistics.StatsReader, col model.TableItemID, loadFMSketch bool) (err error) { oldCache := h.statsCache.Load().(statsCache) tbl, ok := oldCache.Get(col.TableID) if !ok { @@ -1093,7 +1093,7 @@ func (h *Handle) loadNeededColumnHistograms(reader *statsReader, col model.Table return errors.Trace(err) } } - rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where is_index = 0 and table_id = %? and hist_id = %?", col.TableID, col.ID) + rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where is_index = 0 and table_id = %? and hist_id = %?", col.TableID, col.ID) if err != nil { return errors.Trace(err) } @@ -1134,7 +1134,7 @@ func (h *Handle) loadNeededColumnHistograms(reader *statsReader, col model.Table return nil } -func (h *Handle) loadNeededIndexHistograms(reader *statsReader, idx model.TableItemID, loadFMSketch bool) (err error) { +func (h *Handle) loadNeededIndexHistograms(reader *statistics.StatsReader, idx model.TableItemID, loadFMSketch bool) (err error) { oldCache := h.statsCache.Load().(statsCache) tbl, ok := oldCache.Get(idx.TableID) if !ok { @@ -1160,7 +1160,7 @@ func (h *Handle) loadNeededIndexHistograms(reader *statsReader, idx model.TableI return errors.Trace(err) } } - rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where is_index = 1 and table_id = %? and hist_id = %?", idx.TableID, idx.ID) + rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where is_index = 1 and table_id = %? and hist_id = %?", idx.TableID, idx.ID) if err != nil { return errors.Trace(err) } @@ -1214,12 +1214,12 @@ func (h *Handle) FlushStats() { } } -func (h *Handle) cmSketchAndTopNFromStorage(reader *statsReader, tblID int64, isIndex, histID int64) (_ *statistics.CMSketch, _ *statistics.TopN, err error) { - topNRows, _, err := reader.read("select HIGH_PRIORITY value, count from mysql.stats_top_n where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) +func (h *Handle) cmSketchAndTopNFromStorage(reader *statistics.StatsReader, tblID int64, isIndex, histID int64) (_ *statistics.CMSketch, _ *statistics.TopN, err error) { + topNRows, _, err := reader.Read("select HIGH_PRIORITY value, count from mysql.stats_top_n where table_id = %? and is_index = %? 
and hist_id = %?", tblID, isIndex, histID) if err != nil { return nil, nil, err } - rows, _, err := reader.read("select cm_sketch from mysql.stats_histograms where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) + rows, _, err := reader.Read("select cm_sketch from mysql.stats_histograms where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) if err != nil { return nil, nil, err } @@ -1229,15 +1229,15 @@ func (h *Handle) cmSketchAndTopNFromStorage(reader *statsReader, tblID int64, is return statistics.DecodeCMSketchAndTopN(rows[0].GetBytes(0), topNRows) } -func (h *Handle) fmSketchFromStorage(reader *statsReader, tblID int64, isIndex, histID int64) (_ *statistics.FMSketch, err error) { - rows, _, err := reader.read("select value from mysql.stats_fm_sketch where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) +func (h *Handle) fmSketchFromStorage(reader *statistics.StatsReader, tblID int64, isIndex, histID int64) (_ *statistics.FMSketch, err error) { + rows, _, err := reader.Read("select value from mysql.stats_fm_sketch where table_id = %? and is_index = %? and hist_id = %?", tblID, isIndex, histID) if err != nil || len(rows) == 0 { return nil, err } return statistics.DecodeFMSketch(rows[0].GetBytes(0)) } -func (h *Handle) indexStatsFromStorage(reader *statsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo) error { +func (h *Handle) indexStatsFromStorage(reader *statistics.StatsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo) error { histID := row.GetInt64(2) distinct := row.GetInt64(3) histVer := row.GetUint64(4) @@ -1247,7 +1247,7 @@ func (h *Handle) indexStatsFromStorage(reader *statsReader, row chunk.Row, table errorRate := statistics.ErrorRate{} flag := row.GetInt64(8) lastAnalyzePos := row.GetDatum(10, types.NewFieldType(mysql.TypeBlob)) - if statistics.IsAnalyzed(flag) && !reader.isHistory() { + if statistics.IsAnalyzed(flag) && !reader.IsHistory() { h.mu.rateMap.clear(table.PhysicalID, histID, true) } else if idx != nil { errorRate = idx.ErrorRate @@ -1295,7 +1295,7 @@ func (h *Handle) indexStatsFromStorage(reader *statsReader, row chunk.Row, table return nil } -func (h *Handle) columnStatsFromStorage(reader *statsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo, loadAll bool) error { +func (h *Handle) columnStatsFromStorage(reader *statistics.StatsReader, row chunk.Row, table *statistics.Table, tableInfo *model.TableInfo, loadAll bool) error { histID := row.GetInt64(2) distinct := row.GetInt64(3) histVer := row.GetUint64(4) @@ -1307,7 +1307,7 @@ func (h *Handle) columnStatsFromStorage(reader *statsReader, row chunk.Row, tabl col := table.Columns[histID] errorRate := statistics.ErrorRate{} flag := row.GetInt64(8) - if statistics.IsAnalyzed(flag) && !reader.isHistory() { + if statistics.IsAnalyzed(flag) && !reader.IsHistory() { h.mu.rateMap.clear(table.PhysicalID, histID, false) } else if col != nil { errorRate = col.ErrorRate @@ -1439,14 +1439,14 @@ func (h *Handle) TableStatsFromStorage(tableInfo *model.TableInfo, physicalID in } table.Pseudo = false - rows, _, err := reader.read("select modify_count, count from mysql.stats_meta where table_id = %?", physicalID) + rows, _, err := reader.Read("select modify_count, count from mysql.stats_meta where table_id = %?", physicalID) if err != nil || len(rows) == 0 { return nil, err } table.ModifyCount = rows[0].GetInt64(0) table.Count = rows[0].GetInt64(1) - rows, _, err = 
reader.read("select table_id, is_index, hist_id, distinct_count, version, null_count, tot_col_size, stats_ver, flag, correlation, last_analyze_pos from mysql.stats_histograms where table_id = %?", physicalID) + rows, _, err = reader.Read("select table_id, is_index, hist_id, distinct_count, version, null_count, tot_col_size, stats_ver, flag, correlation, last_analyze_pos from mysql.stats_histograms where table_id = %?", physicalID) // Check deleted table. if err != nil || len(rows) == 0 { return nil, nil @@ -1464,7 +1464,7 @@ func (h *Handle) TableStatsFromStorage(tableInfo *model.TableInfo, physicalID in return h.extendedStatsFromStorage(reader, table, physicalID, loadAll) } -func (h *Handle) extendedStatsFromStorage(reader *statsReader, table *statistics.Table, physicalID int64, loadAll bool) (*statistics.Table, error) { +func (h *Handle) extendedStatsFromStorage(reader *statistics.StatsReader, table *statistics.Table, physicalID int64, loadAll bool) (*statistics.Table, error) { failpoint.Inject("injectExtStatsLoadErr", func() { failpoint.Return(nil, errors.New("gofail extendedStatsFromStorage error")) }) @@ -1474,7 +1474,7 @@ func (h *Handle) extendedStatsFromStorage(reader *statsReader, table *statistics } else { table.ExtendedStats = statistics.NewExtendedStatsColl() } - rows, _, err := reader.read("select name, status, type, column_ids, stats, version from mysql.stats_extended where table_id = %? and status in (%?, %?, %?) and version > %?", physicalID, StatsStatusInited, StatsStatusAnalyzed, StatsStatusDeleted, lastVersion) + rows, _, err := reader.Read("select name, status, type, column_ids, stats, version from mysql.stats_extended where table_id = %? and status in (%?, %?, %?) and version > %?", physicalID, StatsStatusInited, StatsStatusAnalyzed, StatsStatusDeleted, lastVersion) if err != nil || len(rows) == 0 { return table, nil } @@ -1525,7 +1525,7 @@ func (h *Handle) StatsMetaCountAndModifyCount(tableID int64) (int64, int64, erro err = err1 } }() - rows, _, err := reader.read("select count, modify_count from mysql.stats_meta where table_id = %?", tableID) + rows, _, err := reader.Read("select count, modify_count from mysql.stats_meta where table_id = %?", tableID) if err != nil { return 0, 0, err } @@ -1634,6 +1634,7 @@ func SaveTableStatsToStorage(sctx sessionctx.Context, results *statistics.Analyz logutil.BgLogger().Error("record historical stats meta failed", zap.Int64("table-id", tableID), zap.Uint64("version", statsVer), + zap.String("source", source), zap.Error(err1)) } } @@ -1913,8 +1914,8 @@ func (h *Handle) SaveMetaToStorage(tableID, count, modifyCount int64, source str return err } -func (h *Handle) histogramFromStorage(reader *statsReader, tableID int64, colID int64, tp *types.FieldType, distinct int64, isIndex int, ver uint64, nullCount int64, totColSize int64, corr float64) (_ *statistics.Histogram, err error) { - rows, fields, err := reader.read("select count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where table_id = %? and is_index = %? and hist_id = %? order by bucket_id", tableID, isIndex, colID) +func (h *Handle) histogramFromStorage(reader *statistics.StatsReader, tableID int64, colID int64, tp *types.FieldType, distinct int64, isIndex int, ver uint64, nullCount int64, totColSize int64, corr float64) (_ *statistics.Histogram, err error) { + rows, fields, err := reader.Read("select count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets where table_id = %? and is_index = %? and hist_id = %? 
order by bucket_id", tableID, isIndex, colID) if err != nil { return nil, errors.Trace(err) } @@ -1961,9 +1962,9 @@ func (h *Handle) histogramFromStorage(reader *statsReader, tableID int64, colID return hg, nil } -func (h *Handle) columnCountFromStorage(reader *statsReader, tableID, colID, statsVer int64) (int64, error) { +func (h *Handle) columnCountFromStorage(reader *statistics.StatsReader, tableID, colID, statsVer int64) (int64, error) { count := int64(0) - rows, _, err := reader.read("select sum(count) from mysql.stats_buckets where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) + rows, _, err := reader.Read("select sum(count) from mysql.stats_buckets where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) if err != nil { return 0, errors.Trace(err) } @@ -1979,7 +1980,7 @@ func (h *Handle) columnCountFromStorage(reader *statsReader, tableID, colID, sta // Before stats ver 2, histogram represents all data in this column. // In stats ver 2, histogram + TopN represent all data in this column. // So we need to add TopN total count here. - rows, _, err = reader.read("select sum(count) from mysql.stats_top_n where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) + rows, _, err = reader.Read("select sum(count) from mysql.stats_top_n where table_id = %? and is_index = 0 and hist_id = %?", tableID, colID) if err != nil { return 0, errors.Trace(err) } @@ -2014,26 +2015,7 @@ func (h *Handle) statsMetaByTableIDFromStorage(tableID int64, snapshot uint64) ( return } -// statsReader is used for simplify code that needs to read system tables in different sqls -// but requires the same transactions. -type statsReader struct { - ctx sqlexec.RestrictedSQLExecutor - snapshot uint64 -} - -func (sr *statsReader) read(sql string, args ...interface{}) (rows []chunk.Row, fields []*ast.ResultField, err error) { - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) - if sr.snapshot > 0 { - return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool, sqlexec.ExecOptionWithSnapshot(sr.snapshot)}, sql, args...) - } - return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, args...) 
-} - -func (sr *statsReader) isHistory() bool { - return sr.snapshot > 0 -} - -func (h *Handle) getGlobalStatsReader(snapshot uint64) (reader *statsReader, err error) { +func (h *Handle) getGlobalStatsReader(snapshot uint64) (reader *statistics.StatsReader, err error) { h.mu.Lock() defer func() { if r := recover(); r != nil { @@ -2043,44 +2025,12 @@ func (h *Handle) getGlobalStatsReader(snapshot uint64) (reader *statsReader, err h.mu.Unlock() } }() - return h.getStatsReader(snapshot, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) + return statistics.GetStatsReader(snapshot, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) } -func (h *Handle) releaseGlobalStatsReader(reader *statsReader) error { +func (h *Handle) releaseGlobalStatsReader(reader *statistics.StatsReader) error { defer h.mu.Unlock() - return h.releaseStatsReader(reader, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) -} - -func (h *Handle) getStatsReader(snapshot uint64, exec sqlexec.RestrictedSQLExecutor) (reader *statsReader, err error) { - failpoint.Inject("mockGetStatsReaderFail", func(val failpoint.Value) { - if val.(bool) { - failpoint.Return(nil, errors.New("gofail genStatsReader error")) - } - }) - if snapshot > 0 { - return &statsReader{ctx: exec, snapshot: snapshot}, nil - } - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("getStatsReader panic %v", r) - } - }() - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) - failpoint.Inject("mockGetStatsReaderPanic", nil) - _, err = exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") - if err != nil { - return nil, err - } - return &statsReader{ctx: exec}, nil -} - -func (h *Handle) releaseStatsReader(reader *statsReader, exec sqlexec.RestrictedSQLExecutor) error { - if reader.snapshot > 0 { - return nil - } - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) - _, err := exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") - return err + return reader.Close() } const ( diff --git a/statistics/handle/handle_hist.go b/statistics/handle/handle_hist.go index ad04d946e3f22..1d41e14791446 100644 --- a/statistics/handle/handle_hist.go +++ b/statistics/handle/handle_hist.go @@ -177,7 +177,7 @@ var errExit = errors.New("Stop loading since domain is closed") // StatsReaderContext exported for testing type StatsReaderContext struct { - reader *statsReader + reader *statistics.StatsReader createdTime time.Time } @@ -188,7 +188,7 @@ func (h *Handle) SubLoadWorker(ctx sessionctx.Context, exit chan struct{}, exitW exitWg.Done() logutil.BgLogger().Info("SubLoadWorker exited.") if readerCtx.reader != nil { - err := h.releaseStatsReader(readerCtx.reader, ctx.(sqlexec.RestrictedSQLExecutor)) + err := readerCtx.reader.Close() if err != nil { logutil.BgLogger().Error("Fail to release stats loader: ", zap.Error(err)) } @@ -295,13 +295,13 @@ func (h *Handle) handleOneItemTask(task *NeededItemTask, readerCtx *StatsReaderC func (h *Handle) loadFreshStatsReader(readerCtx *StatsReaderContext, ctx sqlexec.RestrictedSQLExecutor) { if readerCtx.reader == nil || readerCtx.createdTime.Add(h.Lease()).Before(time.Now()) { if readerCtx.reader != nil { - err := h.releaseStatsReader(readerCtx.reader, ctx) + err := readerCtx.reader.Close() if err != nil { logutil.BgLogger().Warn("Fail to release stats loader: ", zap.Error(err)) } } for { - newReader, err := h.getStatsReader(0, ctx) + newReader, err := statistics.GetStatsReader(0, ctx) if err != nil { logutil.BgLogger().Error("Fail to new stats loader, retry after a while.", zap.Error(err)) 
time.Sleep(h.Lease() / 10) @@ -317,7 +317,7 @@ func (h *Handle) loadFreshStatsReader(readerCtx *StatsReaderContext, ctx sqlexec } // readStatsForOneItem reads hist for one column/index, TODO load data via kv-get asynchronously -func (h *Handle) readStatsForOneItem(item model.TableItemID, w *statsWrapper, reader *statsReader) (*statsWrapper, error) { +func (h *Handle) readStatsForOneItem(item model.TableItemID, w *statsWrapper, reader *statistics.StatsReader) (*statsWrapper, error) { failpoint.Inject("mockReadStatsForOnePanic", nil) failpoint.Inject("mockReadStatsForOneFail", func(val failpoint.Value) { if val.(bool) { @@ -357,7 +357,7 @@ func (h *Handle) readStatsForOneItem(item model.TableItemID, w *statsWrapper, re return nil, errors.Trace(err) } } - rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where table_id = %? and hist_id = %? and is_index = %?", item.TableID, item.ID, int(isIndexFlag)) + rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where table_id = %? and hist_id = %? and is_index = %?", item.TableID, item.ID, int(isIndexFlag)) if err != nil { return nil, errors.Trace(err) } diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go index 2b0669033f8c9..dc399a87fcad3 100644 --- a/statistics/handle/handle_test.go +++ b/statistics/handle/handle_test.go @@ -622,16 +622,16 @@ func TestLoadStats(t *testing.T) { require.True(t, idx.IsFullLoad()) // Following test tests whether the LoadNeededHistograms would panic. - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderFail", `return(true)`)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/mockGetStatsReaderFail", `return(true)`)) err = h.LoadNeededHistograms() require.Error(t, err) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderFail")) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/mockGetStatsReaderFail")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderPanic", "panic")) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/mockGetStatsReaderPanic", "panic")) err = h.LoadNeededHistograms() require.Error(t, err) require.Regexp(t, ".*getStatsReader panic.*", err.Error()) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderPanic")) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/statistics/mockGetStatsReaderPanic")) err = h.LoadNeededHistograms() require.NoError(t, err) } diff --git a/statistics/handle/historical_stats_handler.go b/statistics/handle/historical_stats_handler.go index c7a683da8b740..87b94b656899a 100644 --- a/statistics/handle/historical_stats_handler.go +++ b/statistics/handle/historical_stats_handler.go @@ -86,6 +86,7 @@ func (h *Handle) recordHistoricalStatsMeta(tableID int64, version uint64, source logutil.BgLogger().Error("record historical stats meta failed", zap.Int64("table-id", tableID), zap.Uint64("version", version), + zap.String("source", source), zap.Error(err)) } } diff --git a/statistics/interact_with_storage.go b/statistics/interact_with_storage.go new file mode 100644 index 0000000000000..478b845937067 --- /dev/null +++ b/statistics/interact_with_storage.go @@ -0,0 +1,86 @@ +// Copyright 2023 PingCAP, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package statistics
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
+	"github.com/pingcap/tidb/kv"
+	"github.com/pingcap/tidb/parser/ast"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/sqlexec"
+)
+
+// StatsReader simplifies code that needs to read statistics from the system tables (mysql.stats_xxx)
+// with different SQL statements but within the same transaction.
+//
+// Note that:
+// 1. Remember to call (*StatsReader).Close after reading all statistics.
+// 2. StatsReader is not thread-safe. Different goroutines cannot call (*StatsReader).Read concurrently.
+type StatsReader struct {
+	ctx      sqlexec.RestrictedSQLExecutor
+	snapshot uint64
+}
+
+// GetStatsReader returns a StatsReader.
+func GetStatsReader(snapshot uint64, exec sqlexec.RestrictedSQLExecutor) (reader *StatsReader, err error) {
+	failpoint.Inject("mockGetStatsReaderFail", func(val failpoint.Value) {
+		if val.(bool) {
+			failpoint.Return(nil, errors.New("gofail genStatsReader error"))
+		}
+	})
+	if snapshot > 0 {
+		return &StatsReader{ctx: exec, snapshot: snapshot}, nil
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = fmt.Errorf("getStatsReader panic %v", r)
+		}
+	}()
+	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
+	failpoint.Inject("mockGetStatsReaderPanic", nil)
+	_, err = exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin")
+	if err != nil {
+		return nil, err
+	}
+	return &StatsReader{ctx: exec}, nil
+}
+
+// Read is a thin wrapper that reads statistics from storage by SQL command.
+func (sr *StatsReader) Read(sql string, args ...interface{}) (rows []chunk.Row, fields []*ast.ResultField, err error) {
+	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
+	if sr.snapshot > 0 {
+		return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool, sqlexec.ExecOptionWithSnapshot(sr.snapshot)}, sql, args...)
+	}
+	return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, args...)
+}
+
+// IsHistory indicates whether to read history statistics.
+func (sr *StatsReader) IsHistory() bool {
+	return sr.snapshot > 0
+}
+
+// Close closes the StatsReader.
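+//
+// A typical lifecycle, sketched from how the handle package drives it in this
+// patch (error handling elided; exec is a sqlexec.RestrictedSQLExecutor):
+//
+//	reader, err := statistics.GetStatsReader(0, exec) // snapshot 0: begin a txn
+//	defer func() {
+//		err = reader.Close() // commits the txn begun by GetStatsReader
+//	}()
+//	rows, _, err := reader.Read("select count, modify_count from mysql.stats_meta where table_id = %?", tblID)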
+func (sr *StatsReader) Close() error { + if sr.IsHistory() || sr.ctx == nil { + return nil + } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, err := sr.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") + return err +} diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go index 9f9d4ef6fb002..065fc3621dabd 100644 --- a/store/copr/batch_coprocessor.go +++ b/store/copr/batch_coprocessor.go @@ -567,7 +567,7 @@ func buildBatchCopTasksConsistentHash(bo *backoff.Backoffer, for i, ranges := range rangesForEachPhysicalTable { rangesLen += ranges.Len() - locations, err := cache.SplitKeyRangesByLocations(bo, ranges) + locations, err := cache.SplitKeyRangesByLocations(bo, ranges, UnspecifiedLimit) if err != nil { return nil, errors.Trace(err) } @@ -677,7 +677,7 @@ func buildBatchCopTasksCore(bo *backoff.Backoffer, store *kvStore, rangesForEach rangesLen = 0 for i, ranges := range rangesForEachPhysicalTable { rangesLen += ranges.Len() - locations, err := cache.SplitKeyRangesByLocations(bo, ranges) + locations, err := cache.SplitKeyRangesByLocations(bo, ranges, UnspecifiedLimit) if err != nil { return nil, errors.Trace(err) } diff --git a/store/copr/coprocessor_test.go b/store/copr/coprocessor_test.go index 36ae88758bbc5..f7b15ebfd682d 100644 --- a/store/copr/coprocessor_test.go +++ b/store/copr/coprocessor_test.go @@ -381,46 +381,51 @@ func TestSplitRegionRanges(t *testing.T) { bo := backoff.NewBackofferWithVars(context.Background(), 3000, nil) - ranges, err := cache.SplitRegionRanges(bo, BuildKeyRanges("a", "c")) + ranges, err := cache.SplitRegionRanges(bo, BuildKeyRanges("a", "c"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "a", "c") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("h", "y")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("h", "y"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 3) rangeEqual(t, ranges, "h", "n", "n", "t", "t", "y") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "z")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "z"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 2) rangeEqual(t, ranges, "s", "t", "t", "z") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "s")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("s", "s"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "s", "s") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "t")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "t"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "t", "t") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "u")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("t", "u"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "t", "u") - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("u", "z")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("u", "z"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 1) rangeEqual(t, ranges, "u", "z") // min --> max - ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z")) + ranges, err = cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z"), UnspecifiedLimit) require.NoError(t, err) require.Len(t, ranges, 4) rangeEqual(t, ranges, "a", "g", "g", "n", "n", "t", "t", "z") + + ranges, err = 
cache.SplitRegionRanges(bo, BuildKeyRanges("a", "z"), 3) + require.NoError(t, err) + require.Len(t, ranges, 3) + rangeEqual(t, ranges, "a", "g", "g", "n", "n", "t") } func TestRebuild(t *testing.T) { diff --git a/store/copr/region_cache.go b/store/copr/region_cache.go index 97c3d705c223b..aa33656c39cca 100644 --- a/store/copr/region_cache.go +++ b/store/copr/region_cache.go @@ -42,10 +42,10 @@ func NewRegionCache(rc *tikv.RegionCache) *RegionCache { } // SplitRegionRanges gets the split ranges from pd region. -func (c *RegionCache) SplitRegionRanges(bo *Backoffer, keyRanges []kv.KeyRange) ([]kv.KeyRange, error) { +func (c *RegionCache) SplitRegionRanges(bo *Backoffer, keyRanges []kv.KeyRange, limit int) ([]kv.KeyRange, error) { ranges := NewKeyRanges(keyRanges) - locations, err := c.SplitKeyRangesByLocations(bo, ranges) + locations, err := c.SplitKeyRangesByLocations(bo, ranges, limit) if err != nil { return nil, derr.ToTiDBErr(err) } @@ -122,10 +122,16 @@ func (l *LocationKeyRanges) splitKeyRangesByBuckets() []*LocationKeyRanges { return res } +// UnspecifiedLimit means no limit. +const UnspecifiedLimit = -1 + // SplitKeyRangesByLocations splits the KeyRanges by logical info in the cache. -func (c *RegionCache) SplitKeyRangesByLocations(bo *Backoffer, ranges *KeyRanges) ([]*LocationKeyRanges, error) { +func (c *RegionCache) SplitKeyRangesByLocations(bo *Backoffer, ranges *KeyRanges, limit int) ([]*LocationKeyRanges, error) { res := make([]*LocationKeyRanges, 0) for ranges.Len() > 0 { + if limit != UnspecifiedLimit && len(res) >= limit { + break + } loc, err := c.LocateKey(bo.TiKVBackoffer(), ranges.At(0).StartKey) if err != nil { return res, derr.ToTiDBErr(err) @@ -176,7 +182,7 @@ func (c *RegionCache) SplitKeyRangesByLocations(bo *Backoffer, ranges *KeyRanges // // TODO(youjiali1995): Try to do it in one round and reduce allocations if bucket is not enabled. 
func (c *RegionCache) SplitKeyRangesByBuckets(bo *Backoffer, ranges *KeyRanges) ([]*LocationKeyRanges, error) { - locs, err := c.SplitKeyRangesByLocations(bo, ranges) + locs, err := c.SplitKeyRangesByLocations(bo, ranges, UnspecifiedLimit) if err != nil { return nil, derr.ToTiDBErr(err) } diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index ce1e9fb3ec8fa..1519f0845db21 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -1198,6 +1198,7 @@ func (w *GCWorker) resolveLocksForRange( failpoint.Inject("setGcResolveMaxBackoff", func(v failpoint.Value) { sleep := v.(int) // cooperate with github.com/tikv/client-go/v2/locate/invalidCacheAndRetry + //nolint: SA1029 ctx = context.WithValue(ctx, "injectedBackoff", struct{}{}) bo = tikv.NewBackofferWithVars(ctx, sleep, nil) }) diff --git a/store/mockstore/unistore/pd/BUILD.bazel b/store/mockstore/unistore/pd/BUILD.bazel index e57867b014e47..5d5563cbdc153 100644 --- a/store/mockstore/unistore/pd/BUILD.bazel +++ b/store/mockstore/unistore/pd/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "@com_github_pingcap_log//:log", "@com_github_tikv_pd_client//:client", "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials/insecure", "@org_uber_go_zap//:zap", ], ) diff --git a/store/mockstore/unistore/pd/client.go b/store/mockstore/unistore/pd/client.go index 55547e9461899..96d53eb8d54bf 100644 --- a/store/mockstore/unistore/pd/client.go +++ b/store/mockstore/unistore/pd/client.go @@ -29,6 +29,7 @@ import ( pd "github.com/tikv/pd/client" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) // Client is a PD (Placement Driver) client. @@ -231,7 +232,7 @@ func (c *client) getOrCreateConn(addr string) (*grpc.ClientConn, error) { if err != nil { return nil, err } - cc, err := grpc.Dial(u.Host, grpc.WithInsecure()) + cc, err := grpc.Dial(u.Host, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/store/mockstore/unistore/tikv/BUILD.bazel b/store/mockstore/unistore/tikv/BUILD.bazel index 91db06dd09dfa..bc7ecacf382fc 100644 --- a/store/mockstore/unistore/tikv/BUILD.bazel +++ b/store/mockstore/unistore/tikv/BUILD.bazel @@ -54,6 +54,7 @@ go_library( "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_pd_client//:client", "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], diff --git a/store/mockstore/unistore/tikv/deadlock.go b/store/mockstore/unistore/tikv/deadlock.go index 7eeb1fb2c5b64..7e8b0179e082d 100644 --- a/store/mockstore/unistore/tikv/deadlock.go +++ b/store/mockstore/unistore/tikv/deadlock.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/store/mockstore/unistore/util/lockwaiter" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) // Follower will send detection rpc to Leader @@ -100,7 +101,7 @@ func (dt *DetectorClient) rebuildStreamClient() error { if err != nil { return err } - cc, err := grpc.Dial(leaderAddr, grpc.WithInsecure()) + cc, err := grpc.Dial(leaderAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return err } diff --git a/telemetry/BUILD.bazel b/telemetry/BUILD.bazel index 1f032aa3f237a..a6c79f7de596f 100644 --- a/telemetry/BUILD.bazel +++ b/telemetry/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "id.go", "status.go", "telemetry.go", + "ttl.go", "util.go", ], importpath = "github.com/pingcap/tidb/telemetry", @@ 
-24,6 +25,7 @@ go_library( "//infoschema", "//kv", "//metrics", + "//parser/ast", "//parser/model", "//parser/mysql", "//sessionctx", diff --git a/telemetry/data_feature_usage.go b/telemetry/data_feature_usage.go index 81bf7a9785a3a..8661ce13ecccb 100644 --- a/telemetry/data_feature_usage.go +++ b/telemetry/data_feature_usage.go @@ -60,6 +60,7 @@ type featureUsage struct { AutoIDNoCache bool `json:"autoIDNoCache"` IndexMergeUsageCounter *m.IndexMergeUsageCounter `json:"indexMergeUsageCounter"` ResourceControlUsage *resourceControlUsage `json:"resourceControl"` + TTLUsage *ttlUsageCounter `json:"ttlUsage"` } type placementPolicyUsage struct { @@ -117,6 +118,8 @@ func getFeatureUsage(ctx context.Context, sctx sessionctx.Context) (*featureUsag usage.IndexMergeUsageCounter = getIndexMergeUsageInfo() + usage.TTLUsage = getTTLUsageInfo(ctx, sctx) + return &usage, nil } diff --git a/telemetry/data_feature_usage_test.go b/telemetry/data_feature_usage_test.go index 2895356955c09..e55342c9389ba 100644 --- a/telemetry/data_feature_usage_test.go +++ b/telemetry/data_feature_usage_test.go @@ -15,12 +15,16 @@ package telemetry_test import ( + "encoding/json" "fmt" + "strings" "testing" + "time" _ "github.com/pingcap/tidb/autoid_service" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/telemetry" "github.com/pingcap/tidb/testkit" @@ -622,3 +626,157 @@ func TestIndexMergeUsage(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), usage.IndexMergeUsageCounter.IndexMergeUsed) } + +func TestTTLTelemetry(t *testing.T) { + timeFormat := "2006-01-02 15:04:05" + dateFormat := "2006-01-02" + + now := time.Now() + curDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + if interval := curDate.Add(time.Hour * 24).Sub(now); interval > 0 && interval < 5*time.Minute { + // make sure the test is not running right at the end of a day + time.Sleep(interval) + } + + store, do := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@global.tidb_ttl_job_enable=0") + + getTTLTable := func(name string) *model.TableInfo { + tbl, err := do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr(name)) + require.NoError(t, err) + require.NotNil(t, tbl.Meta().TTLInfo) + return tbl.Meta() + } + + jobIDIdx := 1 + insertTTLHistory := func(tblName string, partitionName string, createTime, finishTime, ttlExpire time.Time, scanError string, totalRows, errorRows int64, status string) { + defer func() { + jobIDIdx++ + }() + + tbl := getTTLTable(tblName) + tblID := tbl.ID + partitionID := tbl.ID + if partitionName != "" { + for _, def := range tbl.Partition.Definitions { + if def.Name.L == strings.ToLower(partitionName) { + partitionID = def.ID + } + } + require.NotEqual(t, tblID, partitionID) + } + + summary := make(map[string]interface{}) + summary["total_rows"] = totalRows + summary["success_rows"] = totalRows - errorRows + summary["error_rows"] = errorRows + summary["total_scan_task"] = 1 + summary["scheduled_scan_task"] = 1 + summary["finished_scan_task"] = 1 + if scanError != "" { + summary["scan_task_err"] = scanError + } + + summaryText, err := json.Marshal(summary) + require.NoError(t, err) + + tk.MustExec("insert into "+ + "mysql.tidb_ttl_job_history ("+ + " job_id, table_id, parent_table_id, table_schema, table_name, partition_name, "+ + " create_time, finish_time, 
ttl_expire, summary_text, "+ + " expired_rows, deleted_rows, error_delete_rows, status) "+ + "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + jobIDIdx, partitionID, tblID, "test", tblName, partitionName, + createTime.Format(timeFormat), finishTime.Format(timeFormat), ttlExpire.Format(timeFormat), summaryText, + totalRows, totalRows-errorRows, errorRows, status, + ) + } + + oneDayAgoDate := curDate.Add(-24 * time.Hour) + // start today, end today + times11 := []time.Time{curDate.Add(time.Hour), curDate.Add(2 * time.Hour), curDate} + // start yesterday, end today + times21 := []time.Time{curDate.Add(-2 * time.Hour), curDate, curDate.Add(-3 * time.Hour)} + // start yesterday, end yesterday + times31 := []time.Time{oneDayAgoDate, oneDayAgoDate.Add(time.Hour), oneDayAgoDate.Add(-time.Hour)} + times32 := []time.Time{oneDayAgoDate.Add(2 * time.Hour), oneDayAgoDate.Add(3 * time.Hour), oneDayAgoDate.Add(time.Hour)} + times33 := []time.Time{oneDayAgoDate.Add(4 * time.Hour), oneDayAgoDate.Add(5 * time.Hour), oneDayAgoDate.Add(3 * time.Hour)} + // start 2 days ago, end yesterday + times41 := []time.Time{oneDayAgoDate.Add(-2 * time.Hour), oneDayAgoDate.Add(time.Hour), oneDayAgoDate.Add(-3 * time.Hour)} + // start two days ago, end two days ago + times51 := []time.Time{oneDayAgoDate.Add(-5 * time.Hour), oneDayAgoDate.Add(-4 * time.Hour), oneDayAgoDate.Add(-6 * time.Hour)} + + tk.MustExec("create table t1 (t timestamp) TTL=`t` + interval 1 hour") + insertTTLHistory("t1", "", times11[0], times11[1], times11[2], "", 100000000, 0, "finished") + insertTTLHistory("t1", "", times21[0], times21[1], times21[2], "", 100000000, 0, "finished") + insertTTLHistory("t1", "", times31[0], times31[1], times31[2], "err1", 112600, 110000, "finished") + insertTTLHistory("t1", "", times32[0], times32[1], times32[2], "", 2600, 0, "timeout") + insertTTLHistory("t1", "", times33[0], times33[1], times33[2], "", 2600, 0, "finished") + insertTTLHistory("t1", "", times41[0], times41[1], times41[2], "", 2600, 0, "finished") + insertTTLHistory("t1", "", times51[0], times51[1], times51[2], "", 100000000, 1, "finished") + + usage, err := telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + checkTableHistWithDeleteRows := func(vals ...int64) { + require.Equal(t, 5, len(vals)) + require.Equal(t, 5, len(usage.TTLUsage.TableHistWithDeleteRows)) + require.Equal(t, int64(10*1000), *usage.TTLUsage.TableHistWithDeleteRows[0].LessThan) + require.Equal(t, vals[0], usage.TTLUsage.TableHistWithDeleteRows[0].Count) + require.Equal(t, int64(100*1000), *usage.TTLUsage.TableHistWithDeleteRows[1].LessThan) + require.Equal(t, vals[1], usage.TTLUsage.TableHistWithDeleteRows[1].Count) + require.Equal(t, int64(1000*1000), *usage.TTLUsage.TableHistWithDeleteRows[2].LessThan) + require.Equal(t, vals[2], usage.TTLUsage.TableHistWithDeleteRows[2].Count) + require.Equal(t, int64(10*1000*1000), *usage.TTLUsage.TableHistWithDeleteRows[3].LessThan) + require.Equal(t, vals[3], usage.TTLUsage.TableHistWithDeleteRows[3].Count) + require.True(t, usage.TTLUsage.TableHistWithDeleteRows[4].LessThanMax) + require.Nil(t, usage.TTLUsage.TableHistWithDeleteRows[4].LessThan) + require.Equal(t, vals[4], usage.TTLUsage.TableHistWithDeleteRows[4].Count) + } + + checkTableHistWithDelay := func(vals ...int64) { + require.Equal(t, 5, len(vals)) + require.Equal(t, 5, len(usage.TTLUsage.TableHistWithDelayTime)) + require.Equal(t, int64(1), *usage.TTLUsage.TableHistWithDelayTime[0].LessThan) + require.Equal(t, vals[0], 
usage.TTLUsage.TableHistWithDelayTime[0].Count) + require.Equal(t, int64(6), *usage.TTLUsage.TableHistWithDelayTime[1].LessThan) + require.Equal(t, vals[1], usage.TTLUsage.TableHistWithDelayTime[1].Count) + require.Equal(t, int64(24), *usage.TTLUsage.TableHistWithDelayTime[2].LessThan) + require.Equal(t, vals[2], usage.TTLUsage.TableHistWithDelayTime[2].Count) + require.Equal(t, int64(72), *usage.TTLUsage.TableHistWithDelayTime[3].LessThan) + require.Equal(t, vals[3], usage.TTLUsage.TableHistWithDelayTime[3].Count) + require.True(t, usage.TTLUsage.TableHistWithDelayTime[4].LessThanMax) + require.Nil(t, usage.TTLUsage.TableHistWithDelayTime[4].LessThan) + require.Equal(t, vals[4], usage.TTLUsage.TableHistWithDelayTime[4].Count) + } + + require.False(t, usage.TTLUsage.TTLJobEnabled) + require.Equal(t, int64(1), usage.TTLUsage.TTLTables) + require.Equal(t, int64(1), usage.TTLUsage.TTLJobEnabledTables) + require.Equal(t, oneDayAgoDate.Format(dateFormat), usage.TTLUsage.TTLHistDate) + checkTableHistWithDeleteRows(0, 1, 0, 0, 0) + checkTableHistWithDelay(0, 0, 1, 0, 0) + + tk.MustExec("create table t2 (t timestamp) TTL=`t` + interval 20 hour") + tk.MustExec("set @@global.tidb_ttl_job_enable=1") + insertTTLHistory("t2", "", times31[0], times31[1], times31[2], "", 9999, 0, "finished") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.True(t, usage.TTLUsage.TTLJobEnabled) + require.Equal(t, int64(2), usage.TTLUsage.TTLTables) + require.Equal(t, int64(2), usage.TTLUsage.TTLJobEnabledTables) + require.Equal(t, oneDayAgoDate.Format(dateFormat), usage.TTLUsage.TTLHistDate) + checkTableHistWithDeleteRows(1, 1, 0, 0, 0) + checkTableHistWithDelay(0, 1, 1, 0, 0) + + tk.MustExec("create table t3 (t timestamp) TTL=`t` + interval 1 hour TTL_ENABLE='OFF'") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.True(t, usage.TTLUsage.TTLJobEnabled) + require.Equal(t, int64(3), usage.TTLUsage.TTLTables) + require.Equal(t, int64(2), usage.TTLUsage.TTLJobEnabledTables) + require.Equal(t, oneDayAgoDate.Format(dateFormat), usage.TTLUsage.TTLHistDate) + checkTableHistWithDeleteRows(1, 1, 0, 0, 0) + checkTableHistWithDelay(0, 1, 1, 0, 1) +} diff --git a/telemetry/main_test.go b/telemetry/main_test.go index 0e8d98b2a4f6c..8478a3ead4084 100644 --- a/telemetry/main_test.go +++ b/telemetry/main_test.go @@ -41,6 +41,8 @@ func TestMain(m *testing.M) { goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"), + goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), } goleak.VerifyTestMain(m, opts...) diff --git a/telemetry/ttl.go b/telemetry/ttl.go new file mode 100644 index 0000000000000..b9c8c0210fb0c --- /dev/null +++ b/telemetry/ttl.go @@ -0,0 +1,214 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package telemetry + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" +) + +const ( + // selectDeletedRowsOneDaySQL selects the rows deleted by TTL for each table over the last day + selectDeletedRowsOneDaySQL = `SELECT parent_table_id, CAST(SUM(deleted_rows) AS SIGNED) + FROM + mysql.tidb_ttl_job_history + WHERE + create_time >= CURDATE() - INTERVAL 7 DAY + AND finish_time >= CURDATE() - INTERVAL 1 DAY + AND finish_time < CURDATE() + GROUP BY parent_table_id;` + // selectDelaySQL selects the deletion delay in minutes for each table as of the end of the last day + selectDelaySQL = `SELECT + parent_table_id, TIMESTAMPDIFF(MINUTE, MIN(tm), CURDATE()) AS ttl_minutes + FROM + ( + SELECT + table_id, + parent_table_id, + MAX(ttl_expire) AS tm + FROM + mysql.tidb_ttl_job_history + WHERE + create_time > CURDATE() - INTERVAL 7 DAY + AND finish_time < CURDATE() + AND status = 'finished' + AND JSON_VALID(summary_text) + AND summary_text ->> "$.scan_task_err" IS NULL + GROUP BY + table_id, parent_table_id + ) t + GROUP BY parent_table_id;` +) + +type ttlHistItem struct { + // A non-nil LessThan means this item counts the values in the range [prevLessThan, LessThan) + // Note that its type is an int64 pointer so that it is omitted from serialization when not set. + LessThan *int64 `json:"less_than,omitempty"` + // LessThanMax being true means the range is [prevLessThan, MAX) + LessThanMax bool `json:"less_than_max,omitempty"` + // Count is the number of items that fall into the range + Count int64 `json:"count"` +} + +type ttlUsageCounter struct { + TTLJobEnabled bool `json:"ttl_job_enabled"` + TTLTables int64 `json:"ttl_table_count"` + TTLJobEnabledTables int64 `json:"ttl_job_enabled_tables"` + TTLHistDate string `json:"ttl_hist_date"` + TableHistWithDeleteRows []*ttlHistItem `json:"table_hist_with_delete_rows"` + TableHistWithDelayTime []*ttlHistItem `json:"table_hist_with_delay_time"` +} + +func int64Pointer(val int64) *int64 { + v := val + return &v +} + +func (c *ttlUsageCounter) UpdateTableHistWithDeleteRows(rows int64) { + for _, item := range c.TableHistWithDeleteRows { + if item.LessThanMax || rows < *item.LessThan { + item.Count++ + return + } + } +} + +func (c *ttlUsageCounter) UpdateTableHistWithDelayTime(tblCnt int, hours int64) { + for _, item := range c.TableHistWithDelayTime { + if item.LessThanMax || hours < *item.LessThan { + item.Count += int64(tblCnt) + return + } + } +} + +func getTTLUsageInfo(ctx context.Context, sctx sessionctx.Context) (counter *ttlUsageCounter) { + counter = &ttlUsageCounter{ + TTLJobEnabled: variable.EnableTTLJob.Load(), + TTLHistDate: time.Now().Add(-24 * time.Hour).Format("2006-01-02"), + TableHistWithDeleteRows: []*ttlHistItem{ + { + LessThan: int64Pointer(10 * 1000), + }, + { + LessThan: int64Pointer(100 * 1000), + }, + { + LessThan: int64Pointer(1000 * 1000), + }, + { + LessThan: int64Pointer(10000 * 1000), + }, + { + LessThanMax: true, + }, + }, + TableHistWithDelayTime: []*ttlHistItem{ + { + LessThan: int64Pointer(1), + }, + { + LessThan: int64Pointer(6), + }, + { + LessThan: int64Pointer(24), + }, + { + LessThan: int64Pointer(72), + }, + { + 
LessThanMax: true, + }, + }, + } + + is, ok := sctx.GetDomainInfoSchema().(infoschema.InfoSchema) + if !ok { + // it should never happen + logutil.BgLogger().Error(fmt.Sprintf("GetDomainInfoSchema returns an invalid type: %T", is)) + return + } + + ttlTables := make(map[int64]*model.TableInfo) + for _, db := range is.AllSchemas() { + for _, tbl := range is.SchemaTables(db.Name) { + tblInfo := tbl.Meta() + if tblInfo.State != model.StatePublic || tblInfo.TTLInfo == nil { + continue + } + + counter.TTLTables++ + if tblInfo.TTLInfo.Enable { + counter.TTLJobEnabledTables++ + } + ttlTables[tblInfo.ID] = tblInfo + } + } + + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, selectDeletedRowsOneDaySQL) + if err != nil { + logutil.BgLogger().Error("exec sql error", zap.String("SQL", selectDeletedRowsOneDaySQL), zap.Error(err)) + } else { + for _, row := range rows { + counter.UpdateTableHistWithDeleteRows(row.GetInt64(1)) + } + } + + rows, _, err = exec.ExecRestrictedSQL(ctx, nil, selectDelaySQL) + if err != nil { + logutil.BgLogger().Error("exec sql error", zap.String("SQL", selectDelaySQL), zap.Error(err)) + } else { + noHistoryTables := len(ttlTables) + for _, row := range rows { + tblID := row.GetInt64(0) + tbl, ok := ttlTables[tblID] + if !ok { + // the table does not exist, maybe truncated or dropped + continue + } + noHistoryTables-- + + evalIntervalSQL := fmt.Sprintf( + "SELECT TIMESTAMPDIFF(HOUR, CURDATE() - INTERVAL %d MINUTE, CURDATE() - INTERVAL %s %s)", + row.GetInt64(1), tbl.TTLInfo.IntervalExprStr, ast.TimeUnitType(tbl.TTLInfo.IntervalTimeUnit).String(), + ) + + innerRows, _, err := exec.ExecRestrictedSQL(ctx, nil, evalIntervalSQL) + if err != nil || len(innerRows) == 0 { + logutil.BgLogger().Error("exec sql error or empty rows returned", zap.String("SQL", evalIntervalSQL), zap.Error(err)) + continue + } + + hours := innerRows[0].GetInt64(0) + counter.UpdateTableHistWithDelayTime(1, hours) + } + + // When no history is found for a table, use the max delay + counter.UpdateTableHistWithDelayTime(noHistoryTables, math.MaxInt64) + } + return +} diff --git a/tests/realtikvtest/addindextest/BUILD.bazel b/tests/realtikvtest/addindextest/BUILD.bazel index a2e9c9906380b..a79f2a15f8ca7 100644 --- a/tests/realtikvtest/addindextest/BUILD.bazel +++ b/tests/realtikvtest/addindextest/BUILD.bazel @@ -43,10 +43,8 @@ go_test( "//parser/model", "//testkit", "//tests/realtikvtest", - "//util/logutil", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@org_uber_go_zap//:zap", ], ) diff --git a/tests/realtikvtest/addindextest/integration_test.go b/tests/realtikvtest/addindextest/integration_test.go index 07b54089395da..ed1e4e6c85dcd 100644 --- a/tests/realtikvtest/addindextest/integration_test.go +++ b/tests/realtikvtest/addindextest/integration_test.go @@ -31,10 +31,8 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/tests/realtikvtest" - "github.com/pingcap/tidb/util/logutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) func TestAddIndexIngestMemoryUsage(t *testing.T) { @@ -422,7 +420,7 @@ func TestAddIndexIngestCancel(t *testing.T) { return } if job.Type == model.ActionAddIndex && job.SchemaState == model.StateWriteReorganization { - idx := findIdxInfo(dom, "addindexlit", "t", "idx") + idx := testutil.FindIdxInfo(dom, "addindexlit", "t", "idx") if idx == nil { return } 
@@ -460,12 +458,3 @@ func (c *testCallback) OnJobRunBefore(job *model.Job) { c.OnJobRunBeforeExported(job) } } - -func findIdxInfo(dom *domain.Domain, dbName, tbName, idxName string) *model.IndexInfo { - tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) - if err != nil { - logutil.BgLogger().Warn("cannot find table", zap.String("dbName", dbName), zap.String("tbName", tbName)) - return nil - } - return tbl.Meta().FindIndexByName(idxName) -} diff --git a/tidb-binlog/pump_client/BUILD.bazel b/tidb-binlog/pump_client/BUILD.bazel index ad98822adaf5b..863fe7b61f8f9 100644 --- a/tidb-binlog/pump_client/BUILD.bazel +++ b/tidb-binlog/pump_client/BUILD.bazel @@ -21,6 +21,7 @@ go_library( "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//status", "@org_uber_go_zap//:zap", ], @@ -41,5 +42,6 @@ go_test( "@com_github_pingcap_tipb//go-binlog", "@com_github_stretchr_testify//require", "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials/insecure", ], ) diff --git a/tidb-binlog/pump_client/bench_test.go b/tidb-binlog/pump_client/bench_test.go index fe3eb2f6ffb30..ae93203319e21 100644 --- a/tidb-binlog/pump_client/bench_test.go +++ b/tidb-binlog/pump_client/bench_test.go @@ -23,6 +23,7 @@ import ( pb "github.com/pingcap/tipb/go-binlog" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) func Benchmark100Thread(b *testing.B) { @@ -86,7 +87,7 @@ func createMockPumpsClientAndServer(b *testing.B) (*PumpsClient, *mockPumpServer return net.DialTimeout("tcp", addr, timeout) }) - clientCon, err := grpc.Dial(addr, opt, grpc.WithInsecure()) + clientCon, err := grpc.Dial(addr, opt, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { b.Fatal(err) } diff --git a/tidb-binlog/pump_client/client_test.go b/tidb-binlog/pump_client/client_test.go index ac287773072ed..974ed76d29aa8 100644 --- a/tidb-binlog/pump_client/client_test.go +++ b/tidb-binlog/pump_client/client_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tipb/go-binlog" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -176,7 +177,7 @@ func TestWriteBinlog(t *testing.T) { opt := grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout(cfg.serverMode, addr, timeout) }) - clientCon, err := grpc.Dial(cfg.addr, opt, grpc.WithInsecure()) + clientCon, err := grpc.Dial(cfg.addr, opt, grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) require.NotNil(t, clientCon) pumpClient := mockPumpsClient(binlog.NewPumpClient(clientCon), true) diff --git a/tidb-binlog/pump_client/pump.go b/tidb-binlog/pump_client/pump.go index 82fa48da92da2..4459bb1bfc5ef 100644 --- a/tidb-binlog/pump_client/pump.go +++ b/tidb-binlog/pump_client/pump.go @@ -29,6 +29,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -103,7 +104,7 @@ func (p *PumpStatus) createGrpcClient() error { if p.security != nil { clientConn, err = grpc.Dial(p.Addr, dialerOpt, grpc.WithTransportCredentials(credentials.NewTLS(p.security))) } else { - clientConn, err = grpc.Dial(p.Addr, dialerOpt, grpc.WithInsecure()) + clientConn, err = grpc.Dial(p.Addr, dialerOpt, 
grpc.WithTransportCredentials(insecure.NewCredentials())) } if err != nil { atomic.AddInt64(&p.ErrNum, 1) diff --git a/ttl/cache/ttlstatus.go b/ttl/cache/ttlstatus.go index d28bafa5a76c8..b21a50a161f79 100644 --- a/ttl/cache/ttlstatus.go +++ b/ttl/cache/ttlstatus.go @@ -30,13 +30,15 @@ const ( // JobStatusWaiting means the job hasn't started JobStatusWaiting JobStatus = "waiting" // JobStatusRunning means this job is running - JobStatusRunning = "running" + JobStatusRunning JobStatus = "running" // JobStatusCancelling means this job is being canceled, but not canceled yet - JobStatusCancelling = "cancelling" + JobStatusCancelling JobStatus = "cancelling" // JobStatusCancelled means this job has been canceled successfully - JobStatusCancelled = "cancelled" + JobStatusCancelled JobStatus = "cancelled" // JobStatusTimeout means this job has timeout - JobStatusTimeout = "timeout" + JobStatusTimeout JobStatus = "timeout" + // JobStatusFinished means the job has finished + JobStatusFinished JobStatus = "finished" ) const selectFromTTLTableStatus = "SELECT LOW_PRIORITY table_id,parent_table_id,table_statistics,last_job_id,last_job_start_time,last_job_finish_time,last_job_ttl_expire,last_job_summary,current_job_id,current_job_owner_id,current_job_owner_addr,current_job_owner_hb_time,current_job_start_time,current_job_ttl_expire,current_job_state,current_job_status,current_job_status_update_time FROM mysql.tidb_ttl_table_status" diff --git a/ttl/client/BUILD.bazel b/ttl/client/BUILD.bazel index 6f2c7acaae481..e842ad03a887b 100644 --- a/ttl/client/BUILD.bazel +++ b/ttl/client/BUILD.bazel @@ -2,10 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "client", - srcs = ["command.go"], + srcs = [ + "command.go", + "notification.go", + ], importpath = "github.com/pingcap/tidb/ttl/client", visibility = ["//visibility:public"], deps = [ + "//ddl/util", "//util/logutil", "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", diff --git a/ttl/client/command.go b/ttl/client/command.go index bad2d756353cd..a285d9b186e3c 100644 --- a/ttl/client/command.go +++ b/ttl/client/command.go @@ -112,12 +112,13 @@ func TriggerNewTTLJob(ctx context.Context, cli CommandClient, dbName, tableName return &resp, nil } +// etcdClient is an etcd-backed client that implements both the CommandClient and NotificationClient interfaces type etcdClient struct { etcdCli *clientv3.Client } -// NewEtcdCommandClient creates a client with etcd -func NewEtcdCommandClient(etcdCli *clientv3.Client) CommandClient { +// NewCommandClient creates a command client with etcd +func NewCommandClient(etcdCli *clientv3.Client) CommandClient { return &etcdClient{ etcdCli: etcdCli, } @@ -196,6 +197,7 @@ loop: return json.Unmarshal(cmdResp.Data, obj) } +// Command implements the CommandClient func (c *etcdClient) Command(ctx context.Context, cmdType string, request interface{}, response interface{}) (string, error) { requestID, err := c.sendCmd(ctx, cmdType, request) if err != nil { @@ -204,6 +206,7 @@ func (c *etcdClient) Command(ctx context.Context, cmdType string, request interf return requestID, c.waitCmdResponse(ctx, requestID, &response) } +// TakeCommand implements the CommandClient func (c *etcdClient) TakeCommand(ctx context.Context, reqID string) (bool, error) { resp, err := c.etcdCli.Delete(ctx, ttlCmdKeyRequestPrefix+reqID) if err != nil { @@ -212,6 +215,7 @@ func (c *etcdClient) TakeCommand(ctx context.Context, reqID string) (bool, error return resp.Deleted > 0, nil } +// ResponseCommand 
implements the CommandClient func (c *etcdClient) ResponseCommand(ctx context.Context, reqID string, obj interface{}) error { resp := &cmdResponse{ RequestID: reqID, @@ -241,6 +245,7 @@ func (c *etcdClient) ResponseCommand(ctx context.Context, reqID string, obj inte return err } +// WatchCommand implements the CommandClient func (c *etcdClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { ch := make(chan *CmdRequest) go func() { @@ -279,20 +284,24 @@ func (c *etcdClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { return ch } +// mockClient is a mock implementation of the CommandClient and NotificationClient interfaces type mockClient struct { sync.Mutex - store map[string]interface{} - watchers []chan *CmdRequest + store map[string]interface{} + commandWatchers []chan *CmdRequest + notificationWatchers map[string][]chan clientv3.WatchResponse } -// NewMockCommandClient creates a mock client +// NewMockCommandClient creates a mock command client func NewMockCommandClient() CommandClient { return &mockClient{ - store: make(map[string]interface{}), - watchers: make([]chan *CmdRequest, 0, 1), + store: make(map[string]interface{}), + commandWatchers: make([]chan *CmdRequest, 0, 1), + notificationWatchers: make(map[string][]chan clientv3.WatchResponse), } } +// Command implements the CommandClient func (c *mockClient) Command(ctx context.Context, cmdType string, request interface{}, response interface{}) (string, error) { ctx, cancel := context.WithTimeout(ctx, time.Second*time.Duration(ttlCmdKeyLeaseSeconds)) defer cancel() @@ -346,7 +355,7 @@ func (c *mockClient) sendCmd(ctx context.Context, cmdType string, request interf defer c.Unlock() key := ttlCmdKeyRequestPrefix + reqID c.store[key] = req - for _, ch := range c.watchers { + for _, ch := range c.commandWatchers { select { case <-ctx.Done(): return reqID, ctx.Err() @@ -358,6 +367,7 @@ func (c *mockClient) sendCmd(ctx context.Context, cmdType string, request interf return reqID, nil } +// TakeCommand implements the CommandClient func (c *mockClient) TakeCommand(_ context.Context, reqID string) (bool, error) { c.Lock() defer c.Unlock() @@ -369,6 +379,7 @@ func (c *mockClient) TakeCommand(_ context.Context, reqID string) (bool, error) return false, nil } +// ResponseCommand implements the CommandClient func (c *mockClient) ResponseCommand(_ context.Context, reqID string, obj interface{}) error { c.Lock() defer c.Unlock() @@ -391,11 +402,12 @@ func (c *mockClient) ResponseCommand(_ context.Context, reqID string, obj interf return nil } +// WatchCommand implements the CommandClient func (c *mockClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { c.Lock() defer c.Unlock() ch := make(chan *CmdRequest, 16+len(c.store)) - c.watchers = append(c.watchers, ch) + c.commandWatchers = append(c.commandWatchers, ch) for key, val := range c.store { if strings.HasPrefix(key, ttlCmdKeyRequestPrefix) { if req, ok := val.(*CmdRequest); ok { @@ -407,9 +419,9 @@ func (c *mockClient) WatchCommand(ctx context.Context) <-chan *CmdRequest { <-ctx.Done() c.Lock() defer c.Unlock() - for i, chItem := range c.watchers { + for i, chItem := range c.commandWatchers { if chItem == ch { - c.watchers = append(c.watchers[:i], c.watchers[i+1:]...) + c.commandWatchers = append(c.commandWatchers[:i], c.commandWatchers[i+1:]...) 
break } } diff --git a/ttl/client/command_test.go b/ttl/client/command_test.go index 830137f32904e..69cde75309ad6 100644 --- a/ttl/client/command_test.go +++ b/ttl/client/command_test.go @@ -42,7 +42,7 @@ func TestCommandClient(t *testing.T) { defer cluster.Terminate(t) etcd := cluster.RandClient() - etcdCli := NewEtcdCommandClient(etcd) + etcdCli := NewCommandClient(etcd) mockCli := NewMockCommandClient() ctx, cancel := context.WithTimeout(context.TODO(), time.Minute) diff --git a/ttl/client/notification.go b/ttl/client/notification.go new file mode 100644 index 0000000000000..6c44cd0dd7aa9 --- /dev/null +++ b/ttl/client/notification.go @@ -0,0 +1,79 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + + "github.com/pingcap/tidb/ddl/util" + clientv3 "go.etcd.io/etcd/client/v3" +) + +const ttlNotificationPrefix string = "/tidb/ttl/notification/" + +// NotificationClient is a client to notify other TTL workers +type NotificationClient interface { + // Notify sends a notification + Notify(ctx context.Context, typ string, data string) error + // WatchNotification opens a channel through which all notifications can be received + WatchNotification(ctx context.Context, typ string) clientv3.WatchChan +} + +// NewNotificationClient creates a notification client with etcd +func NewNotificationClient(etcdCli *clientv3.Client) NotificationClient { + return &etcdClient{ + etcdCli: etcdCli, + } +} + +// Notify stores the corresponding key-value pair in etcd +func (c *etcdClient) Notify(ctx context.Context, typ string, data string) error { + return util.PutKVToEtcd(ctx, c.etcdCli, 1, ttlNotificationPrefix+typ, data) +} + +// WatchNotification returns a Go channel that receives notifications +func (c *etcdClient) WatchNotification(ctx context.Context, typ string) clientv3.WatchChan { + return c.etcdCli.Watch(ctx, ttlNotificationPrefix+typ) +} + +// NewMockNotificationClient creates a mock notification client +func NewMockNotificationClient() NotificationClient { + return &mockClient{ + store: make(map[string]interface{}), + commandWatchers: make([]chan *CmdRequest, 0, 1), + notificationWatchers: make(map[string][]chan clientv3.WatchResponse), + } +} + +// Notify implements the NotificationClient +func (c *mockClient) Notify(_ context.Context, typ string, data string) error { + c.Lock() + defer c.Unlock() + + for _, ch := range c.notificationWatchers[typ] { + ch <- clientv3.WatchResponse{} + } + return nil +} + +// WatchNotification implements the NotificationClient +func (c *mockClient) WatchNotification(_ context.Context, typ string) clientv3.WatchChan { + c.Lock() + defer c.Unlock() + + ch := make(chan clientv3.WatchResponse, 1) + c.notificationWatchers[typ] = append(c.notificationWatchers[typ], ch) + return ch +} diff --git a/ttl/metrics/metrics.go b/ttl/metrics/metrics.go index 8768b0e267388..8bc01551bc2a0 100644 --- a/ttl/metrics/metrics.go +++ b/ttl/metrics/metrics.go @@ -48,6 +48,8 @@ var ( RunningJobsCnt = 
metrics.TTLJobStatus.With(prometheus.Labels{metrics.LblType: "running"}) CancellingJobsCnt = metrics.TTLJobStatus.With(prometheus.Labels{metrics.LblType: "cancelling"}) + + RunningTaskCnt = metrics.TTLTaskStatus.With(prometheus.Labels{metrics.LblType: "running"}) ) func initWorkerPhases(workerType string) map[string]prometheus.Counter { @@ -133,16 +135,16 @@ func (t *PhaseTracer) EndPhase() { t.EnterPhase("") } -const ttlPhaseTraceKey = "ttlPhaseTraceKey" +type ttlPhaseTraceKey struct{} // CtxWithPhaseTracer create a new context with tracer func CtxWithPhaseTracer(ctx context.Context, tracer *PhaseTracer) context.Context { - return context.WithValue(ctx, ttlPhaseTraceKey, tracer) + return context.WithValue(ctx, ttlPhaseTraceKey{}, tracer) } // PhaseTracerFromCtx returns a tracer from a given context func PhaseTracerFromCtx(ctx context.Context) *PhaseTracer { - if tracer, ok := ctx.Value(ttlPhaseTraceKey).(*PhaseTracer); ok { + if tracer, ok := ctx.Value(ttlPhaseTraceKey{}).(*PhaseTracer); ok { return tracer } return nil diff --git a/ttl/ttlworker/BUILD.bazel b/ttl/ttlworker/BUILD.bazel index f314eca5c01ed..55fe5dffc2b8a 100644 --- a/ttl/ttlworker/BUILD.bazel +++ b/ttl/ttlworker/BUILD.bazel @@ -55,6 +55,7 @@ go_test( ], embed = [":ttlworker"], flaky = True, + race = "on", deps = [ "//domain", "//infoschema", @@ -69,6 +70,7 @@ go_test( "//testkit", "//ttl/cache", "//ttl/client", + "//ttl/metrics", "//ttl/session", "//types", "//util/chunk", @@ -76,6 +78,7 @@ go_test( "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@com_github_prometheus_client_model//go", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@org_golang_x_time//rate", diff --git a/ttl/ttlworker/config.go b/ttl/ttlworker/config.go index c1774bc667348..89ca9eedae010 100644 --- a/ttl/ttlworker/config.go +++ b/ttl/ttlworker/config.go @@ -32,6 +32,7 @@ const ttlJobTimeout = 6 * time.Hour const taskManagerLoopTickerInterval = time.Minute const ttlTaskHeartBeatTickerInterval = time.Minute +const ttlGCInterval = time.Hour func getUpdateInfoSchemaCacheInterval() time.Duration { failpoint.Inject("update-info-schema-cache-interval", func(val failpoint.Value) time.Duration { diff --git a/ttl/ttlworker/job.go b/ttl/ttlworker/job.go index f2a78e7ef0270..5ba91dcc375a0 100644 --- a/ttl/ttlworker/job.go +++ b/ttl/ttlworker/job.go @@ -43,6 +43,25 @@ const finishJobTemplate = `UPDATE mysql.tidb_ttl_table_status current_job_status_update_time = NULL WHERE table_id = %? AND current_job_id = %?` const removeTaskForJobTemplate = "DELETE FROM mysql.tidb_ttl_task WHERE job_id = %?" 
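// A sketch of how the history template below is driven (names follow this
// patch; see job.finish further down, which executes it roughly as):
//
//	sql, args := addJobHistorySQL(job, now, summary)
//	_, err := se.ExecuteSQL(context.TODO(), sql, args...) // one history row per finished, timeout, or cancelled job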
+const addJobHistoryTemplate = `INSERT INTO + mysql.tidb_ttl_job_history ( + job_id, + table_id, + parent_table_id, + table_schema, + table_name, + partition_name, + create_time, + finish_time, + ttl_expire, + summary_text, + expired_rows, + deleted_rows, + error_delete_rows, + status + ) +VALUES + (%?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?)` func updateJobCurrentStatusSQL(tableID int64, oldStatus cache.JobStatus, newStatus cache.JobStatus, jobID string) (string, []interface{}) { return updateJobCurrentStatusTemplate, []interface{}{string(newStatus), tableID, string(oldStatus), jobID} @@ -56,11 +75,41 @@ func removeTaskForJob(jobID string) (string, []interface{}) { return removeTaskForJobTemplate, []interface{}{jobID} } +func addJobHistorySQL(job *ttlJob, finishTime time.Time, summary *TTLSummary) (string, []interface{}) { + status := cache.JobStatusFinished + if job.status == cache.JobStatusTimeout || job.status == cache.JobStatusCancelled { + status = job.status + } + + var partitionName interface{} + if job.tbl.Partition.O != "" { + partitionName = job.tbl.Partition.O + } + + return addJobHistoryTemplate, []interface{}{ + job.id, + job.tbl.ID, + job.tbl.TableInfo.ID, + job.tbl.Schema.O, + job.tbl.Name.O, + partitionName, + job.createTime.Format(timeFormat), + finishTime.Format(timeFormat), + job.ttlExpireTime.Format(timeFormat), + summary.SummaryText, + summary.TotalRows, + summary.SuccessRows, + summary.ErrorRows, + string(status), + } +} + type ttlJob struct { id string ownerID string - createTime time.Time + createTime time.Time + ttlExpireTime time.Time tbl *cache.PhysicalTable @@ -71,11 +120,11 @@ type ttlJob struct { } // finish turns current job into last job, and update the error message and statistics summary -func (job *ttlJob) finish(se session.Session, now time.Time, summary string) { +func (job *ttlJob) finish(se session.Session, now time.Time, summary *TTLSummary) { // at this time, the job.ctx may have been canceled (to cancel this job) // even when it's canceled, we'll need to update the states, so use another context err := se.RunInTxn(context.TODO(), func() error { - sql, args := finishJobSQL(job.tbl.ID, now, summary, job.id) + sql, args := finishJobSQL(job.tbl.ID, now, summary.SummaryText, job.id) _, err := se.ExecuteSQL(context.TODO(), sql, args...) if err != nil { return errors.Wrapf(err, "execute sql: %s", sql) @@ -87,6 +136,12 @@ func (job *ttlJob) finish(se session.Session, now time.Time, summary string) { return errors.Wrapf(err, "execute sql: %s", sql) } + sql, args = addJobHistorySQL(job, now, summary) + _, err = se.ExecuteSQL(context.TODO(), sql, args...) + if err != nil { + return errors.Wrapf(err, "execute sql: %s", sql) + } + return nil }, session.TxnModeOptimistic) diff --git a/ttl/ttlworker/job_manager.go b/ttl/ttlworker/job_manager.go index 223128b52f26c..5f8b7bd038fc4 100644 --- a/ttl/ttlworker/job_manager.go +++ b/ttl/ttlworker/job_manager.go @@ -38,12 +38,14 @@ import ( "go.uber.org/zap" ) +const scanTaskNotificationType string = "scan" + const insertNewTableIntoStatusTemplate = "INSERT INTO mysql.tidb_ttl_table_status (table_id,parent_table_id) VALUES (%?, %?)" const setTableStatusOwnerTemplate = `UPDATE mysql.tidb_ttl_table_status SET current_job_id = %?, current_job_owner_id = %?, current_job_start_time = %?, - current_job_status = 'waiting', + current_job_status = 'running', current_job_status_update_time = %?, current_job_ttl_expire = %?, current_job_owner_hb_time = %? 
@@ -56,6 +58,8 @@ const taskGCTemplate = `DELETE task FROM ON task.job_id = job.current_job_id WHERE job.table_id IS NULL` +const ttlJobHistoryGCTemplate = `DELETE FROM mysql.tidb_ttl_job_history WHERE create_time < CURDATE() - INTERVAL 90 DAY` + const timeFormat = "2006-01-02 15:04:05" func insertNewTableIntoStatusSQL(tableID int64, parentTableID int64) (string, []interface{}) { @@ -82,8 +86,9 @@ type JobManager struct { // id is the ddl id of this instance id string - store kv.Storage - cmdCli client.CommandClient + store kv.Storage + cmdCli client.CommandClient + notificationCli client.NotificationClient // infoSchemaCache and tableStatusCache are a cache stores the information from info schema and the tidb_ttl_table_status // table. They don't need to be protected by mutex, because they are only used in job loop goroutine. @@ -113,9 +118,11 @@ func NewJobManager(id string, sessPool sessionPool, store kv.Storage, etcdCli *c manager.tableStatusCache = cache.NewTableStatusCache(getUpdateTTLTableStatusCacheInterval()) if etcdCli != nil { - manager.cmdCli = client.NewEtcdCommandClient(etcdCli) + manager.cmdCli = client.NewCommandClient(etcdCli) + manager.notificationCli = client.NewNotificationClient(etcdCli) } else { manager.cmdCli = client.NewMockCommandClient() + manager.notificationCli = client.NewMockNotificationClient() } manager.taskManager = newTaskManager(manager.ctx, sessPool, manager.infoSchemaCache, id) @@ -138,7 +145,7 @@ func (m *JobManager) jobLoop() error { infoSchemaCacheUpdateTicker := time.Tick(m.infoSchemaCache.GetInterval()) tableStatusCacheUpdateTicker := time.Tick(m.tableStatusCache.GetInterval()) resizeWorkersTicker := time.Tick(getResizeWorkersInterval()) - taskGC := time.Tick(jobManagerLoopTickerInterval) + gcTicker := time.Tick(ttlGCInterval) scheduleJobTicker := time.Tick(jobManagerLoopTickerInterval) jobCheckTicker := time.Tick(jobManagerLoopTickerInterval) @@ -150,9 +157,11 @@ func (m *JobManager) jobLoop() error { checkScanTaskFinishedTicker := time.Tick(getTaskManagerLoopTickerInterval()) cmdWatcher := m.cmdCli.WatchCommand(m.ctx) + scanTaskNotificationWatcher := m.notificationCli.WatchNotification(m.ctx, scanTaskNotificationType) m.taskManager.resizeWorkersWithSysVar() for { m.reportMetrics() + m.taskManager.reportMetrics() now := se.Now() select { @@ -169,12 +178,9 @@ func (m *JobManager) jobLoop() error { if err != nil { logutil.Logger(m.ctx).Warn("fail to update table status cache", zap.Error(err)) } - case <-taskGC: - taskGCCtx, cancel := context.WithTimeout(m.ctx, ttlInternalSQLTimeout) - _, err = se.ExecuteSQL(taskGCCtx, taskGCTemplate) - if err != nil { - logutil.Logger(m.ctx).Warn("fail to gc redundant scan task", zap.Error(err)) - } + case <-gcTicker: + gcCtx, cancel := context.WithTimeout(m.ctx, ttlInternalSQLTimeout) + DoGC(gcCtx, se) cancel() // Job Schedule loop: case <-updateJobHeartBeatTicker: @@ -208,6 +214,17 @@ func (m *JobManager) jobLoop() error { // Task Manager Loop case <-scheduleTaskTicker: m.taskManager.rescheduleTasks(se, now) + case _, ok := <-scanTaskNotificationWatcher: + if !ok { + if m.ctx.Err() != nil { + return nil + } + + logutil.BgLogger().Warn("the TTL scan task notification watcher was closed unexpectedly, watching it again") + scanTaskNotificationWatcher = m.notificationCli.WatchNotification(m.ctx, scanTaskNotificationType) + continue + } + m.taskManager.rescheduleTasks(se, now) case <-taskCheckTicker: m.taskManager.checkInvalidTask(se) m.taskManager.checkFinishedTask(se, now) @@ -611,23 +628,32 @@ func (m *JobManager) 
lockNewJob(ctx context.Context, se session.Session, table * if err != nil { return nil, err } - return m.createNewJob(now, table) + + job := m.createNewJob(expireTime, now, table) + + // the job is created; notify all scan managers to fetch new tasks + err = m.notificationCli.Notify(m.ctx, scanTaskNotificationType, job.id) + if err != nil { + logutil.Logger(m.ctx).Warn("fail to trigger scan tasks", zap.Error(err)) + } + return job, nil } -func (m *JobManager) createNewJob(now time.Time, table *cache.PhysicalTable) (*ttlJob, error) { +func (m *JobManager) createNewJob(expireTime time.Time, now time.Time, table *cache.PhysicalTable) *ttlJob { id := m.tableStatusCache.Tables[table.ID].CurrentJobID return &ttlJob{ id: id, ownerID: m.id, - createTime: now, + createTime: now, + ttlExpireTime: expireTime, // at least, the info schema cache and table status cache are consistent in table id, so it's safe to get table // information from schema cache directly tbl: table, - status: cache.JobStatusWaiting, - }, nil + status: cache.JobStatusRunning, + } } // updateHeartBeat updates the heartbeat for all task with current instance as owner @@ -687,7 +713,13 @@ func (m *JobManager) GetCommandCli() client.CommandClient { return m.cmdCli } -type ttlSummary struct { +// GetNotificationCli returns the notification client +func (m *JobManager) GetNotificationCli() client.NotificationClient { + return m.notificationCli +} + +// TTLSummary is the summary of a TTL job +type TTLSummary struct { TotalRows uint64 `json:"total_rows"` SuccessRows uint64 `json:"success_rows"` ErrorRows uint64 `json:"error_rows"` @@ -697,22 +729,24 @@ type ttlSummary struct { FinishedScanTask int `json:"finished_scan_task"` ScanTaskErr string `json:"scan_task_err,omitempty"` + SummaryText string `json:"-"` } -func summarizeErr(err error) (string, error) { - summary := &ttlSummary{ +func summarizeErr(err error) (*TTLSummary, error) { + summary := &TTLSummary{ ScanTaskErr: err.Error(), } buf, err := json.Marshal(summary) if err != nil { - return "", err + return nil, err } - return string(buf), nil + summary.SummaryText = string(buf) + return summary, nil } -func summarizeTaskResult(tasks []*cache.TTLTask) (string, error) { - summary := &ttlSummary{} +func summarizeTaskResult(tasks []*cache.TTLTask) (*TTLSummary, error) { + summary := &TTLSummary{} var allErr error for _, t := range tasks { if t.State != nil { @@ -738,7 +772,19 @@ func summarizeTaskResult(tasks []*cache.TTLTask) (string, error) { buf, err := json.Marshal(summary) if err != nil { - return "", err + return nil, err + } + summary.SummaryText = string(buf) + return summary, nil +} + +// DoGC deletes TTL job histories older than 90 days and redundant scan tasks +func DoGC(ctx context.Context, se session.Session) { + if _, err := se.ExecuteSQL(ctx, taskGCTemplate); err != nil { + logutil.Logger(ctx).Warn("fail to gc redundant scan task", zap.Error(err)) + } + + if _, err := se.ExecuteSQL(ctx, ttlJobHistoryGCTemplate); err != nil { + logutil.Logger(ctx).Warn("fail to gc ttl job history", zap.Error(err)) } - return string(buf), nil } diff --git a/ttl/ttlworker/job_manager_integration_test.go b/ttl/ttlworker/job_manager_integration_test.go index c763e1363aecd..45fd9e69e2bf8 100644 --- a/ttl/ttlworker/job_manager_integration_test.go +++ b/ttl/ttlworker/job_manager_integration_test.go @@ -16,8 +16,10 @@ package ttlworker_test import ( "context" + "encoding/json" "fmt" "strconv" + "strings" "sync" "testing" "time" @@ -32,9 +34,11 @@ import ( "github.com/pingcap/tidb/testkit" 
"github.com/pingcap/tidb/ttl/cache" "github.com/pingcap/tidb/ttl/client" + "github.com/pingcap/tidb/ttl/metrics" "github.com/pingcap/tidb/ttl/session" "github.com/pingcap/tidb/ttl/ttlworker" "github.com/pingcap/tidb/util/logutil" + dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/require" "go.uber.org/atomic" "go.uber.org/zap" @@ -69,7 +73,7 @@ func TestParallelLockNewJob(t *testing.T) { se := sessionFactory() job, err := m.LockNewJob(context.Background(), se, testTable, time.Now(), false) require.NoError(t, err) - job.Finish(se, time.Now(), "") + job.Finish(se, time.Now(), &ttlworker.TTLSummary{}) // lock one table in parallel, only one of them should lock successfully testTimes := 100 @@ -103,18 +107,19 @@ func TestParallelLockNewJob(t *testing.T) { wg.Wait() require.Equal(t, uint64(1), successCounter.Load()) - successJob.Finish(se, time.Now(), "") + successJob.Finish(se, time.Now(), &ttlworker.TTLSummary{}) } } func TestFinishJob(t *testing.T) { + timeFormat := "2006-01-02 15:04:05" store, dom := testkit.CreateMockStoreAndDomain(t) waitAndStopTTLManager(t, dom) tk := testkit.NewTestKit(t, store) sessionFactory := sessionFactory(t, store) - testTable := &cache.PhysicalTable{ID: 2, TableInfo: &model.TableInfo{ID: 1, TTLInfo: &model.TTLInfo{IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay)}}} + testTable := &cache.PhysicalTable{ID: 2, Schema: model.NewCIStr("db1"), TableInfo: &model.TableInfo{ID: 1, Name: model.NewCIStr("t1"), TTLInfo: &model.TTLInfo{IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay)}}} tk.MustExec("insert into mysql.tidb_ttl_table_status(table_id) values (2)") @@ -122,13 +127,33 @@ func TestFinishJob(t *testing.T) { m := ttlworker.NewJobManager("test-id", nil, store, nil) m.InfoSchemaCache().Tables[testTable.ID] = testTable se := sessionFactory() - job, err := m.LockNewJob(context.Background(), se, testTable, time.Now(), false) + startTime := time.Now() + job, err := m.LockNewJob(context.Background(), se, testTable, startTime, false) + require.NoError(t, err) + + expireTime, err := testTable.EvalExpireTime(context.Background(), se, startTime) require.NoError(t, err) - summary := `{"total_rows":0,"scan_task_err":"\"'an error message contains both single and double quote'\""}` - job.Finish(se, time.Now(), summary) - tk.MustQuery("select table_id, last_job_summary from mysql.tidb_ttl_table_status").Check(testkit.Rows(`2 {"total_rows":0,"scan_task_err":"\"'an error message contains both single and double quote'\""}`)) + summary := &ttlworker.TTLSummary{ + ScanTaskErr: "\"'an error message contains both single and double quote'\"", + TotalRows: 128, + SuccessRows: 120, + ErrorRows: 8, + } + summaryBytes, err := json.Marshal(summary) + summary.SummaryText = string(summaryBytes) + + require.NoError(t, err) + endTime := time.Now() + job.Finish(se, endTime, summary) + tk.MustQuery("select table_id, last_job_summary from mysql.tidb_ttl_table_status").Check(testkit.Rows("2 " + summary.SummaryText)) tk.MustQuery("select * from mysql.tidb_ttl_task").Check(testkit.Rows()) + expectedRow := []string{ + job.ID(), "2", "1", "db1", "t1", "", + startTime.Format(timeFormat), endTime.Format(timeFormat), expireTime.Format(timeFormat), + summary.SummaryText, "128", "120", "8", "finished", + } + tk.MustQuery("select * from mysql.tidb_ttl_job_history").Check(testkit.Rows(strings.Join(expectedRow, " "))) } func TestTTLAutoAnalyze(t *testing.T) { @@ -407,6 +432,50 @@ func TestJobTimeout(t *testing.T) { tk.MustQuery("select count(*) 
from mysql.tidb_ttl_task").Check(testkit.Rows("0")) } +func TestTriggerScanTask(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + sessionFactory := sessionFactory(t, store) + now := time.Now() + se := sessionFactory() + + waitAndStopTTLManager(t, dom) + + tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") + + m := ttlworker.NewJobManager("manager-1", nil, store, nil) + require.NoError(t, m.InfoSchemaCache().Update(se)) + m.TaskManager().ResizeWorkersWithSysVar() + m.Start() + defer func() { + m.Stop() + require.NoError(t, m.WaitStopped(context.Background(), time.Second*10)) + }() + + nCli := m.GetNotificationCli() + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + <-nCli.WatchNotification(context.Background(), "scan") + wg.Done() + }() + m.RescheduleJobs(se, now) + + // notification is sent + wg.Wait() + + for time.Now().Before(now.Add(time.Second * 5)) { + time.Sleep(time.Second) + rows := tk.MustQuery("SELECT status FROM mysql.tidb_ttl_task").Rows() + if len(rows) == 0 { + break + } + if rows[0][0] == cache.TaskStatusFinished { + break + } + } +} + func waitAndStopTTLManager(t *testing.T, dom *domain.Domain) { maxWaitTime := 30 for { @@ -423,3 +492,125 @@ func waitAndStopTTLManager(t *testing.T, dom *domain.Domain) { continue } } + +func TestGCScanTasks(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + addTableStatusRecord := func(tableID, parentTableID, curJobID int64) { + tk.MustExec("INSERT INTO mysql.tidb_ttl_table_status (table_id,parent_table_id) VALUES (?, ?)", tableID, parentTableID) + if curJobID == 0 { + return + } + + tk.MustExec(`UPDATE mysql.tidb_ttl_table_status + SET current_job_id = ?, + current_job_owner_id = '12345', + current_job_start_time = NOW(), + current_job_status = 'running', + current_job_status_update_time = NOW(), + current_job_ttl_expire = NOW(), + current_job_owner_hb_time = NOW() + WHERE table_id = ?`, curJobID, tableID) + } + + addScanTaskRecord := func(jobID, tableID, scanID int64) { + tk.MustExec(`INSERT INTO mysql.tidb_ttl_task SET + job_id = ?, + table_id = ?, + scan_id = ?, + expire_time = NOW(), + created_time = NOW()`, jobID, tableID, scanID) + } + + addTableStatusRecord(1, 1, 1) + addScanTaskRecord(1, 1, 1) + addScanTaskRecord(1, 1, 2) + addScanTaskRecord(2, 1, 1) + addScanTaskRecord(2, 1, 2) + addScanTaskRecord(3, 2, 1) + addScanTaskRecord(3, 2, 2) + + se := session.NewSession(tk.Session(), tk.Session(), func(_ session.Session) {}) + ttlworker.DoGC(context.TODO(), se) + tk.MustQuery("select job_id, scan_id from mysql.tidb_ttl_task order by job_id, scan_id asc").Check(testkit.Rows("1 1", "1 2")) +} + +func TestGCTTLHistory(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + addHistory := func(jobID, createdBeforeDays int) { + tk.MustExec(fmt.Sprintf(`INSERT INTO mysql.tidb_ttl_job_history ( + job_id, + table_id, + parent_table_id, + table_schema, + table_name, + partition_name, + create_time, + finish_time, + ttl_expire, + summary_text, + expired_rows, + deleted_rows, + error_delete_rows, + status + ) + VALUES + ( + %d, 1, 1, 'test', 't1', '', + CURDATE() - INTERVAL %d DAY, + CURDATE() - INTERVAL %d DAY + INTERVAL 1 HOUR, + CURDATE() - INTERVAL %d DAY, + "", 100, 100, 0, "finished" + )`, jobID, createdBeforeDays, createdBeforeDays, createdBeforeDays)) + } + + addHistory(1, 1) + addHistory(2, 30) + addHistory(3, 60) + addHistory(4, 
89) + addHistory(5, 90) + addHistory(6, 91) + addHistory(7, 100) + se := session.NewSession(tk.Session(), tk.Session(), func(_ session.Session) {}) + ttlworker.DoGC(context.TODO(), se) + tk.MustQuery("select job_id from mysql.tidb_ttl_job_history order by job_id asc").Check(testkit.Rows("1", "2", "3", "4", "5")) +} + +func TestJobMetrics(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + sessionFactory := sessionFactory(t, store) + + waitAndStopTTLManager(t, dom) + + now := time.Now() + tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") + table, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + require.NoError(t, err) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTTL) + + se := sessionFactory() + m := ttlworker.NewJobManager("manager-1", nil, store, nil) + m.TaskManager().ResizeWorkersWithSysVar() + require.NoError(t, m.InfoSchemaCache().Update(se)) + // schedule jobs + m.RescheduleJobs(se, now) + // set the worker to be empty, so none of the tasks will be scheduled + m.TaskManager().SetScanWorkers4Test([]ttlworker.Worker{}) + + sql, args := cache.SelectFromTTLTableStatusWithID(table.Meta().ID) + rows, err := se.ExecuteSQL(ctx, sql, args...) + require.NoError(t, err) + tableStatus, err := cache.RowToTableStatus(se, rows[0]) + require.NoError(t, err) + + require.NotEmpty(t, tableStatus.CurrentJobID) + require.Equal(t, "manager-1", tableStatus.CurrentJobOwnerID) + require.Equal(t, cache.JobStatusRunning, tableStatus.CurrentJobStatus) + + m.ReportMetrics() + out := &dto.Metric{} + require.NoError(t, metrics.RunningJobsCnt.Write(out)) + require.Equal(t, float64(1), out.GetGauge().GetValue()) +} diff --git a/ttl/ttlworker/job_manager_test.go b/ttl/ttlworker/job_manager_test.go index 9e0211410591b..311a42072ea91 100644 --- a/ttl/ttlworker/job_manager_test.go +++ b/ttl/ttlworker/job_manager_test.go @@ -171,7 +171,12 @@ func (m *JobManager) UpdateHeartBeat(ctx context.Context, se session.Session, no return m.updateHeartBeat(ctx, se, now) } -func (j *ttlJob) Finish(se session.Session, now time.Time, summary string) { +// ReportMetrics is an exported version of reportMetrics +func (m *JobManager) ReportMetrics() { + m.reportMetrics() +} + +func (j *ttlJob) Finish(se session.Session, now time.Time, summary *TTLSummary) { j.finish(se, now, summary) } diff --git a/ttl/ttlworker/task_manager.go b/ttl/ttlworker/task_manager.go index f20e5cee4f3f6..0db5405034c25 100644 --- a/ttl/ttlworker/task_manager.go +++ b/ttl/ttlworker/task_manager.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/ttl/cache" + "github.com/pingcap/tidb/ttl/metrics" "github.com/pingcap/tidb/ttl/session" "github.com/pingcap/tidb/util/logutil" "go.uber.org/multierr" @@ -327,15 +328,9 @@ func (m *taskManager) lockScanTask(se session.Session, task *cache.TTLTask, now } err := se.RunInTxn(ctx, func() error { - sql, args := cache.SelectFromTTLTaskWithID(task.JobID, task.ScanID) - rows, err := se.ExecuteSQL(ctx, sql+" FOR UPDATE NOWAIT", args...) 
- if err != nil { - return errors.Wrapf(err, "execute sql: %s", sql) - } - if len(rows) == 0 { - return errors.Errorf("didn't find task with jobID: %s, scanID: %d", task.JobID, task.ScanID) - } - task, err = cache.RowToTTLTask(se, rows[0]) + var err error + + task, err = m.syncTaskFromTable(se, task.JobID, task.ScanID, true) if err != nil { return err } @@ -343,7 +338,7 @@ func (m *taskManager) lockScanTask(se session.Session, task *cache.TTLTask, now return errors.New("task is already scheduled") } - sql, args = setTTLTaskOwnerSQL(task.JobID, task.ScanID, m.id, now) + sql, args := setTTLTaskOwnerSQL(task.JobID, task.ScanID, m.id, now) _, err = se.ExecuteSQL(ctx, sql, args...) if err != nil { return errors.Wrapf(err, "execute sql: %s", sql) @@ -355,6 +350,12 @@ func (m *taskManager) lockScanTask(se session.Session, task *cache.TTLTask, now return nil, err } + // update the task after setting status and owner + task, err = m.syncTaskFromTable(se, task.JobID, task.ScanID, false) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(m.ctx) scanTask := &ttlScanTask{ ctx: ctx, @@ -371,6 +372,28 @@ func (m *taskManager) lockScanTask(se session.Session, task *cache.TTLTask, now }, nil } +func (m *taskManager) syncTaskFromTable(se session.Session, jobID string, scanID int64, detectLock bool) (*cache.TTLTask, error) { + ctx := m.ctx + + sql, args := cache.SelectFromTTLTaskWithID(jobID, scanID) + if detectLock { + sql += " FOR UPDATE NOWAIT" + } + rows, err := se.ExecuteSQL(ctx, sql, args...) + if err != nil { + return nil, errors.Wrapf(err, "execute sql: %s", sql) + } + if len(rows) == 0 { + return nil, errors.Errorf("didn't find task with jobID: %s, scanID: %d", jobID, scanID) + } + task, err := cache.RowToTTLTask(se, rows[0]) + if err != nil { + return nil, err + } + + return task, nil +} + // updateHeartBeat updates the heartbeat for all tasks with current instance as owner func (m *taskManager) updateHeartBeat(ctx context.Context, se session.Session, now time.Time) error { for _, task := range m.runningTasks { @@ -427,6 +450,7 @@ func (m *taskManager) reportTaskFinished(se session.Session, now time.Time, task if err != nil { return err } + task.Status = cache.TaskStatusFinished timeoutCtx, cancel := context.WithTimeout(m.ctx, ttlInternalSQLTimeout) _, err = se.ExecuteSQL(timeoutCtx, sql, args...) 
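// Note on the refactor above: lockScanTask now reads the task row through
// syncTaskFromTable; with detectLock=true the query appends "FOR UPDATE NOWAIT",
// so a row already locked by another instance fails fast instead of blocking.
// A sketch of the two call sites:
//
//	task, err = m.syncTaskFromTable(se, task.JobID, task.ScanID, true)  // inside the txn, takes the row lock
//	task, err = m.syncTaskFromTable(se, task.JobID, task.ScanID, false) // after commit, plain re-read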
diff --git a/ttl/ttlworker/task_manager_integration_test.go b/ttl/ttlworker/task_manager_integration_test.go
index 8b7d0df5257b0..9e3bad19b2acd 100644
--- a/ttl/ttlworker/task_manager_integration_test.go
+++ b/ttl/ttlworker/task_manager_integration_test.go
@@ -26,8 +26,10 @@ import (
 	"github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/testkit"
 	"github.com/pingcap/tidb/ttl/cache"
+	"github.com/pingcap/tidb/ttl/metrics"
 	"github.com/pingcap/tidb/ttl/ttlworker"
 	"github.com/pingcap/tidb/util/logutil"
+	dto "github.com/prometheus/client_model/go"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
 	"go.uber.org/zap"
@@ -185,3 +187,35 @@ func TestTaskScheduleExpireHeartBeat(t *testing.T) {
 	m2.RescheduleTasks(sessionFactory(), now.Add(time.Hour))
 	tk.MustQuery("select status,owner_id from mysql.tidb_ttl_task").Check(testkit.Rows("running task-manager-2"))
 }
+
+func TestTaskMetrics(t *testing.T) {
+	store, dom := testkit.CreateMockStoreAndDomain(t)
+	waitAndStopTTLManager(t, dom)
+	tk := testkit.NewTestKit(t, store)
+	sessionFactory := sessionFactory(t, store)
+
+	// create table and scan task
+	tk.MustExec("create table test.t(id int, created_at datetime) ttl=created_at + interval 1 day")
+	table, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
+	require.NoError(t, err)
+	sql := fmt.Sprintf("insert into mysql.tidb_ttl_task(job_id,table_id,scan_id,expire_time,created_time) values ('test-job', %d, %d, NOW(), NOW())", table.Meta().ID, 1)
+	tk.MustExec(sql)
+
+	// update the infoschema cache
+	isc := cache.NewInfoSchemaCache(time.Second)
+	require.NoError(t, isc.Update(sessionFactory()))
+	now := time.Now()
+
+	// schedule in a task manager
+	scanWorker := ttlworker.NewMockScanWorker(t)
+	scanWorker.Start()
+	m := ttlworker.NewTaskManager(context.Background(), nil, isc, "task-manager-1")
+	m.SetScanWorkers4Test([]ttlworker.Worker{scanWorker})
+	m.RescheduleTasks(sessionFactory(), now)
+	tk.MustQuery("select status,owner_id from mysql.tidb_ttl_task").Check(testkit.Rows("running task-manager-1"))
+
+	m.ReportMetrics()
+	out := &dto.Metric{}
+	require.NoError(t, metrics.RunningTaskCnt.Write(out))
+	require.Equal(t, float64(1), out.GetGauge().GetValue())
+}
diff --git a/ttl/ttlworker/task_manager_test.go b/ttl/ttlworker/task_manager_test.go
index 9241146b719b3..37365cb9757f6 100644
--- a/ttl/ttlworker/task_manager_test.go
+++ b/ttl/ttlworker/task_manager_test.go
@@ -49,6 +49,11 @@ func (m *taskManager) RescheduleTasks(se session.Session, now time.Time) {
 	m.rescheduleTasks(se, now)
 }
 
+// ReportMetrics is an exported version of reportMetrics
+func (m *taskManager) ReportMetrics() {
+	m.reportMetrics()
+}
+
 func TestResizeWorkers(t *testing.T) {
 	tbl := newMockTTLTbl(t, "t1")
 
diff --git a/util/gctuner/tuner.go b/util/gctuner/tuner.go
index ec74f48ec4be0..3332d77b93875 100644
--- a/util/gctuner/tuner.go
+++ b/util/gctuner/tuner.go
@@ -144,15 +144,18 @@ func (t *tuner) getGCPercent() uint32 {
 // tuning check the memory inuse and tune GC percent dynamically.
 // Go runtime ensure that it will be called serially.
 func (t *tuner) tuning() {
+	if !EnableGOGCTuner.Load() {
+		return
+	}
+
 	inuse := readMemoryInuse()
 	threshold := t.getThreshold()
 	// stop gc tuning
 	if threshold <= 0 {
 		return
 	}
-	if EnableGOGCTuner.Load() {
-		t.setGCPercent(calcGCPercent(inuse, threshold))
-	}
+
+	t.setGCPercent(calcGCPercent(inuse, threshold))
 }
 
 // threshold = inuse + inuse * (gcPercent / 100)
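Editor's note: tuning() now short-circuits before reading memory stats when the tuner is disabled. The comment at the end of the hunk gives the relation the tuner inverts: threshold = inuse + inuse * (gcPercent / 100), so gcPercent = (threshold - inuse) * 100 / inuse. A hedged sketch of that derivation follows; the clamping bounds are invented and the real calcGCPercent may differ:

package main

import "fmt"

// calcGCPercentSketch inverts threshold = inuse + inuse*(gcPercent/100)
// to pick a GOGC value that makes the next GC trigger near the threshold.
// The min/max bounds below are illustrative, not TiDB's actual limits.
func calcGCPercentSketch(inuse, threshold uint64) uint32 {
	const (
		minGCPercent = 10
		maxGCPercent = 500
	)
	if inuse == 0 || threshold <= inuse {
		return minGCPercent
	}
	gcPercent := (threshold - inuse) * 100 / inuse
	if gcPercent < minGCPercent {
		return minGCPercent
	}
	if gcPercent > maxGCPercent {
		return maxGCPercent
	}
	return uint32(gcPercent)
}

func main() {
	// With 1 GiB in use and a 4 GiB threshold, GOGC lands at 300:
	// the live heap may roughly triple before the next GC cycle.
	fmt.Println(calcGCPercentSketch(1<<30, 4<<30)) // 300
}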
diff --git a/util/gpool/spmc/spmcpool.go b/util/gpool/spmc/spmcpool.go
index 6644a0e895650..5f58bba12d5b4 100644
--- a/util/gpool/spmc/spmcpool.go
+++ b/util/gpool/spmc/spmcpool.go
@@ -140,12 +140,22 @@ func (p *Pool[T, U, C, CT, TF]) Tune(size int) {
 	p.SetLastTuneTs(time.Now())
 	p.capacity.Store(int32(size))
 	if size > capacity {
-		// boost
+		for i := 0; i < size-capacity; i++ {
+			if tid, boostTask := p.taskManager.Overclock(); boostTask != nil {
+				p.addWaitingTask()
+				p.taskManager.AddSubTask(tid, boostTask.Clone())
+				p.taskCh <- boostTask
+			}
+		}
 		if size-capacity == 1 {
 			p.cond.Signal()
 			return
 		}
 		p.cond.Broadcast()
+		return
+	}
+	if size < capacity {
+		p.taskManager.Downclock()
 	}
 }
 
diff --git a/util/gpool/spmc/spmcpool_test.go b/util/gpool/spmc/spmcpool_test.go
index 3036ad7412a3c..5bc5da4fdf3bc 100644
--- a/util/gpool/spmc/spmcpool_test.go
+++ b/util/gpool/spmc/spmcpool_test.go
@@ -15,9 +15,11 @@ package spmc
 import (
+	"fmt"
 	"sync"
 	"sync/atomic"
 	"testing"
+	"time"
 
 	"github.com/pingcap/tidb/resourcemanager/pooltask"
 	rmutil "github.com/pingcap/tidb/resourcemanager/util"
@@ -121,6 +123,78 @@ func TestStopPool(t *testing.T) {
 	pool.ReleaseAndWait()
 }
 
+func TestTuneSimplePool(t *testing.T) {
+	testTunePool(t, "TestTuneSimplePool")
+}
+
+func TestTuneMultiPool(t *testing.T) {
+	var concurrency = 5
+	var wg sync.WaitGroup
+	wg.Add(concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func(id int) {
+			testTunePool(t, fmt.Sprintf("TestTuneMultiPool%d", id))
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+}
+
+func testTunePool(t *testing.T, name string) {
+	type ConstArgs struct {
+		a int
+	}
+	myArgs := ConstArgs{a: 10}
+	// init the pool
+	// input type, output type, constArgs type
+	pool, err := NewSPMCPool[int, int, ConstArgs, any, pooltask.NilContext](name, 10, rmutil.UNKNOWN)
+	require.NoError(t, err)
+	pool.SetConsumerFunc(func(task int, constArgs ConstArgs, ctx any) int {
+		return task + constArgs.a
+	})
+
+	exit := make(chan struct{})
+
+	pfunc := func() (int, error) {
+		select {
+		case <-exit:
+			return 0, gpool.ErrProducerClosed
+		default:
+			return 1, nil
+		}
+	}
+	// add new task
+	resultCh, control := pool.AddProducer(pfunc, myArgs, pooltask.NilContext{}, WithConcurrency(10))
+	tid := control.TaskID()
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for result := range resultCh {
+			require.Greater(t, result, 10)
+		}
+	}()
+	time.Sleep(1 * time.Second)
+	newSize := pool.Cap() - 1
+	pool.Tune(newSize)
+	time.Sleep(1 * time.Second)
+	require.Equal(t, newSize, pool.Cap())
+	require.Equal(t, int32(newSize), pool.taskManager.Running(tid))
+
+	newSize = pool.Cap() + 1
+	pool.Tune(newSize)
+	time.Sleep(1 * time.Second)
+	require.Equal(t, newSize, pool.Cap())
+	require.Equal(t, int32(newSize), pool.taskManager.Running(tid))
+
+	// exit test
+	close(exit)
+	control.Wait()
+	wg.Wait()
+	// close pool
+	pool.ReleaseAndWait()
+}
+
 func TestPoolWithEnoughCapacity(t *testing.T) {
 	const (
 		RunTimes = 1000
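Editor's note: Tune now has three cases: growing by one wakes a single waiter, growing by more broadcasts so every waiter re-checks capacity, and shrinking calls Downclock with no wake-up at all. A toy sketch of that shape follows; the Overclock/Downclock bookkeeping of the real pool is omitted, and all names here are invented:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// tunablePool models the Tune pattern above: capacity is an atomic so
// readers never block, and a condition variable wakes goroutines that
// are waiting for spare capacity.
type tunablePool struct {
	capacity atomic.Int32
	mu       sync.Mutex
	cond     *sync.Cond
}

func newTunablePool(size int32) *tunablePool {
	p := &tunablePool{}
	p.capacity.Store(size)
	p.cond = sync.NewCond(&p.mu)
	return p
}

func (p *tunablePool) Tune(size int32) {
	old := p.capacity.Swap(size)
	if size > old {
		// A single extra slot needs only one waiter; a larger jump
		// wakes everyone so all waiters re-check the new capacity.
		if size-old == 1 {
			p.cond.Signal()
			return
		}
		p.cond.Broadcast()
	}
	// Shrinking needs no wake-up: workers observe the smaller
	// capacity the next time they try to claim a slot.
}

func main() {
	p := newTunablePool(4)
	p.Tune(6)
	fmt.Println(p.capacity.Load()) // 6
}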
diff --git a/util/gpool/spmc/worker.go b/util/gpool/spmc/worker.go
index b8e22376bb79a..158c677775987 100644
--- a/util/gpool/spmc/worker.go
+++ b/util/gpool/spmc/worker.go
@@ -67,7 +67,7 @@ func (w *goWorker[T, U, C, CT, TF]) run() {
 		for t := range f.GetTaskCh() {
 			if f.GetStatus() == pooltask.StopTask {
 				f.Done()
-				continue
+				break
 			}
 			f.GetResultCh() <- w.pool.consumerFunc(t.Task, f.ConstArgs(), ctx)
 			f.Done()
diff --git a/util/signal/BUILD.bazel b/util/signal/BUILD.bazel
index af19da427497c..e2963d6f554bb 100644
--- a/util/signal/BUILD.bazel
+++ b/util/signal/BUILD.bazel
@@ -9,8 +9,31 @@ go_library(
     ],
     importpath = "github.com/pingcap/tidb/util/signal",
     visibility = ["//visibility:public"],
-    deps = [
-        "//util/logutil",
-        "@org_uber_go_zap//:zap",
-    ],
+    deps = select({
+        "@io_bazel_rules_go//go/platform:android": [
+            "//util/logutil",
+            "@org_uber_go_zap//:zap",
+        ],
+        "@io_bazel_rules_go//go/platform:darwin": [
+            "//util/logutil",
+            "@org_uber_go_zap//:zap",
+        ],
+        "@io_bazel_rules_go//go/platform:freebsd": [
+            "//util/logutil",
+            "@org_uber_go_zap//:zap",
+        ],
+        "@io_bazel_rules_go//go/platform:ios": [
+            "//util/logutil",
+            "@org_uber_go_zap//:zap",
+        ],
+        "@io_bazel_rules_go//go/platform:linux": [
+            "//util/logutil",
+            "@org_uber_go_zap//:zap",
+        ],
+        "@io_bazel_rules_go//go/platform:windows": [
+            "//util/logutil",
+            "@org_uber_go_zap//:zap",
+        ],
+        "//conditions:default": [],
+    }),
 )
diff --git a/util/syncutil/BUILD.bazel b/util/syncutil/BUILD.bazel
index 919301546f69c..7703cfd35f89b 100644
--- a/util/syncutil/BUILD.bazel
+++ b/util/syncutil/BUILD.bazel
@@ -3,10 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
 go_library(
     name = "syncutil",
     srcs = [
-        "mutex_deadlock.go",
+        "mutex_deadlock.go",  #keep
        "mutex_sync.go",
     ],
     importpath = "github.com/pingcap/tidb/util/syncutil",
     visibility = ["//visibility:public"],
-    deps = ["@com_github_sasha_s_go_deadlock//:go-deadlock"],
+    deps = ["@com_github_sasha_s_go_deadlock//:go-deadlock"],  #keep
 )
diff --git a/util/sys/linux/BUILD.bazel b/util/sys/linux/BUILD.bazel
index 1212afe2fc3d9..f1363c2cb0d71 100644
--- a/util/sys/linux/BUILD.bazel
+++ b/util/sys/linux/BUILD.bazel
@@ -9,7 +9,48 @@ go_library(
     ],
     importpath = "github.com/pingcap/tidb/util/sys/linux",
     visibility = ["//visibility:public"],
-    deps = ["@org_golang_x_sys//unix"],
+    deps = select({
+        "@io_bazel_rules_go//go/platform:aix": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:android": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:darwin": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:dragonfly": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:freebsd": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:illumos": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:ios": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:js": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:linux": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:netbsd": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:openbsd": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:plan9": [
+            "@org_golang_x_sys//unix",
+        ],
+        "@io_bazel_rules_go//go/platform:solaris": [
+            "@org_golang_x_sys//unix",
+        ],
+        "//conditions:default": [],
+    }),
 )
 
 go_test(
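Editor's note: looking back at the worker.go hunk above (before the Bazel changes), the one-word change matters: with continue, a worker kept spinning through and acknowledging every remaining item for a task already marked StopTask; break lets it leave the loop as soon as it observes the stop status. A distilled sketch of the difference, with invented types:

package main

import "fmt"

type status int32

const (
	runTask status = iota
	stopTask
)

// drain consumes tasks until the shared status flips to stopTask.
// With `continue` in place of `break`, a stopped consumer would keep
// draining and acking the channel instead of exiting promptly.
func drain(tasks <-chan int, getStatus func() status) (processed int) {
	for t := range tasks {
		if getStatus() == stopTask {
			break
		}
		_ = t // consume the task
		processed++
	}
	return processed
}

func main() {
	tasks := make(chan int, 4)
	for i := 0; i < 4; i++ {
		tasks <- i
	}
	close(tasks)

	// The status source flips to stopTask after two polls.
	n := 0
	fmt.Println(drain(tasks, func() status {
		n++
		if n > 2 {
			return stopTask
		}
		return runTask
	})) // 2
}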
"@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//keepalive", "@org_uber_go_goleak//:goleak", ], diff --git a/util/topsql/reporter/BUILD.bazel b/util/topsql/reporter/BUILD.bazel index 6c1def091ee39..98a83a0d00910 100644 --- a/util/topsql/reporter/BUILD.bazel +++ b/util/topsql/reporter/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "@com_github_wangjohn_quickselect//:quickselect", "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//backoff", + "@org_golang_google_grpc//credentials/insecure", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], diff --git a/util/topsql/reporter/single_target.go b/util/topsql/reporter/single_target.go index bf66414b674d4..16c88956453cd 100644 --- a/util/topsql/reporter/single_target.go +++ b/util/topsql/reporter/single_target.go @@ -28,6 +28,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials/insecure" ) const ( @@ -357,7 +358,7 @@ func (*SingleTargetDataSink) dial(ctx context.Context, targetRPCAddr string) (*g dialCtx, targetRPCAddr, grpc.WithBlock(), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithInitialWindowSize(grpcInitialWindowSize), grpc.WithInitialConnWindowSize(grpcInitialConnWindowSize), grpc.WithDefaultCallOptions( diff --git a/util/topsql/topsql_test.go b/util/topsql/topsql_test.go index 04855ac163011..1862b090a94de 100644 --- a/util/topsql/topsql_test.go +++ b/util/topsql/topsql_test.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tipb/go-tipb" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" ) @@ -241,7 +242,7 @@ func TestTopSQLPubSub(t *testing.T) { conn, err := grpc.Dial( server.Address(), grpc.WithBlock(), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 10 * time.Second, Timeout: 3 * time.Second, @@ -363,7 +364,7 @@ func TestPubSubWhenReporterIsStopped(t *testing.T) { conn, err := grpc.Dial( server.Address(), grpc.WithBlock(), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 10 * time.Second, Timeout: 3 * time.Second,