diff --git a/DEPS.bzl b/DEPS.bzl index dbd1edbaf98d6..0333a2dc70184 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -390,8 +390,8 @@ def go_deps(): name = "com_github_cespare_xxhash_v2", build_file_proto_mode = "disable_global", importpath = "github.com/cespare/xxhash/v2", - sum = "h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=", - version = "v2.1.2", + sum = "h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=", + version = "v2.2.0", ) go_repository( name = "com_github_charithe_durationcheck", @@ -1143,8 +1143,8 @@ def go_deps(): name = "com_github_go_kit_log", build_file_proto_mode = "disable_global", importpath = "github.com/go-kit/log", - sum = "h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=", - version = "v0.2.0", + sum = "h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=", + version = "v0.2.1", ) go_repository( name = "com_github_go_logfmt_logfmt", @@ -2182,8 +2182,8 @@ def go_deps(): name = "com_github_klauspost_compress", build_file_proto_mode = "disable_global", importpath = "github.com/klauspost/compress", - sum = "h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=", - version = "v1.15.1", + sum = "h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0=", + version = "v1.15.13", ) go_repository( name = "com_github_klauspost_cpuid", @@ -2435,8 +2435,8 @@ def go_deps(): name = "com_github_matttproud_golang_protobuf_extensions", build_file_proto_mode = "disable_global", importpath = "github.com/matttproud/golang_protobuf_extensions", - sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=", - version = "v1.0.1", + sum = "h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=", + version = "v1.0.4", ) go_repository( name = "com_github_maxatome_go_testdeep", @@ -2881,8 +2881,8 @@ def go_deps(): name = "com_github_pingcap_badger", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/badger", - sum = "h1:MKVFZuqFvAMiDtv3AbihOQ6rY5IE8LWflI1BuZ/hF0Y=", - version = "v1.5.1-0.20220314162537-ab58fbf40580", + sum = "h1:QB16qn8wx5X4SRn3/5axrjPMNS3WRt87+5Bfrnmt6IA=", + version = "v1.5.1-0.20221229114011-ddffaa0fff7a", ) go_repository( name = "com_github_pingcap_check", @@ -2895,8 +2895,8 @@ def go_deps(): name = "com_github_pingcap_errors", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/errors", - sum = "h1:3Dm0DWeQlwV8LbpQxP2tojHhxd9aY59KI+QN0ns6bBo=", - version = "v0.11.5-0.20220729040631-518f63d66278", + sum = "h1:m5ZsBa5o/0CkzZXfXLaThzKuR85SnHHetqBCpzQ30h8=", + version = "v0.11.5-0.20221009092201-b66cddb77c32", ) go_repository( name = "com_github_pingcap_failpoint", @@ -2923,8 +2923,8 @@ def go_deps(): name = "com_github_pingcap_kvproto", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/kvproto", - sum = "h1:46ZD6xzQWJ8Jkeal/U7SqkX030Mgs8DAn6QV/9zbqOQ=", - version = "v0.0.0-20221130022225-6c56ac56fe5f", + sum = "h1:v0Z0nC0knwWHn3e9br8EMNfLBB14QDULn142UGjiTMQ=", + version = "v0.0.0-20221213093948-9ccc6beaf0aa", ) go_repository( name = "com_github_pingcap_log", @@ -3001,29 +3001,29 @@ def go_deps(): name = "com_github_prometheus_client_golang", build_file_proto_mode = "disable_global", importpath = "github.com/prometheus/client_golang", - sum = "h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=", - version = "v1.13.0", + sum = "h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=", + version = "v1.14.0", ) go_repository( name = "com_github_prometheus_client_model", build_file_proto_mode = "disable_global", importpath = "github.com/prometheus/client_model", - sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=", - version = 
"v0.2.0", + sum = "h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=", + version = "v0.3.0", ) go_repository( name = "com_github_prometheus_common", build_file_proto_mode = "disable_global", importpath = "github.com/prometheus/common", - sum = "h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=", - version = "v0.37.0", + sum = "h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=", + version = "v0.39.0", ) go_repository( name = "com_github_prometheus_procfs", build_file_proto_mode = "disable_global", importpath = "github.com/prometheus/procfs", - sum = "h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=", - version = "v0.8.0", + sum = "h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=", + version = "v0.9.0", ) go_repository( name = "com_github_prometheus_prometheus", @@ -3523,12 +3523,20 @@ def go_deps(): sum = "h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ=", version = "v0.0.0-20181126055449-889f96f722a2", ) + go_repository( + name = "com_github_tiancaiamao_gp", + build_file_proto_mode = "disable", + importpath = "github.com/tiancaiamao/gp", + sum = "h1:4RNtqw1/tW67qP9fFgfQpTVd7DrfkaAWu4vsC18QmBo=", + version = "v0.0.0-20221221095600-1a473d1f9b4b", + ) + go_repository( name = "com_github_tikv_client_go_v2", build_file_proto_mode = "disable_global", importpath = "github.com/tikv/client-go/v2", - sum = "h1:/glZOHs/K2pkCioDVae+aThUHFYRYQkEgY4NUTgfh+s=", - version = "v2.0.3", + sum = "h1:m6glgBGCIds9QURbk8Mn+8mjLKDcv6nWrNwYh92fydQ=", + version = "v2.0.4-0.20221226080148-018c59dbd837", ) go_repository( name = "com_github_tikv_pd_client", @@ -4432,8 +4440,8 @@ def go_deps(): name = "org_golang_x_oauth2", build_file_proto_mode = "disable_global", importpath = "golang.org/x/oauth2", - sum = "h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=", - version = "v0.2.0", + sum = "h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=", + version = "v0.3.0", ) go_repository( name = "org_golang_x_sync", @@ -4600,8 +4608,8 @@ def go_deps(): name = "org_uber_go_multierr", build_file_proto_mode = "disable_global", importpath = "go.uber.org/multierr", - sum = "h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=", - version = "v1.8.0", + sum = "h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=", + version = "v1.9.0", ) go_repository( name = "org_uber_go_tools", @@ -4614,6 +4622,6 @@ def go_deps(): name = "org_uber_go_zap", build_file_proto_mode = "disable_global", importpath = "go.uber.org/zap", - sum = "h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=", - version = "v1.23.0", + sum = "h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=", + version = "v1.24.0", ) diff --git a/Makefile b/Makefile index 66b3ba0686917..ba8c742782900 100644 --- a/Makefile +++ b/Makefile @@ -408,10 +408,6 @@ bazel_coverage_test: failpoint-enable bazel_ci_prepare --build_event_json_file=bazel_1.json --@io_bazel_rules_go//go/config:cover_format=go_cover \ -- //... -//cmd/... -//tests/graceshutdown/... \ -//tests/globalkilltest/... -//tests/readonlytest/... -//br/pkg/task:task_test -//tests/realtikvtest/... - bazel $(BAZEL_GLOBAL_CONFIG) coverage $(BAZEL_CMD_CONFIG) \ - --build_event_json_file=bazel_2.json --@io_bazel_rules_go//go/config:cover_format=go_cover --define gotags=featuretag \ - -- //... -//cmd/... -//tests/graceshutdown/... \ - -//tests/globalkilltest/... -//tests/readonlytest/... -//br/pkg/task:task_test -//tests/realtikvtest/... 
bazel_build: bazel_ci_prepare mkdir -p bin diff --git a/br/pkg/conn/conn.go b/br/pkg/conn/conn.go index 5adbe0a33ab1c..157b9cdf794c9 100644 --- a/br/pkg/conn/conn.go +++ b/br/pkg/conn/conn.go @@ -191,7 +191,6 @@ func NewMgr( return nil, errors.Trace(err) } // we must check tidb(tikv version) any time after concurrent ddl feature implemented in v6.2. - // when tidb < 6.2 we need set EnableConcurrentDDL false to make ddl works. // we will keep this check until 7.0, which allow the breaking changes. // NOTE: must call it after domain created! // FIXME: remove this check in v7.0 @@ -281,7 +280,8 @@ func (mgr *Mgr) GetTS(ctx context.Context) (uint64, error) { } // GetMergeRegionSizeAndCount returns the tikv config `coprocessor.region-split-size` and `coprocessor.region-split-key`. -func (mgr *Mgr) GetMergeRegionSizeAndCount(ctx context.Context, client *http.Client) (uint64, uint64, error) { +// returns the default config when failed. +func (mgr *Mgr) GetMergeRegionSizeAndCount(ctx context.Context, client *http.Client) (uint64, uint64) { regionSplitSize := DefaultMergeRegionSizeBytes regionSplitKeys := DefaultMergeRegionKeyCount type coprocessor struct { @@ -310,9 +310,10 @@ func (mgr *Mgr) GetMergeRegionSizeAndCount(ctx context.Context, client *http.Cli return nil }) if err != nil { - return 0, 0, errors.Trace(err) + log.Warn("meet error when getting config from TiKV; using default", logutil.ShortError(err)) + return DefaultMergeRegionSizeBytes, DefaultMergeRegionKeyCount } - return regionSplitSize, regionSplitKeys, nil + return regionSplitSize, regionSplitKeys } // GetConfigFromTiKV get configs from all alive tikv stores. diff --git a/br/pkg/conn/conn_test.go b/br/pkg/conn/conn_test.go index 01ce8bc08203e..fc822fac123d9 100644 --- a/br/pkg/conn/conn_test.go +++ b/br/pkg/conn/conn_test.go @@ -292,6 +292,38 @@ func TestGetMergeRegionSizeAndCount(t *testing.T) { regionSplitSize: DefaultMergeRegionSizeBytes, regionSplitKeys: DefaultMergeRegionKeyCount, }, + { + stores: []*metapb.Store{ + { + Id: 1, + State: metapb.StoreState_Up, + Labels: []*metapb.StoreLabel{ + { + Key: "engine", + Value: "tiflash", + }, + }, + }, + { + Id: 2, + State: metapb.StoreState_Up, + Labels: []*metapb.StoreLabel{ + { + Key: "engine", + Value: "tikv", + }, + }, + }, + }, + content: []string{ + "", + // Assuming the TiKV has failed due to some reason. + "", + }, + // no tikv detected in this case + regionSplitSize: DefaultMergeRegionSizeBytes, + regionSplitKeys: DefaultMergeRegionKeyCount, + }, { stores: []*metapb.Store{ { @@ -388,8 +420,7 @@ func TestGetMergeRegionSizeAndCount(t *testing.T) { httpCli := mockServer.Client() mgr := &Mgr{PdController: &pdutil.PdController{}} mgr.PdController.SetPDClient(pdCli) - rs, rk, err := mgr.GetMergeRegionSizeAndCount(ctx, httpCli) - require.NoError(t, err) + rs, rk := mgr.GetMergeRegionSizeAndCount(ctx, httpCli) require.Equal(t, ca.regionSplitSize, rs) require.Equal(t, ca.regionSplitKeys, rk) mockServer.Close() diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go index 3b1675921afd3..19c3ef1c4c460 100644 --- a/br/pkg/gluetidb/glue.go +++ b/br/pkg/gluetidb/glue.go @@ -203,9 +203,32 @@ func (gs *tidbSession) CreatePlacementPolicy(ctx context.Context, policy *model. return d.CreatePlacementPolicyWithInfo(gs.se, policy, ddl.OnExistIgnore) } +// SplitBatchCreateTable provide a way to split batch into small batch when batch size is large than 6 MB. 
+// The raft entry has limit size of 6 MB, a batch of CreateTables may hit this limitation +// TODO: shall query string be set for each split batch create, it looks does not matter if we set once for all. +func (gs *tidbSession) SplitBatchCreateTable(schema model.CIStr, info []*model.TableInfo, cs ...ddl.CreateTableWithInfoConfigurier) error { + var err error + d := domain.GetDomain(gs.se).DDL() + if err = d.BatchCreateTableWithInfo(gs.se, schema, info, append(cs, ddl.OnExistIgnore)...); kv.ErrEntryTooLarge.Equal(err) { + if len(info) == 1 { + return err + } + mid := len(info) / 2 + err = gs.SplitBatchCreateTable(schema, info[:mid]) + if err != nil { + return err + } + err = gs.SplitBatchCreateTable(schema, info[mid:]) + if err != nil { + return err + } + return nil + } + return err +} + // CreateTables implements glue.BatchCreateTableSession. func (gs *tidbSession) CreateTables(ctx context.Context, tables map[string][]*model.TableInfo, cs ...ddl.CreateTableWithInfoConfigurier) error { - d := domain.GetDomain(gs.se).DDL() var dbName model.CIStr // Disable foreign key check when batch create tables. @@ -233,8 +256,7 @@ func (gs *tidbSession) CreateTables(ctx context.Context, tables map[string][]*mo cloneTables = append(cloneTables, table) } gs.se.SetValue(sessionctx.QueryString, queryBuilder.String()) - err := d.BatchCreateTableWithInfo(gs.se, dbName, cloneTables, append(cs, ddl.OnExistIgnore)...) - if err != nil { + if err := gs.SplitBatchCreateTable(dbName, cloneTables); err != nil { //It is possible to failure when TiDB does not support model.ActionCreateTables. //In this circumstance, BatchCreateTableWithInfo returns errno.ErrInvalidDDLJob, //we fall back to old way that creating table one by one diff --git a/br/pkg/lightning/restore/tidb.go b/br/pkg/lightning/restore/tidb.go index ddc8bb6c5ff19..98c780e65dc98 100644 --- a/br/pkg/lightning/restore/tidb.go +++ b/br/pkg/lightning/restore/tidb.go @@ -95,8 +95,10 @@ func DBFromConfig(ctx context.Context, dsn config.DBStore) (*sql.DB, error) { "tidb_opt_write_row_id": "1", // always set auto-commit to ON "autocommit": "1", - // alway set transaction mode to optimistic + // always set transaction mode to optimistic "tidb_txn_mode": "optimistic", + // disable foreign key checks + "foreign_key_checks": "0", } if dsn.Vars != nil { @@ -143,47 +145,6 @@ func (timgr *TiDBManager) Close() { timgr.db.Close() } -func InitSchema(ctx context.Context, g glue.Glue, database string, tablesSchema map[string]string) error { - logger := log.FromContext(ctx).With(zap.String("db", database)) - sqlExecutor := g.GetSQLExecutor() - - var createDatabase strings.Builder - createDatabase.WriteString("CREATE DATABASE IF NOT EXISTS ") - common.WriteMySQLIdentifier(&createDatabase, database) - err := sqlExecutor.ExecuteWithLog(ctx, createDatabase.String(), "create database", logger) - if err != nil { - return errors.Trace(err) - } - - task := logger.Begin(zap.InfoLevel, "create tables") - var sqlCreateStmts []string -loopCreate: - for tbl, sqlCreateTable := range tablesSchema { - task.Debug("create table", zap.String("schema", sqlCreateTable)) - - sqlCreateStmts, err = createIfNotExistsStmt(g.GetParser(), sqlCreateTable, database, tbl) - if err != nil { - break - } - - // TODO: maybe we should put these createStems into a transaction - for _, s := range sqlCreateStmts { - err = sqlExecutor.ExecuteWithLog( - ctx, - s, - "create table", - logger.With(zap.String("table", common.UniqueTable(database, tbl))), - ) - if err != nil { - break loopCreate - } - } - } - 
task.End(zap.ErrorLevel, err) - - return errors.Trace(err) -} - func createIfNotExistsStmt(p *parser.Parser, createTable, dbName, tblName string) ([]string, error) { stmts, _, err := p.ParseSQL(createTable) if err != nil { @@ -191,7 +152,7 @@ func createIfNotExistsStmt(p *parser.Parser, createTable, dbName, tblName string } var res strings.Builder - ctx := format.NewRestoreCtx(format.DefaultRestoreFlags|format.RestoreTiDBSpecialComment, &res) + ctx := format.NewRestoreCtx(format.DefaultRestoreFlags|format.RestoreTiDBSpecialComment|format.RestoreWithTTLEnableOff, &res) retStmts := make([]string, 0, len(stmts)) for _, stmt := range stmts { @@ -199,6 +160,9 @@ func createIfNotExistsStmt(p *parser.Parser, createTable, dbName, tblName string case *ast.CreateDatabaseStmt: node.Name = model.NewCIStr(dbName) node.IfNotExists = true + case *ast.DropDatabaseStmt: + node.Name = model.NewCIStr(dbName) + node.IfExists = true case *ast.CreateTableStmt: node.Table.Schema = model.NewCIStr(dbName) node.Table.Name = model.NewCIStr(tblName) diff --git a/br/pkg/lightning/restore/tidb_test.go b/br/pkg/lightning/restore/tidb_test.go index 9b204b2da22b1..a3710d822d2dd 100644 --- a/br/pkg/lightning/restore/tidb_test.go +++ b/br/pkg/lightning/restore/tidb_test.go @@ -165,97 +165,6 @@ func TestCreateTableIfNotExistsStmt(t *testing.T) { `, "m")) } -func TestInitSchema(t *testing.T) { - s := newTiDBSuite(t) - ctx := context.Background() - - s.mockDB. - ExpectExec("CREATE DATABASE IF NOT EXISTS `db`"). - WillReturnResult(sqlmock.NewResult(1, 1)) - s.mockDB. - ExpectExec("\\QCREATE TABLE IF NOT EXISTS `db`.`t1` (`a` INT PRIMARY KEY,`b` VARCHAR(200));\\E"). - WillReturnResult(sqlmock.NewResult(2, 1)) - s.mockDB. - ExpectExec("\\QSET @@SESSION.`FOREIGN_KEY_CHECKS`=0;\\E"). - WillReturnResult(sqlmock.NewResult(0, 0)) - s.mockDB. - ExpectExec("\\QCREATE TABLE IF NOT EXISTS `db`.`t2` (`xx` TEXT) AUTO_INCREMENT = 11203;\\E"). - WillReturnResult(sqlmock.NewResult(2, 1)) - s.mockDB. - ExpectClose() - - s.mockDB.MatchExpectationsInOrder(false) // maps are unordered. - err := InitSchema(ctx, s.tiGlue, "db", map[string]string{ - "t1": "create table t1 (a int primary key, b varchar(200));", - "t2": "/*!40014 SET FOREIGN_KEY_CHECKS=0*/;CREATE TABLE `db`.`t2` (xx TEXT) AUTO_INCREMENT=11203;", - }) - s.mockDB.MatchExpectationsInOrder(true) - require.NoError(t, err) -} - -func TestInitSchemaSyntaxError(t *testing.T) { - s := newTiDBSuite(t) - ctx := context.Background() - - s.mockDB. - ExpectExec("CREATE DATABASE IF NOT EXISTS `db`"). - WillReturnResult(sqlmock.NewResult(1, 1)) - s.mockDB. - ExpectClose() - - err := InitSchema(ctx, s.tiGlue, "db", map[string]string{ - "t1": "create table `t1` with invalid syntax;", - }) - require.Error(t, err) -} - -func TestInitSchemaErrorLost(t *testing.T) { - s := newTiDBSuite(t) - ctx := context.Background() - - s.mockDB. - ExpectExec("CREATE DATABASE IF NOT EXISTS `db`"). - WillReturnResult(sqlmock.NewResult(1, 1)) - - s.mockDB. - ExpectExec("CREATE TABLE IF NOT EXISTS.*"). - WillReturnError(&mysql.MySQLError{ - Number: tmysql.ErrTooBigFieldlength, - Message: "Column length too big", - }) - - s.mockDB. - ExpectClose() - - err := InitSchema(ctx, s.tiGlue, "db", map[string]string{ - "t1": "create table `t1` (a int);", - "t2": "create table t2 (a int primary key, b varchar(200));", - }) - require.Regexp(t, ".*Column length too big.*", err.Error()) -} - -func TestInitSchemaUnsupportedSchemaError(t *testing.T) { - s := newTiDBSuite(t) - ctx := context.Background() - - s.mockDB. 
- ExpectExec("CREATE DATABASE IF NOT EXISTS `db`"). - WillReturnResult(sqlmock.NewResult(1, 1)) - s.mockDB. - ExpectExec("CREATE TABLE IF NOT EXISTS `db`.`t1`.*"). - WillReturnError(&mysql.MySQLError{ - Number: tmysql.ErrTooBigFieldlength, - Message: "Column length too big", - }) - s.mockDB. - ExpectClose() - - err := InitSchema(ctx, s.tiGlue, "db", map[string]string{ - "t1": "create table `t1` (a VARCHAR(999999999));", - }) - require.Regexp(t, ".*Column length too big.*", err.Error()) -} - func TestDropTable(t *testing.T) { s := newTiDBSuite(t) ctx := context.Background() diff --git a/br/pkg/restore/db.go b/br/pkg/restore/db.go index ae62162c3e890..1f3f5d949e26e 100644 --- a/br/pkg/restore/db.go +++ b/br/pkg/restore/db.go @@ -284,7 +284,7 @@ func (db *DB) tableIDAllocFilter() ddl.AllocTableIDIf { if db.preallocedIDs == nil { return true } - prealloced := db.preallocedIDs.Prealloced(ti.ID) + prealloced := db.preallocedIDs.PreallocedFor(ti) if prealloced { log.Info("reusing table ID", zap.Stringer("table", ti.Name)) } @@ -308,6 +308,10 @@ func (db *DB) CreateTables(ctx context.Context, tables []*metautil.Table, return errors.Trace(err) } } + + if ttlInfo := table.Info.TTLInfo; ttlInfo != nil { + ttlInfo.Enable = false + } } if err := batchSession.CreateTables(ctx, m, db.tableIDAllocFilter()); err != nil { return err @@ -336,6 +340,10 @@ func (db *DB) CreateTable(ctx context.Context, table *metautil.Table, } } + if ttlInfo := table.Info.TTLInfo; ttlInfo != nil { + ttlInfo.Enable = false + } + err := db.se.CreateTable(ctx, table.DB.Name, table.Info, db.tableIDAllocFilter()) if err != nil { log.Error("create table failed", diff --git a/br/pkg/restore/prealloc_table_id/BUILD.bazel b/br/pkg/restore/prealloc_table_id/BUILD.bazel index 8ce80b039178a..cfdb0432fd446 100644 --- a/br/pkg/restore/prealloc_table_id/BUILD.bazel +++ b/br/pkg/restore/prealloc_table_id/BUILD.bazel @@ -5,7 +5,10 @@ go_library( srcs = ["alloc.go"], importpath = "github.com/pingcap/tidb/br/pkg/restore/prealloc_table_id", visibility = ["//visibility:public"], - deps = ["//br/pkg/metautil"], + deps = [ + "//br/pkg/metautil", + "//parser/model", + ], ) go_test( diff --git a/br/pkg/restore/prealloc_table_id/alloc.go b/br/pkg/restore/prealloc_table_id/alloc.go index 9232ed84a8fc8..8554de5e9891b 100644 --- a/br/pkg/restore/prealloc_table_id/alloc.go +++ b/br/pkg/restore/prealloc_table_id/alloc.go @@ -7,6 +7,7 @@ import ( "math" "github.com/pingcap/tidb/br/pkg/metautil" + "github.com/pingcap/tidb/parser/model" ) const ( @@ -48,6 +49,14 @@ func New(tables []*metautil.Table) *PreallocIDs { if t.Info.ID > max && t.Info.ID < insaneTableIDThreshold { max = t.Info.ID } + + if t.Info.Partition != nil && t.Info.Partition.Definitions != nil { + for _, part := range t.Info.Partition.Definitions { + if part.ID > max && part.ID < insaneTableIDThreshold { + max = part.ID + } + } + } } return &PreallocIDs{ end: max + 1, @@ -86,3 +95,17 @@ func (p *PreallocIDs) Alloc(m Allocator) error { func (p *PreallocIDs) Prealloced(tid int64) bool { return p.allocedFrom <= tid && tid < p.end } + +func (p *PreallocIDs) PreallocedFor(ti *model.TableInfo) bool { + if !p.Prealloced(ti.ID) { + return false + } + if ti.Partition != nil && ti.Partition.Definitions != nil { + for _, part := range ti.Partition.Definitions { + if !p.Prealloced(part.ID) { + return false + } + } + } + return true +} diff --git a/br/pkg/restore/prealloc_table_id/alloc_test.go b/br/pkg/restore/prealloc_table_id/alloc_test.go index 8cf6b95fb070e..c1c3f018a2de8 100644 --- 
a/br/pkg/restore/prealloc_table_id/alloc_test.go +++ b/br/pkg/restore/prealloc_table_id/alloc_test.go @@ -27,6 +27,7 @@ func (t *testAllocator) AdvanceGlobalIDs(n int) (int64, error) { func TestAllocator(t *testing.T) { type Case struct { tableIDs []int64 + partitions map[int64][]int64 hasAllocatedTo int64 successfullyAllocated []int64 shouldAllocatedTo int64 @@ -57,16 +58,41 @@ func TestAllocator(t *testing.T) { successfullyAllocated: []int64{5, 6}, shouldAllocatedTo: 7, }, + { + tableIDs: []int64{1, 2, 5, 6, 7}, + hasAllocatedTo: 6, + successfullyAllocated: []int64{6, 7}, + shouldAllocatedTo: 13, + partitions: map[int64][]int64{ + 7: {8, 9, 10, 11, 12}, + }, + }, + { + tableIDs: []int64{1, 2, 5, 6, 7, 13}, + hasAllocatedTo: 9, + successfullyAllocated: []int64{13}, + shouldAllocatedTo: 14, + partitions: map[int64][]int64{ + 7: {8, 9, 10, 11, 12}, + }, + }, } run := func(t *testing.T, c Case) { tables := make([]*metautil.Table, 0, len(c.tableIDs)) for _, id := range c.tableIDs { - tables = append(tables, &metautil.Table{ + table := metautil.Table{ Info: &model.TableInfo{ - ID: id, + ID: id, + Partition: &model.PartitionInfo{}, }, - }) + } + if c.partitions != nil { + for _, part := range c.partitions[id] { + table.Info.Partition.Definitions = append(table.Info.Partition.Definitions, model.PartitionDefinition{ID: part}) + } + } + tables = append(tables, &table) } ids := prealloctableid.New(tables) @@ -74,9 +100,9 @@ func TestAllocator(t *testing.T) { require.NoError(t, ids.Alloc(&allocator)) allocated := make([]int64, 0, len(c.successfullyAllocated)) - for _, t := range c.tableIDs { - if ids.Prealloced(t) { - allocated = append(allocated, t) + for _, t := range tables { + if ids.PreallocedFor(t.Info) { + allocated = append(allocated, t.Info.ID) } } require.ElementsMatch(t, allocated, c.successfullyAllocated) diff --git a/br/pkg/stream/BUILD.bazel b/br/pkg/stream/BUILD.bazel index f75f9f37d81ea..e5fbc5c87b870 100644 --- a/br/pkg/stream/BUILD.bazel +++ b/br/pkg/stream/BUILD.bazel @@ -56,8 +56,11 @@ go_test( "//br/pkg/storage", "//br/pkg/streamhelper", "//meta", + "//parser/ast", "//parser/model", + "//parser/mysql", "//tablecodec", + "//types", "//util/codec", "//util/table-filter", "@com_github_pingcap_kvproto//pkg/brpb", diff --git a/br/pkg/stream/rewrite_meta_rawkv.go b/br/pkg/stream/rewrite_meta_rawkv.go index 7398abdbb2cb9..3c559ec124ad8 100644 --- a/br/pkg/stream/rewrite_meta_rawkv.go +++ b/br/pkg/stream/rewrite_meta_rawkv.go @@ -336,6 +336,11 @@ func (sr *SchemasReplace) rewriteTableInfo(value []byte, dbID int64) ([]byte, bo } } + // Force to disable TTL_ENABLE when restore + if newTableInfo.TTLInfo != nil { + newTableInfo.TTLInfo.Enable = false + } + if sr.AfterTableRewritten != nil { sr.AfterTableRewritten(false, newTableInfo) } diff --git a/br/pkg/stream/rewrite_meta_rawkv_test.go b/br/pkg/stream/rewrite_meta_rawkv_test.go index d2cbe24e8295d..cd3cf00d46305 100644 --- a/br/pkg/stream/rewrite_meta_rawkv_test.go +++ b/br/pkg/stream/rewrite_meta_rawkv_test.go @@ -7,7 +7,10 @@ import ( "encoding/json" "testing" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" filter "github.com/pingcap/tidb/util/table-filter" "github.com/stretchr/testify/require" ) @@ -312,6 +315,52 @@ func TestRewriteValueForExchangePartition(t *testing.T) { require.Equal(t, tableInfo.ID, pt1ID+100) } +func TestRewriteValueForTTLTable(t *testing.T) { + var ( + dbId int64 = 40 + tableID int64 = 100 + 
colID int64 = 1000 + colName = "t" + tableName = "t1" + tableInfo model.TableInfo + ) + + tbl := model.TableInfo{ + ID: tableID, + Name: model.NewCIStr(tableName), + Columns: []*model.ColumnInfo{ + { + ID: colID, + Name: model.NewCIStr(colName), + FieldType: *types.NewFieldType(mysql.TypeTimestamp), + }, + }, + TTLInfo: &model.TTLInfo{ + ColumnName: model.NewCIStr(colName), + IntervalExprStr: "1", + IntervalTimeUnit: int(ast.TimeUnitDay), + Enable: true, + }, + } + value, err := json.Marshal(&tbl) + require.Nil(t, err) + + sr := MockEmptySchemasReplace(nil) + newValue, needRewrite, err := sr.rewriteTableInfo(value, dbId) + require.Nil(t, err) + require.True(t, needRewrite) + + err = json.Unmarshal(newValue, &tableInfo) + require.Nil(t, err) + require.Equal(t, tableInfo.Name.String(), tableName) + require.Equal(t, tableInfo.ID, sr.DbMap[dbId].TableMap[tableID].NewTableID) + require.NotNil(t, tableInfo.TTLInfo) + require.Equal(t, colName, tableInfo.TTLInfo.ColumnName.O) + require.Equal(t, "1", tableInfo.TTLInfo.IntervalExprStr) + require.Equal(t, int(ast.TimeUnitDay), tableInfo.TTLInfo.IntervalTimeUnit) + require.False(t, tableInfo.TTLInfo.Enable) +} + // db:70->80 - // | - t0:71->81 - // | | - p0:72->82 diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index 83c22a29e61db..0a1ba11cad84e 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -494,10 +494,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf // according to https://github.com/pingcap/tidb/issues/34167. // we should get the real config from tikv to adapt the dynamic region. httpCli := httputil.NewClient(mgr.GetTLSConfig()) - mergeRegionSize, mergeRegionCount, err = mgr.GetMergeRegionSizeAndCount(ctx, httpCli) - if err != nil { - return errors.Trace(err) - } + mergeRegionSize, mergeRegionCount = mgr.GetMergeRegionSizeAndCount(ctx, httpCli) } keepaliveCfg.PermitWithoutStream = true diff --git a/br/pkg/task/restore_raw.go b/br/pkg/task/restore_raw.go index 6c15cd9989512..7b80ac18b4d87 100644 --- a/br/pkg/task/restore_raw.go +++ b/br/pkg/task/restore_raw.go @@ -80,10 +80,7 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR // according to https://github.com/pingcap/tidb/issues/34167. // we should get the real config from tikv to adapt the dynamic region. 
httpCli := httputil.NewClient(mgr.GetTLSConfig()) - mergeRegionSize, mergeRegionCount, err = mgr.GetMergeRegionSizeAndCount(ctx, httpCli) - if err != nil { - return errors.Trace(err) - } + mergeRegionSize, mergeRegionCount = mgr.GetMergeRegionSizeAndCount(ctx, httpCli) } keepaliveCfg := GetKeepalive(&cfg.Config) diff --git a/br/pkg/version/BUILD.bazel b/br/pkg/version/BUILD.bazel index 8171081ae5df1..7a15014e378e6 100644 --- a/br/pkg/version/BUILD.bazel +++ b/br/pkg/version/BUILD.bazel @@ -10,7 +10,6 @@ go_library( "//br/pkg/logutil", "//br/pkg/utils", "//br/pkg/version/build", - "//sessionctx/variable", "//util/engine", "@com_github_coreos_go_semver//semver", "@com_github_pingcap_errors//:errors", @@ -29,7 +28,6 @@ go_test( flaky = True, deps = [ "//br/pkg/version/build", - "//sessionctx/variable", "@com_github_coreos_go_semver//semver", "@com_github_data_dog_go_sqlmock//:go-sqlmock", "@com_github_pingcap_kvproto//pkg/metapb", diff --git a/br/pkg/version/version.go b/br/pkg/version/version.go index c49e3d1ada923..9cb974d48e13f 100644 --- a/br/pkg/version/version.go +++ b/br/pkg/version/version.go @@ -18,7 +18,6 @@ import ( "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/version/build" - "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/engine" pd "github.com/tikv/pd/client" "go.uber.org/zap" @@ -166,9 +165,7 @@ func CheckVersionForDDL(s *metapb.Store, tikvVersion *semver.Version) error { // use tikvVersion instead of tidbVersion since br doesn't have mysql client to connect tidb. requireVersion := semver.New("6.2.0-alpha") if tikvVersion.Compare(*requireVersion) < 0 { - log.Info("detected the old version of tidb cluster. set enable concurrent ddl to false") - variable.EnableConcurrentDDL.Store(false) - return nil + return errors.Errorf("detected the old version of tidb cluster, require: >= 6.2.0, but got %s", tikvVersion.String()) } return nil } diff --git a/br/pkg/version/version_test.go b/br/pkg/version/version_test.go index 1fc654b3990b6..927eeee119d5b 100644 --- a/br/pkg/version/version_test.go +++ b/br/pkg/version/version_test.go @@ -13,7 +13,6 @@ import ( "github.com/coreos/go-semver/semver" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/version/build" - "github.com/pingcap/tidb/sessionctx/variable" "github.com/stretchr/testify/require" pd "github.com/tikv/pd/client" ) @@ -321,50 +320,40 @@ func TestCheckClusterVersion(t *testing.T) { mock.getAllStores = func() []*metapb.Store { return []*metapb.Store{{Version: "v6.4.0"}} } - originVal := variable.EnableConcurrentDDL.Load() err := CheckClusterVersion(context.Background(), &mock, CheckVersionForDDL) require.NoError(t, err) - require.Equal(t, originVal, variable.EnableConcurrentDDL.Load()) } { mock.getAllStores = func() []*metapb.Store { return []*metapb.Store{{Version: "v6.2.0"}} } - originVal := variable.EnableConcurrentDDL.Load() err := CheckClusterVersion(context.Background(), &mock, CheckVersionForDDL) require.NoError(t, err) - require.Equal(t, originVal, variable.EnableConcurrentDDL.Load()) } { mock.getAllStores = func() []*metapb.Store { return []*metapb.Store{{Version: "v6.2.0-alpha"}} } - originVal := variable.EnableConcurrentDDL.Load() err := CheckClusterVersion(context.Background(), &mock, CheckVersionForDDL) require.NoError(t, err) - require.Equal(t, originVal, variable.EnableConcurrentDDL.Load()) } { mock.getAllStores = func() []*metapb.Store { return 
[]*metapb.Store{{Version: "v6.1.0"}} } - variable.EnableConcurrentDDL.Store(true) err := CheckClusterVersion(context.Background(), &mock, CheckVersionForDDL) - require.NoError(t, err) - require.False(t, variable.EnableConcurrentDDL.Load()) + require.Error(t, err) } { mock.getAllStores = func() []*metapb.Store { return []*metapb.Store{{Version: "v5.4.0"}} } - variable.EnableConcurrentDDL.Store(true) err := CheckClusterVersion(context.Background(), &mock, CheckVersionForDDL) - require.NoError(t, err) - require.False(t, variable.EnableConcurrentDDL.Load()) + require.Error(t, err) } } diff --git a/br/tests/br_ttl/run.sh b/br/tests/br_ttl/run.sh new file mode 100644 index 0000000000000..cfb1a38c8281b --- /dev/null +++ b/br/tests/br_ttl/run.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# +# Copyright 2022 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" + +PROGRESS_FILE="$TEST_DIR/progress_file" +BACKUPMETAV1_LOG="$TEST_DIR/backup.log" +BACKUPMETAV2_LOG="$TEST_DIR/backupv2.log" +RESTORE_LOG="$TEST_DIR/restore.log" +rm -rf $PROGRESS_FILE + +run_sql "create schema $DB;" +run_sql "create table $DB.ttl_test_tbl(id int primary key, t datetime) TTL=\`t\` + interval 1 day TTL_ENABLE='ON'" + +# backup db +echo "full backup meta v2 start..." +unset BR_LOG_TO_TERM +rm -f $BACKUPMETAV2_LOG +run_br backup full --log-file $BACKUPMETAV2_LOG -s "local://$TEST_DIR/${DB}v2" --pd $PD_ADDR --use-backupmeta-v2 + +echo "full backup meta v1 start..." +rm -f $BACKUPMETAV1_LOG +run_br backup full --log-file $BACKUPMETAV1_LOG -s "local://$TEST_DIR/$DB" --pd $PD_ADDR + +TTL_MARK='![ttl]' +CREATE_SQL_CONTAINS="/*T${TTL_MARK} TTL=\`t\` + INTERVAL 1 DAY */ /*T${TTL_MARK} TTL_ENABLE='OFF' */" + +# restore v2 +run_sql "DROP DATABASE $DB;" +echo "restore ttl table start v2..." +run_br restore db --db $DB -s "local://$TEST_DIR/${DB}v2" --pd $PD_ADDR +run_sql "show create table $DB.ttl_test_tbl;" +check_contains "$CREATE_SQL_CONTAINS" + +# restore v1 +run_sql "DROP DATABASE $DB;" +echo "restore ttl table start v1..." +run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR +run_sql "show create table $DB.ttl_test_tbl;" +check_contains "$CREATE_SQL_CONTAINS" diff --git a/br/tests/lightning_foreign_key/config.toml b/br/tests/lightning_foreign_key/config.toml new file mode 100644 index 0000000000000..3c85a830bfa22 --- /dev/null +++ b/br/tests/lightning_foreign_key/config.toml @@ -0,0 +1,3 @@ +[tikv-importer] +# Set on-duplicate=error to force using insert statement to write data. 
+on-duplicate = "error" diff --git a/br/tests/lightning_foreign_key/data/fk.child-schema.sql b/br/tests/lightning_foreign_key/data/fk.child-schema.sql new file mode 100644 index 0000000000000..18c361bf4c2e0 --- /dev/null +++ b/br/tests/lightning_foreign_key/data/fk.child-schema.sql @@ -0,0 +1 @@ +create table child (id int key, pid int, constraint fk_1 foreign key (pid) references parent(id)); diff --git a/br/tests/lightning_foreign_key/data/fk.child.sql b/br/tests/lightning_foreign_key/data/fk.child.sql new file mode 100644 index 0000000000000..12e531eb96a34 --- /dev/null +++ b/br/tests/lightning_foreign_key/data/fk.child.sql @@ -0,0 +1 @@ +insert into child values (1,1),(2,2),(3,3),(4,4); diff --git a/br/tests/lightning_foreign_key/data/fk.parent-schema.sql b/br/tests/lightning_foreign_key/data/fk.parent-schema.sql new file mode 100644 index 0000000000000..8ae8af2de6a2e --- /dev/null +++ b/br/tests/lightning_foreign_key/data/fk.parent-schema.sql @@ -0,0 +1 @@ +create table parent(id int key, a int); diff --git a/br/tests/lightning_foreign_key/data/fk.parent.sql b/br/tests/lightning_foreign_key/data/fk.parent.sql new file mode 100644 index 0000000000000..7a31a9f18db0f --- /dev/null +++ b/br/tests/lightning_foreign_key/data/fk.parent.sql @@ -0,0 +1 @@ +insert into parent values (1,1),(2,2),(3,3),(4,4); diff --git a/br/tests/lightning_foreign_key/data/fk.t-schema.sql b/br/tests/lightning_foreign_key/data/fk.t-schema.sql new file mode 100644 index 0000000000000..98f00b9cadca8 --- /dev/null +++ b/br/tests/lightning_foreign_key/data/fk.t-schema.sql @@ -0,0 +1,8 @@ +CREATE TABLE `t` +( + `a` bigint(20) NOT NULL, + `b` bigint(20) DEFAULT NULL, + PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */, + KEY `fk_1` (`b`), + CONSTRAINT `fk_1` FOREIGN KEY (`b`) REFERENCES `test`.`t2` (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; diff --git a/br/tests/lightning_foreign_key/data/fk.t.csv b/br/tests/lightning_foreign_key/data/fk.t.csv new file mode 100644 index 0000000000000..cd0368580a4c8 --- /dev/null +++ b/br/tests/lightning_foreign_key/data/fk.t.csv @@ -0,0 +1,6 @@ +a,b +1,1 +2,2 +3,3 +4,4 +5,5 diff --git a/br/tests/lightning_foreign_key/run.sh b/br/tests/lightning_foreign_key/run.sh new file mode 100755 index 0000000000000..1c045b61f43be --- /dev/null +++ b/br/tests/lightning_foreign_key/run.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Copyright 2022 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu + +run_sql 'DROP DATABASE IF EXISTS fk;' +run_sql 'CREATE DATABASE IF NOT EXISTS fk;' +# Create existing tables that import data will reference. 
+run_sql 'CREATE TABLE fk.t2 (a BIGINT PRIMARY KEY);' + +for BACKEND in tidb local; do + run_sql 'DROP TABLE IF EXISTS fk.t, fk.parent, fk.child;' + + run_lightning --backend $BACKEND + run_sql 'SELECT GROUP_CONCAT(a) FROM fk.t ORDER BY a;' + check_contains '1,2,3,4,5' + + run_sql 'SELECT count(1), sum(a) FROM fk.parent;' + check_contains 'count(1): 4' + check_contains 'sum(a): 10' + + run_sql 'SELECT count(1), sum(pid) FROM fk.child;' + check_contains 'count(1): 4' + check_contains 'sum(pid): 10' +done diff --git a/br/tests/lightning_ttl/config.toml b/br/tests/lightning_ttl/config.toml new file mode 100644 index 0000000000000..d2152b47c922a --- /dev/null +++ b/br/tests/lightning_ttl/config.toml @@ -0,0 +1,2 @@ +[tikv-importer] +backend = 'local' diff --git a/br/tests/lightning_ttl/data/ttldb-schema-create.sql b/br/tests/lightning_ttl/data/ttldb-schema-create.sql new file mode 100644 index 0000000000000..46609f11e6635 --- /dev/null +++ b/br/tests/lightning_ttl/data/ttldb-schema-create.sql @@ -0,0 +1 @@ +CREATE DATABASE `ttldb`; diff --git a/br/tests/lightning_ttl/data/ttldb.t1-schema.sql b/br/tests/lightning_ttl/data/ttldb.t1-schema.sql new file mode 100644 index 0000000000000..7531d7f18ae01 --- /dev/null +++ b/br/tests/lightning_ttl/data/ttldb.t1-schema.sql @@ -0,0 +1,4 @@ +CREATE TABLE `t1` ( + `id` int(11) PRIMARY KEY, + `t` datetime +) TTL = `t` + INTERVAL 1 DAY TTL_ENABLE = 'ON'; diff --git a/br/tests/lightning_ttl/run.sh b/br/tests/lightning_ttl/run.sh new file mode 100644 index 0000000000000..4a1d9ffc04d57 --- /dev/null +++ b/br/tests/lightning_ttl/run.sh @@ -0,0 +1,26 @@ +#!/bin/sh +# +# Copyright 2022 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eu + +run_sql 'drop database if exists ttldb;' +run_lightning + +TTL_MARK='![ttl]' +CREATE_SQL_CONTAINS="/*T${TTL_MARK} TTL=\`t\` + INTERVAL 1 DAY */ /*T${TTL_MARK} TTL_ENABLE='OFF' */" + +run_sql 'show create table ttldb.t1' +check_contains "$CREATE_SQL_CONTAINS" diff --git a/cmd/explaintest/r/explain_complex.result b/cmd/explaintest/r/explain_complex.result index 8d8b47237b453..d8e1f186a4028 100644 --- a/cmd/explaintest/r/explain_complex.result +++ b/cmd/explaintest/r/explain_complex.result @@ -243,6 +243,7 @@ created_on datetime DEFAULT NULL, updated_on datetime DEFAULT NULL, UNIQUE KEY org_employee_position_pk (hotel_id,user_id,position_id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +set tidb_cost_model_version=2; explain format = 'brief' SELECT d.id, d.ctx, d.name, d.left_value, d.right_value, d.depth, d.leader_id, d.status, d.created_on, d.updated_on FROM org_department AS d LEFT JOIN org_position AS p ON p.department_id = d.id AND p.status = 1000 LEFT JOIN org_employee_position AS ep ON ep.position_id = p.id AND ep.status = 1000 WHERE (d.ctx = 1 AND (ep.user_id = 62 OR d.id = 20 OR d.id = 20) AND d.status = 1000) GROUP BY d.id ORDER BY d.left_value; id estRows task access object operator info Sort 1.00 root test.org_department.left_value @@ -262,6 +263,7 @@ Sort 1.00 root test.org_department.left_value └─TableReader(Probe) 9.99 root data:Selection └─Selection 9.99 cop[tikv] eq(test.org_employee_position.status, 1000), not(isnull(test.org_employee_position.position_id)) └─TableFullScan 10000.00 cop[tikv] table:ep keep order:false, stats:pseudo +set tidb_cost_model_version=1; create table test.Tab_A (id int primary key,bid int,cid int,name varchar(20),type varchar(20),num int,amt decimal(11,2)); create table test.Tab_B (id int primary key,name varchar(20)); create table test.Tab_C (id int primary key,name varchar(20),amt decimal(11,2)); diff --git a/cmd/explaintest/t/explain_complex.test b/cmd/explaintest/t/explain_complex.test index d9d1b9285f727..187ec571857d8 100644 --- a/cmd/explaintest/t/explain_complex.test +++ b/cmd/explaintest/t/explain_complex.test @@ -174,7 +174,9 @@ CREATE TABLE org_position ( UNIQUE KEY org_employee_position_pk (hotel_id,user_id,position_id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +set tidb_cost_model_version=2; explain format = 'brief' SELECT d.id, d.ctx, d.name, d.left_value, d.right_value, d.depth, d.leader_id, d.status, d.created_on, d.updated_on FROM org_department AS d LEFT JOIN org_position AS p ON p.department_id = d.id AND p.status = 1000 LEFT JOIN org_employee_position AS ep ON ep.position_id = p.id AND ep.status = 1000 WHERE (d.ctx = 1 AND (ep.user_id = 62 OR d.id = 20 OR d.id = 20) AND d.status = 1000) GROUP BY d.id ORDER BY d.left_value; +set tidb_cost_model_version=1; create table test.Tab_A (id int primary key,bid int,cid int,name varchar(20),type varchar(20),num int,amt decimal(11,2)); create table test.Tab_B (id int primary key,name varchar(20)); diff --git a/config/config.go b/config/config.go index 2352a17acf0d1..bc25b8c9b9ec3 100644 --- a/config/config.go +++ b/config/config.go @@ -261,6 +261,9 @@ type Config struct { // EnableGlobalKill indicates whether to enable global kill. TrxSummary TrxSummary `toml:"transaction-summary" json:"transaction-summary"` EnableGlobalKill bool `toml:"enable-global-kill" json:"enable-global-kill"` + // InitializeSQLFile is a file that will be executed after first bootstrap only. 
+ // It can be used to set GLOBAL system variable values + InitializeSQLFile string `toml:"initialize-sql-file" json:"initialize-sql-file"` // The following items are deprecated. We need to keep them here temporarily // to support the upgrade process. They can be removed in future. diff --git a/ddl/backfilling.go b/ddl/backfilling.go index 203c6744c4619..946c20c55bdf6 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -75,6 +75,8 @@ func (bT backfillerType) String() string { return "clean up index" case typeAddIndexMergeTmpWorker: return "merge temporary index" + case typeReorgPartitionWorker: + return "reorganize partition" default: return "unknown" } @@ -127,6 +129,7 @@ func GetLeaseGoTime(currTime time.Time, lease time.Duration) types.Time { // 1: add-index // 2: modify-column-type // 3: clean-up global index +// 4: reorganize partition // // They all have a write reorganization state to back fill data into the rows existed. // Backfilling is time consuming, to accelerate this process, TiDB has built some sub diff --git a/ddl/cluster.go b/ddl/cluster.go index fbcfa9cd8a49f..227963b3951d5 100644 --- a/ddl/cluster.go +++ b/ddl/cluster.go @@ -19,6 +19,7 @@ import ( "context" "encoding/hex" "fmt" + "math" "strings" "time" @@ -147,6 +148,17 @@ func getTiDBSuperReadOnly(sess sessionctx.Context) (string, error) { return val, nil } +func isFlashbackSupportedDDLAction(action model.ActionType) bool { + switch action { + case model.ActionSetTiFlashReplica, model.ActionUpdateTiFlashReplicaStatus, model.ActionAlterPlacementPolicy, + model.ActionAlterTablePlacement, model.ActionAlterTablePartitionPlacement, model.ActionCreatePlacementPolicy, + model.ActionDropPlacementPolicy, model.ActionModifySchemaDefaultPlacement: + return false + default: + return true + } +} + func checkAndSetFlashbackClusterInfo(sess sessionctx.Context, d *ddlCtx, t *meta.Meta, job *model.Job, flashbackTS uint64) (err error) { if err = ValidateFlashbackTS(d.ctx, sess, flashbackTS); err != nil { return err @@ -170,19 +182,47 @@ func checkAndSetFlashbackClusterInfo(sess sessionctx.Context, d *ddlCtx, t *meta return errors.Trace(err) } - flashbackSchemaVersion, err := meta.NewSnapshotMeta(d.store.GetSnapshot(kv.NewVersion(flashbackTS))).GetSchemaVersion() + flashbackSnapshotMeta := meta.NewSnapshotMeta(d.store.GetSnapshot(kv.NewVersion(flashbackTS))) + flashbackSchemaVersion, err := flashbackSnapshotMeta.GetSchemaVersion() if err != nil { return errors.Trace(err) } - // If flashbackSchemaVersion not same as nowSchemaVersion, we've done ddl during [flashbackTs, now). + flashbackTSString := oracle.GetTimeFromTS(flashbackTS).String() + + // Check if there is an upgrade during [flashbackTS, now) + sql := fmt.Sprintf("select VARIABLE_VALUE from mysql.tidb as of timestamp '%s' where VARIABLE_NAME='tidb_server_version'", flashbackTSString) + rows, err := newSession(sess).execute(d.ctx, sql, "check_tidb_server_version") + if err != nil || len(rows) == 0 { + return errors.Errorf("Get history `tidb_server_version` failed, can't do flashback") + } + sql = fmt.Sprintf("select 1 from mysql.tidb where VARIABLE_NAME='tidb_server_version' and VARIABLE_VALUE=%s", rows[0].GetString(0)) + rows, err = newSession(sess).execute(d.ctx, sql, "check_tidb_server_version") + if err != nil { + return errors.Trace(err) + } + if len(rows) == 0 { + return errors.Errorf("Detected TiDB upgrade during [%s, now), can't do flashback", flashbackTSString) + } + + // Check is there a DDL task at flashbackTS. 
+ sql = fmt.Sprintf("select count(*) from mysql.%s as of timestamp '%s'", JobTable, flashbackTSString) + rows, err = newSession(sess).execute(d.ctx, sql, "check_history_job") + if err != nil || len(rows) == 0 { + return errors.Errorf("Get history ddl jobs failed, can't do flashback") + } + if rows[0].GetInt64(0) != 0 { + return errors.Errorf("Detected another DDL job at %s, can't do flashback", flashbackTSString) + } + + // If flashbackSchemaVersion not same as nowSchemaVersion, we should check all schema diffs during [flashbackTs, now). for i := flashbackSchemaVersion + 1; i <= nowSchemaVersion; i++ { diff, err := t.GetSchemaDiff(i) if err != nil { return errors.Trace(err) } - if diff != nil && diff.Type != model.ActionFlashbackCluster { - return errors.Errorf("Detected schema change due to another DDL job during [%s, now), can't do flashback", oracle.GetTimeFromTS(flashbackTS)) + if diff != nil && !isFlashbackSupportedDDLAction(diff.Type) { + return errors.Errorf("Detected unsupported DDL job type(%s) during [%s, now), can't do flashback", diff.Type.String(), flashbackTSString) } } @@ -211,7 +251,7 @@ type flashbackID struct { func addToSlice(schema string, tableName string, tableID int64, flashbackIDs []flashbackID) []flashbackID { var excluded bool - if filter.IsSystemSchema(schema) && !strings.HasPrefix(tableName, "stats_") { + if filter.IsSystemSchema(schema) && !strings.HasPrefix(tableName, "stats_") && tableName != "gc_delete_range" { excluded = true } flashbackIDs = append(flashbackIDs, flashbackID{ @@ -270,6 +310,14 @@ func GetFlashbackKeyRanges(sess sessionctx.Context) ([]kv.KeyRange, error) { }) } + // The meta data key ranges. + metaStartKey := tablecodec.EncodeMetaKey(meta.DBkey(0), meta.TableKey(0)) + metaEndKey := tablecodec.EncodeMetaKey(meta.DBkey(math.MaxInt64), meta.TableKey(math.MaxInt64)) + keyRanges = append(keyRanges, kv.KeyRange{ + StartKey: metaStartKey, + EndKey: metaEndKey, + }) + return keyRanges, nil } @@ -633,7 +681,7 @@ func (w *worker) onFlashbackCluster(d *ddlCtx, t *meta.Meta, job *model.Job) (ve asyncNotifyEvent(d, &util.Event{Tp: model.ActionFlashbackCluster}) job.State = model.JobStateDone job.SchemaState = model.StatePublic - return ver, nil + return updateSchemaVersion(d, t, job) } return ver, nil } diff --git a/ddl/cluster_test.go b/ddl/cluster_test.go index 4c1ec291f87f2..12c77c42edafe 100644 --- a/ddl/cluster_test.go +++ b/ddl/cluster_test.go @@ -42,15 +42,16 @@ func TestGetFlashbackKeyRanges(t *testing.T) { kvRanges, err := ddl.GetFlashbackKeyRanges(se) require.NoError(t, err) - // The results are 6 key ranges - // 0: (stats_meta,stats_histograms,stats_buckets) + // The results are 8 key ranges + // 0: (stats_meta,stats_histograms,stats_buckets, gc_delete_range) // 1: (stats_feedback) // 2: (stats_top_n) // 3: (stats_extended) // 4: (stats_fm_sketch) // 5: (stats_history, stats_meta_history) // 6: (stats_table_locked) - require.Len(t, kvRanges, 7) + // 7: meta Ranges + require.Len(t, kvRanges, 8) tk.MustExec("use test") tk.MustExec("CREATE TABLE employees (" + @@ -64,7 +65,7 @@ func TestGetFlashbackKeyRanges(t *testing.T) { ");") tk.MustExec("truncate table mysql.analyze_jobs") - // truncate all `stats_` tables, make table ID consecutive. + // truncate all `stats_` and `gc_delete_range` tables, make table ID consecutive. 
tk.MustExec("truncate table mysql.stats_meta") tk.MustExec("truncate table mysql.stats_histograms") tk.MustExec("truncate table mysql.stats_buckets") @@ -75,14 +76,15 @@ func TestGetFlashbackKeyRanges(t *testing.T) { tk.MustExec("truncate table mysql.stats_history") tk.MustExec("truncate table mysql.stats_meta_history") tk.MustExec("truncate table mysql.stats_table_locked") + tk.MustExec("truncate table mysql.gc_delete_range") kvRanges, err = ddl.GetFlashbackKeyRanges(se) require.NoError(t, err) - require.Len(t, kvRanges, 2) + require.Len(t, kvRanges, 3) tk.MustExec("truncate table test.employees") kvRanges, err = ddl.GetFlashbackKeyRanges(se) require.NoError(t, err) - require.Len(t, kvRanges, 1) + require.Len(t, kvRanges, 2) } func TestFlashbackCloseAndResetPDSchedule(t *testing.T) { diff --git a/ddl/column.go b/ddl/column.go index d229c901e0969..4697a47f74f3d 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -806,7 +806,7 @@ func doReorgWorkForModifyColumnMultiSchema(w *worker, d *ddlCtx, t *meta.Meta, j func doReorgWorkForModifyColumn(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table, oldCol, changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) (done bool, ver int64, err error) { job.ReorgMeta.ReorgTp = model.ReorgTypeTxn - rh := newReorgHandler(t, w.sess, w.concurrentDDL) + rh := newReorgHandler(t, w.sess) dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { return false, ver, errors.Trace(err) @@ -1684,6 +1684,15 @@ func updateColumnDefaultValue(d *ddlCtx, t *meta.Meta, job *model.Job, newCol *m job.State = model.JobStateCancelled return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(newCol.Name, tblInfo.Name) } + + if hasDefaultValue, _, err := checkColumnDefaultValue(newContext(d.store), table.ToColumn(oldCol.Clone()), newCol.DefaultValue); err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } else if !hasDefaultValue { + job.State = model.JobStateCancelled + return ver, dbterror.ErrInvalidDefaultValue.GenWithStackByArgs(newCol.Name) + } + // The newCol's offset may be the value of the old schema version, so we can't use newCol directly. 
oldCol.DefaultValue = newCol.DefaultValue oldCol.DefaultValueBit = newCol.DefaultValueBit diff --git a/ddl/column_change_test.go b/ddl/column_change_test.go index 4528564d2f231..be393dd488668 100644 --- a/ddl/column_change_test.go +++ b/ddl/column_change_test.go @@ -437,3 +437,36 @@ func testNewContext(store kv.Storage) sessionctx.Context { ctx.Store = store return ctx } + +func TestIssue40150(t *testing.T) { + store, _ := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec("CREATE TABLE t40150 (a int) PARTITION BY HASH (a) PARTITIONS 2") + tk.MustContainErrMsg(`alter table t40150 rename column a to c`, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") +} + +func TestIssue40135(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk1 := testkit.NewTestKit(t, store) + tk1.MustExec("use test") + + tk.MustExec("CREATE TABLE t40135 ( a tinyint DEFAULT NULL, b varchar(32) DEFAULT 'md') PARTITION BY HASH (a) PARTITIONS 2") + one := true + hook := &ddl.TestDDLCallback{Do: dom} + var checkErr error + hook.OnJobRunBeforeExported = func(job *model.Job) { + if one { + one = false + _, checkErr = tk1.Exec("alter table t40135 change column a aNew SMALLINT NULL DEFAULT '-14996'") + } + } + dom.DDL().SetHook(hook) + tk.MustExec("alter table t40135 modify column a MEDIUMINT NULL DEFAULT '6243108' FIRST") + + require.ErrorContains(t, checkErr, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") +} diff --git a/ddl/column_modify_test.go b/ddl/column_modify_test.go index a75026e744796..669d130e487a2 100644 --- a/ddl/column_modify_test.go +++ b/ddl/column_modify_test.go @@ -289,8 +289,7 @@ func TestDropColumn(t *testing.T) { tk.MustExec("drop table if exists t1") tk.MustExec("create table t1 (a int,b int) partition by hash(a) partitions 4;") err := tk.ExecToErr("alter table t1 drop column a") - // TODO: refine the error message to compatible with MySQL - require.EqualError(t, err, "[planner:1054]Unknown column 'a' in 'expression'") + require.EqualError(t, err, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") } func TestChangeColumn(t *testing.T) { diff --git a/ddl/concurrentddltest/BUILD.bazel b/ddl/concurrentddltest/BUILD.bazel deleted file mode 100644 index 82e2adf1fe9c2..0000000000000 --- a/ddl/concurrentddltest/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_test") - -go_test( - name = "concurrentddltest_test", - timeout = "moderate", - srcs = [ - "main_test.go", - "switch_test.go", - ], - flaky = True, - race = "on", - shard_count = 2, - deps = [ - "//config", - "//ddl", - "//kv", - "//meta", - "//sessionctx/variable", - "//testkit", - "//testkit/testsetup", - "//util", - "@com_github_stretchr_testify//require", - "@org_uber_go_atomic//:atomic", - "@org_uber_go_goleak//:goleak", - ], -) diff --git a/ddl/concurrentddltest/switch_test.go b/ddl/concurrentddltest/switch_test.go deleted file mode 100644 index 6cd26811008e6..0000000000000 --- a/ddl/concurrentddltest/switch_test.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrentddltest - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/meta" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/util" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -func TestConcurrentDDLSwitch(t *testing.T) { - store := testkit.CreateMockStore(t) - - type table struct { - columnIdx int - indexIdx int - } - - var tables []*table - tblCount := 20 - for i := 0; i < tblCount; i++ { - tables = append(tables, &table{1, 0}) - } - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set global tidb_enable_metadata_lock=0") - tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt=1") - tk.MustExec("set @@global.tidb_ddl_reorg_batch_size=32") - - for i := range tables { - tk.MustExec(fmt.Sprintf("create table t%d (col0 int)", i)) - for j := 0; j < 1000; j++ { - tk.MustExec(fmt.Sprintf("insert into t%d values (%d)", i, j)) - } - } - - ddls := make([]string, 0, tblCount) - ddlCount := 100 - for i := 0; i < ddlCount; i++ { - tblIdx := rand.Intn(tblCount) - if rand.Intn(2) == 0 { - ddls = append(ddls, fmt.Sprintf("alter table t%d add index idx%d (col0)", tblIdx, tables[tblIdx].indexIdx)) - tables[tblIdx].indexIdx++ - } else { - ddls = append(ddls, fmt.Sprintf("alter table t%d add column col%d int", tblIdx, tables[tblIdx].columnIdx)) - tables[tblIdx].columnIdx++ - } - } - - c := atomic.NewInt32(0) - ch := make(chan struct{}) - go func() { - var wg util.WaitGroupWrapper - for i := range ddls { - wg.Add(1) - go func(idx int) { - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(ddls[idx]) - c.Add(1) - wg.Done() - }(i) - } - wg.Wait() - ch <- struct{}{} - }() - - // sleep 2s to make sure the ddl jobs is into table. 
- time.Sleep(2 * time.Second) - ticker := time.NewTicker(time.Second) - count := 0 - done := false - for !done { - select { - case <-ch: - done = true - case <-ticker.C: - var b bool - var err error - err = kv.RunInNewTxn(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), store, false, func(ctx context.Context, txn kv.Transaction) error { - b, err = meta.NewMeta(txn).IsConcurrentDDL() - return err - }) - require.NoError(t, err) - rs, err := testkit.NewTestKit(t, store).Exec(fmt.Sprintf("set @@global.tidb_enable_concurrent_ddl=%t", !b)) - if rs != nil { - require.NoError(t, rs.Close()) - } - if err == nil { - count++ - if b { - tk := testkit.NewTestKit(t, store) - tk.Session().GetSessionVars().MemQuotaQuery = -1 - tk.MustQuery("select count(*) from mysql.tidb_ddl_job").Check(testkit.Rows("0")) - tk.MustQuery("select count(*) from mysql.tidb_ddl_reorg").Check(testkit.Rows("0")) - } - } - } - } - - require.Equal(t, int32(ddlCount), c.Load()) - require.Greater(t, count, 0) - - tk = testkit.NewTestKit(t, store) - tk.Session().GetSessionVars().MemQuotaQuery = -1 - tk.MustExec("use test") - for i, tbl := range tables { - tk.MustQuery(fmt.Sprintf("select count(*) from information_schema.columns where TABLE_SCHEMA = 'test' and TABLE_NAME = 't%d'", i)).Check(testkit.Rows(fmt.Sprintf("%d", tbl.columnIdx))) - tk.MustExec(fmt.Sprintf("admin check table t%d", i)) - for j := 0; j < tbl.indexIdx; j++ { - tk.MustExec(fmt.Sprintf("admin check index t%d idx%d", i, j)) - } - } -} - -func TestConcurrentDDLSwitchWithMDL(t *testing.T) { - if !variable.EnableConcurrentDDL.Load() { - t.Skip("skip test if concurrent DDL is disabled") - } - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustGetErrMsg("set global tidb_enable_concurrent_ddl=off", "can not disable concurrent ddl when metadata lock is enabled") - tk.MustExec("set global tidb_enable_metadata_lock=0") - tk.MustExec("set global tidb_enable_concurrent_ddl=off") - tk.MustExec("create table test.t(a int)") -} diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go index 8b2ea57a4ccdb..f944f865eadc9 100644 --- a/ddl/db_partition_test.go +++ b/ddl/db_partition_test.go @@ -2246,14 +2246,6 @@ func TestExchangePartitionTableCompatiable(t *testing.T) { "alter table pt8 exchange partition p0 with table nt8;", dbterror.ErrTablesDifferentMetadata, }, - { - // foreign key test - // Partition table doesn't support to add foreign keys in mysql - "create table pt9 (id int not null primary key auto_increment,t_id int not null) partition by hash(id) partitions 1;", - "create table nt9 (id int not null primary key auto_increment, t_id int not null,foreign key fk_id (t_id) references pt5(id));", - "alter table pt9 exchange partition p0 with table nt9;", - dbterror.ErrPartitionExchangeForeignKey, - }, { // Generated column (virtual) "create table pt10 (id int not null, lname varchar(30), fname varchar(100) generated always as (concat(lname,' ')) virtual) partition by hash(id) partitions 1;", @@ -4540,19 +4532,51 @@ func TestAlterModifyColumnOnPartitionedTableRename(t *testing.T) { tk.MustExec("create database " + schemaName) tk.MustExec("use " + schemaName) tk.MustExec(`create table t (a int, b char) partition by range (a) (partition p0 values less than (10))`) - tk.MustContainErrMsg(`alter table t change a c int`, "[planner:1054]Unknown column 'a' in 'expression'") + tk.MustContainErrMsg(`alter table t change a c int`, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") 
tk.MustExec(`drop table t`) tk.MustExec(`create table t (a char, b char) partition by range columns (a) (partition p0 values less than ('z'))`) - tk.MustContainErrMsg(`alter table t change a c char`, "[ddl:8200]New column does not match partition definitions: [ddl:1567]partition column name cannot be found") + tk.MustContainErrMsg(`alter table t change a c char`, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") tk.MustExec(`drop table t`) tk.MustExec(`create table t (a int, b char) partition by list (a) (partition p0 values in (10))`) - tk.MustContainErrMsg(`alter table t change a c int`, "[planner:1054]Unknown column 'a' in 'expression'") + tk.MustContainErrMsg(`alter table t change a c int`, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") tk.MustExec(`drop table t`) tk.MustExec(`create table t (a char, b char) partition by list columns (a) (partition p0 values in ('z'))`) - tk.MustContainErrMsg(`alter table t change a c char`, "[ddl:8200]New column does not match partition definitions: [ddl:1567]partition column name cannot be found") + tk.MustContainErrMsg(`alter table t change a c char`, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") tk.MustExec(`drop table t`) tk.MustExec(`create table t (a int, b char) partition by hash (a) partitions 3`) - tk.MustContainErrMsg(`alter table t change a c int`, "[planner:1054]Unknown column 'a' in 'expression'") + tk.MustContainErrMsg(`alter table t change a c int`, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") +} + +func TestDropPartitionKeyColumn(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("create database DropPartitionKeyColumn") + defer tk.MustExec("drop database DropPartitionKeyColumn") + tk.MustExec("use DropPartitionKeyColumn") + + tk.MustExec("create table t1 (a tinyint, b char) partition by range (a) ( partition p0 values less than (10) )") + err := tk.ExecToErr("alter table t1 drop column a") + require.Error(t, err) + require.Equal(t, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed", err.Error()) + tk.MustExec("alter table t1 drop column b") + + tk.MustExec("create table t2 (a tinyint, b char) partition by range (a-1) ( partition p0 values less than (10) )") + err = tk.ExecToErr("alter table t2 drop column a") + require.Error(t, err) + require.Equal(t, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed", err.Error()) + tk.MustExec("alter table t2 drop column b") + + tk.MustExec("create table t3 (a tinyint, b char) partition by hash(a) partitions 4;") + err = tk.ExecToErr("alter table t3 drop column a") + require.Error(t, err) + require.Equal(t, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed", err.Error()) + tk.MustExec("alter table t3 drop column b") + + tk.MustExec("create table t4 (a char, b char) partition by list columns (a) ( partition p0 values in ('0'), partition p1 values in ('a'), partition p2 values in ('b'));") + err = tk.ExecToErr("alter table t4 drop column a") + require.Error(t, err) + require.Equal(t, "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed", err.Error()) + tk.MustExec("alter table t4 drop column b") } func TestReorgPartitionConcurrent(t *testing.T) { diff --git a/ddl/db_table_test.go 
b/ddl/db_table_test.go index f4e5290317d56..cbb3211b54985 100644 --- a/ddl/db_table_test.go +++ b/ddl/db_table_test.go @@ -871,8 +871,7 @@ func TestDDLWithInvalidTableInfo(t *testing.T) { tk.MustExec("create table t (a bigint, b int, c int generated always as (b+1)) partition by hash(a) partitions 4;") // Test drop partition column. - // TODO: refine the error message to compatible with MySQL - tk.MustGetErrMsg("alter table t drop column a;", "[planner:1054]Unknown column 'a' in 'expression'") + tk.MustGetErrMsg("alter table t drop column a;", "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed") // Test modify column with invalid expression. tk.MustGetErrMsg("alter table t modify column c int GENERATED ALWAYS AS ((case when (a = 0) then 0when (a > 0) then (b / a) end));", "[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 97 near \"then (b / a) end));\" ") // Test add column with invalid expression. diff --git a/ddl/db_test.go b/ddl/db_test.go index 88b3bf5b2b0df..2746154bd609b 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -20,6 +20,7 @@ import ( "math" "strconv" "strings" + "sync" "testing" "time" @@ -1789,3 +1790,37 @@ func TestHashPartitionAddColumn(t *testing.T) { dom.DDL().SetHook(hook) tk.MustExec("alter table t add column c int") } + +func TestSetInvalidDefaultValueAfterModifyColumn(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t(a int, b int)") + + var wg sync.WaitGroup + var checkErr error + one := false + hook := &ddl.TestDDLCallback{Do: dom} + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.SchemaState != model.StateDeleteOnly { + return + } + if !one { + one = true + } else { + return + } + wg.Add(1) + go func() { + tk2 := testkit.NewTestKit(t, store) + tk2.MustExec("use test") + _, checkErr = tk2.Exec("alter table t alter column a set default 1") + wg.Done() + }() + } + dom.DDL().SetHook(hook) + tk.MustExec("alter table t modify column a text(100)") + wg.Wait() + require.EqualError(t, checkErr, "[ddl:1101]BLOB/TEXT/JSON column 'a' can't have a default value") +} diff --git a/ddl/ddl.go b/ddl/ddl.go index 8c4d5235ea7ad..4cbdcfde9eeef 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -252,10 +252,6 @@ type DDL interface { GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.InfoSchema // DoDDLJob does the DDL job, it's exported for test. DoDDLJob(ctx sessionctx.Context, job *model.Job) error - // MoveJobFromQueue2Table move existing DDLs from queue to table. - MoveJobFromQueue2Table(bool) error - // MoveJobFromTable2Queue move existing DDLs from table to queue. 
- MoveJobFromTable2Queue() error } type limitJobTask struct { @@ -270,7 +266,6 @@ type ddl struct { limitJobCh chan *limitJobTask *ddlCtx - workers map[workerType]*worker sessPool *sessionPool delRangeMgr delRangeManager enableTiFlashPoll *atomicutil.Bool @@ -439,9 +434,9 @@ func (dc *ddlCtx) setDDLSourceForDiagnosis(job *model.Job) { ctx, exists := dc.jobCtx.jobCtxMap[job.ID] if !exists { ctx = NewJobContext() - ctx.setDDLLabelForDiagnosis(job) dc.jobCtx.jobCtxMap[job.ID] = ctx } + ctx.setDDLLabelForDiagnosis(job) } func (dc *ddlCtx) getResourceGroupTaggerForTopSQL(job *model.Job) tikvrpc.ResourceGroupTagger { @@ -624,7 +619,6 @@ func newDDL(ctx context.Context, options ...Option) *ddl { // Register functions for enable/disable ddl when changing system variable `tidb_enable_ddl`. variable.EnableDDL = d.EnableDDL variable.DisableDDL = d.DisableDDL - variable.SwitchConcurrentDDL = d.SwitchConcurrentDDL variable.SwitchMDL = d.SwitchMDL return d @@ -656,7 +650,7 @@ func (d *ddl) newDeleteRangeManager(mock bool) delRangeManager { func (d *ddl) prepareWorkers4ConcurrencyDDL() { workerFactory := func(tp workerType) func() (pools.Resource, error) { return func() (pools.Resource, error) { - wk := newWorker(d.ctx, tp, d.sessPool, d.delRangeMgr, d.ddlCtx, true) + wk := newWorker(d.ctx, tp, d.sessPool, d.delRangeMgr, d.ddlCtx) sessForJob, err := d.sessPool.get() if err != nil { return nil, err @@ -679,23 +673,6 @@ func (d *ddl) prepareWorkers4ConcurrencyDDL() { d.wg.Run(d.startDispatchLoop) } -func (d *ddl) prepareWorkers4legacyDDL() { - d.workers = make(map[workerType]*worker, 2) - d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx, false) - d.workers[addIdxWorker] = newWorker(d.ctx, addIdxWorker, d.sessPool, d.delRangeMgr, d.ddlCtx, false) - for _, worker := range d.workers { - worker.wg.Add(1) - w := worker - go w.start(d.ddlCtx) - - metrics.DDLCounter.WithLabelValues(fmt.Sprintf("%s_%s", metrics.CreateDDL, worker.String())).Inc() - - // When the start function is called, we will send a fake job to let worker - // checks owner firstly and try to find whether a job exists and run. - asyncNotify(worker.ddlJobCh) - } -} - // Start implements DDL.Start interface. func (d *ddl) Start(ctxPool *pools.ResourcePool) error { logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load())) @@ -713,7 +690,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil) d.prepareWorkers4ConcurrencyDDL() - d.prepareWorkers4legacyDDL() if config.TableLockEnabled() { d.wg.Add(1) @@ -799,9 +775,6 @@ func (d *ddl) close() { d.generalDDLWorkerPool.close() } - for _, worker := range d.workers { - worker.Close() - } // d.delRangeMgr using sessions from d.sessPool. // Put it before d.sessPool.close to reduce the time spent by d.sessPool.close. 
if d.delRangeMgr != nil { @@ -921,24 +894,10 @@ func (d *ddl) asyncNotifyWorker(job *model.Job) { if !config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() { return } - if variable.EnableConcurrentDDL.Load() { - if d.isOwner() { - asyncNotify(d.ddlJobCh) - } else { - d.asyncNotifyByEtcd(addingDDLJobConcurrent, job) - } + if d.isOwner() { + asyncNotify(d.ddlJobCh) } else { - var worker *worker - if job.MayNeedReorg() { - worker = d.workers[addIdxWorker] - } else { - worker = d.workers[generalWorker] - } - if d.ownerManager.IsOwner() { - asyncNotify(worker.ddlJobCh) - } else { - d.asyncNotifyByEtcd(worker.addingDDLJobKey, job) - } + d.asyncNotifyByEtcd(addingDDLJobConcurrent, job) } } @@ -1055,7 +1014,7 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { continue } sessVars.StmtCtx.DDLJobID = 0 // Avoid repeat. - errs, err := CancelJobs(se, d.store, []int64{jobID}) + errs, err := CancelJobs(se, []int64{jobID}) d.sessPool.put(se) if len(errs) > 0 { logutil.BgLogger().Warn("error canceling DDL job", zap.Error(errs[0])) @@ -1182,55 +1141,12 @@ func (d *ddl) startCleanDeadTableLock() { } } -// SwitchConcurrentDDL changes the DDL to concurrent DDL if toConcurrentDDL is true, otherwise, queue based DDL. -func (d *ddl) SwitchConcurrentDDL(toConcurrentDDL bool) error { - if !d.isOwner() { - return kv.RunInNewTxn(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), d.store, true, func(ctx context.Context, txn kv.Transaction) error { - isConcurrentDDL, err := meta.NewMeta(txn).IsConcurrentDDL() - if err != nil { - return err - } - if isConcurrentDDL != toConcurrentDDL { - return errors.New("please set it on the DDL owner node") - } - return nil - }) - } - - if variable.EnableMDL.Load() && !toConcurrentDDL { - return errors.New("can not disable concurrent ddl when metadata lock is enabled") - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - defer cancel() - d.waiting.Store(true) - defer d.waiting.Store(false) - if err := d.wait4Switch(ctx); err != nil { - return err - } - - var err error - if toConcurrentDDL { - err = d.MoveJobFromQueue2Table(false) - } else { - err = d.MoveJobFromTable2Queue() - } - if err == nil { - variable.EnableConcurrentDDL.Store(toConcurrentDDL) - logutil.BgLogger().Info("[ddl] SwitchConcurrentDDL", zap.Bool("toConcurrentDDL", toConcurrentDDL)) - } else { - logutil.BgLogger().Warn("[ddl] SwitchConcurrentDDL", zap.Bool("toConcurrentDDL", toConcurrentDDL), zap.Error(err)) - } - return err -} - // SwitchMDL enables MDL or disable DDL. func (d *ddl) SwitchMDL(enable bool) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() - // Disable MDL for test. - if enable && !variable.DefTiDBEnableConcurrentDDL { + if enable { sql := fmt.Sprintf("UPDATE HIGH_PRIORITY %[1]s.%[2]s SET VARIABLE_VALUE = %[4]d WHERE VARIABLE_NAME = '%[3]s'", mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBEnableMDL, 0) sess, err := d.sessPool.get() @@ -1288,23 +1204,6 @@ func (d *ddl) SwitchMDL(enable bool) error { return nil } -func (d *ddl) wait4Switch(ctx context.Context) error { - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - d.runningJobs.RLock() - if len(d.runningJobs.ids) == 0 { - d.runningJobs.RUnlock() - return nil - } - d.runningJobs.RUnlock() - time.Sleep(time.Second * 1) - } -} - // RecoverInfo contains information needed by DDL.RecoverTable. 
type RecoverInfo struct { SchemaID int64 @@ -1419,13 +1318,8 @@ func GetDDLInfo(s sessionctx.Context) (*Info, error) { } t := meta.NewMeta(txn) info.Jobs = make([]*model.Job, 0, 2) - enable := variable.EnableConcurrentDDL.Load() var generalJob, reorgJob *model.Job - if enable { - generalJob, reorgJob, err = get2JobsFromTable(sess) - } else { - generalJob, reorgJob, err = get2JobsFromQueue(t) - } + generalJob, reorgJob, err = get2JobsFromTable(sess) if err != nil { return nil, errors.Trace(err) } @@ -1446,7 +1340,7 @@ func GetDDLInfo(s sessionctx.Context) (*Info, error) { return info, nil } - _, info.ReorgHandle, _, _, err = newReorgHandler(t, sess, enable).GetDDLReorgHandle(reorgJob) + _, info.ReorgHandle, _, _, err = newReorgHandler(t, sess).GetDDLReorgHandle(reorgJob) if err != nil { if meta.ErrDDLReorgElementNotExist.Equal(err) { return info, nil @@ -1457,19 +1351,6 @@ func GetDDLInfo(s sessionctx.Context) (*Info, error) { return info, nil } -func get2JobsFromQueue(t *meta.Meta) (*model.Job, *model.Job, error) { - generalJob, err := t.GetDDLJobByIdx(0) - if err != nil { - return nil, nil, errors.Trace(err) - } - reorgJob, err := t.GetDDLJobByIdx(0, meta.AddIndexJobListKey) - if err != nil { - return nil, nil, errors.Trace(err) - } - - return generalJob, reorgJob, nil -} - func get2JobsFromTable(sess *session) (*model.Job, *model.Job, error) { var generalJob, reorgJob *model.Job jobs, err := getJobsBySQL(sess, JobTable, "not reorg order by job_id limit 1") @@ -1491,82 +1372,8 @@ func get2JobsFromTable(sess *session) (*model.Job, *model.Job, error) { } // CancelJobs cancels the DDL jobs. -func CancelJobs(se sessionctx.Context, store kv.Storage, ids []int64) (errs []error, err error) { - if variable.EnableConcurrentDDL.Load() { - return cancelConcurrencyJobs(se, ids) - } - - err = kv.RunInNewTxn(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), store, true, func(ctx context.Context, txn kv.Transaction) error { - errs, err = cancelLegacyJobs(txn, ids) - return err - }) - return -} - -func cancelLegacyJobs(txn kv.Transaction, ids []int64) ([]error, error) { - if len(ids) == 0 { - return nil, nil - } - - errs := make([]error, len(ids)) - t := meta.NewMeta(txn) - generalJobs, err := getDDLJobsInQueue(t, meta.DefaultJobListKey) - if err != nil { - return nil, errors.Trace(err) - } - addIdxJobs, err := getDDLJobsInQueue(t, meta.AddIndexJobListKey) - if err != nil { - return nil, errors.Trace(err) - } - jobs := append(generalJobs, addIdxJobs...) - jobsMap := make(map[int64]int) - for i, id := range ids { - jobsMap[id] = i - } - for j, job := range jobs { - i, ok := jobsMap[job.ID] - if !ok { - logutil.BgLogger().Debug("the job that needs to be canceled isn't equal to current job", - zap.Int64("need to canceled job ID", job.ID), - zap.Int64("current job ID", job.ID)) - continue - } - delete(jobsMap, job.ID) - // These states can't be cancelled. - if job.IsDone() || job.IsSynced() { - errs[i] = dbterror.ErrCancelFinishedDDLJob.GenWithStackByArgs(job.ID) - continue - } - // If the state is rolling back, it means the work is cleaning the data after cancelling the job. - if job.IsCancelled() || job.IsRollingback() || job.IsRollbackDone() { - continue - } - if !job.IsRollbackable() { - errs[i] = dbterror.ErrCannotCancelDDLJob.GenWithStackByArgs(job.ID) - continue - } - - job.State = model.JobStateCancelling - // Make sure RawArgs isn't overwritten. 
- err := json.Unmarshal(job.RawArgs, &job.Args) - if err != nil { - errs[i] = errors.Trace(err) - continue - } - if j >= len(generalJobs) { - offset := int64(j - len(generalJobs)) - err = t.UpdateDDLJob(offset, job, true, meta.AddIndexJobListKey) - } else { - err = t.UpdateDDLJob(int64(j), job, true) - } - if err != nil { - errs[i] = errors.Trace(err) - } - } - for id, i := range jobsMap { - errs[i] = dbterror.ErrDDLJobNotFound.GenWithStackByArgs(id) - } - return errs, nil +func CancelJobs(se sessionctx.Context, ids []int64) (errs []error, err error) { + return cancelConcurrencyJobs(se, ids) } // cancelConcurrencyJobs cancels the DDL jobs that are in the concurrent state. @@ -1645,45 +1452,9 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) return errs, nil } -func getDDLJobsInQueue(t *meta.Meta, jobListKey meta.JobListKeyType) ([]*model.Job, error) { - cnt, err := t.DDLJobQueueLen(jobListKey) - if err != nil { - return nil, errors.Trace(err) - } - jobs := make([]*model.Job, cnt) - for i := range jobs { - jobs[i], err = t.GetDDLJobByIdx(int64(i), jobListKey) - if err != nil { - return nil, errors.Trace(err) - } - } - return jobs, nil -} - // GetAllDDLJobs get all DDL jobs and sorts jobs by job.ID. func GetAllDDLJobs(sess sessionctx.Context, t *meta.Meta) ([]*model.Job, error) { - if variable.EnableConcurrentDDL.Load() { - return getJobsBySQL(newSession(sess), JobTable, "1 order by job_id") - } - - return getDDLJobs(t) -} - -// getDDLJobs get all DDL jobs and sorts jobs by job.ID. -func getDDLJobs(t *meta.Meta) ([]*model.Job, error) { - generalJobs, err := getDDLJobsInQueue(t, meta.DefaultJobListKey) - if err != nil { - return nil, errors.Trace(err) - } - addIdxJobs, err := getDDLJobsInQueue(t, meta.AddIndexJobListKey) - if err != nil { - return nil, errors.Trace(err) - } - jobs := append(generalJobs, addIdxJobs...) - slices.SortFunc(jobs, func(i, j *model.Job) bool { - return i.ID < j.ID - }) - return jobs, nil + return getJobsBySQL(newSession(sess), JobTable, "1 order by job_id") } // MaxHistoryJobs is exported for testing. @@ -1786,7 +1557,11 @@ func (s *session) execute(ctx context.Context, query string, label string) ([]ch defer func() { metrics.DDLJobTableDuration.WithLabelValues(label + "-" + metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) }() - rs, err := s.Context.(sqlexec.SQLExecutor).ExecuteInternal(kv.WithInternalSourceType(ctx, kv.InternalTxnDDL), query) + + if ctx.Value(kv.RequestSourceKey) == nil { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) + } + rs, err := s.Context.(sqlexec.SQLExecutor).ExecuteInternal(ctx, query) if err != nil { return nil, errors.Trace(err) } @@ -1869,19 +1644,11 @@ func GetHistoryJobByID(sess sessionctx.Context, id int64) (*model.Job, error) { return job, errors.Trace(err) } -// AddHistoryDDLJobForTest used for test. -func AddHistoryDDLJobForTest(sess sessionctx.Context, t *meta.Meta, job *model.Job, updateRawArgs bool) error { - return AddHistoryDDLJob(newSession(sess), t, job, updateRawArgs, variable.EnableConcurrentDDL.Load()) -} - // AddHistoryDDLJob record the history job. -func AddHistoryDDLJob(sess *session, t *meta.Meta, job *model.Job, updateRawArgs bool, concurrentDDL bool) error { - if concurrentDDL { - // only add history job into table if it is concurrent DDL. 
- err := addHistoryDDLJob2Table(sess, job, updateRawArgs) - if err != nil { - logutil.BgLogger().Info("[ddl] failed to add DDL job to history table", zap.Error(err)) - } +func AddHistoryDDLJob(sess *session, t *meta.Meta, job *model.Job, updateRawArgs bool) error { + err := addHistoryDDLJob2Table(sess, job, updateRawArgs) + if err != nil { + logutil.BgLogger().Info("[ddl] failed to add DDL job to history table", zap.Error(err)) } // we always add history DDL job to job list at this moment. return t.AddHistoryDDLJob(job, updateRawArgs) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index a5e89e4996d0a..2ae8a46134d4b 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -4306,6 +4306,9 @@ func checkIsDroppableColumn(ctx sessionctx.Context, is infoschema.InfoSchema, sc if err = isDroppableColumn(tblInfo, colName); err != nil { return false, errors.Trace(err) } + if err = checkDropColumnWithPartitionConstraint(t, colName); err != nil { + return false, errors.Trace(err) + } // Check the column with foreign key. err = checkDropColumnWithForeignKeyConstraint(is, schema.Name.L, tblInfo, colName.L) if err != nil { @@ -4326,6 +4329,24 @@ func checkIsDroppableColumn(ctx sessionctx.Context, is infoschema.InfoSchema, sc return true, nil } +// checkDropColumnWithPartitionConstraint is used to check the partition constraint of the drop column. +func checkDropColumnWithPartitionConstraint(t table.Table, colName model.CIStr) error { + if t.Meta().Partition == nil { + return nil + } + pt, ok := t.(table.PartitionedTable) + if !ok { + // Should never happen! + return errors.Trace(dbterror.ErrDependentByPartitionFunctional.GenWithStackByArgs(colName.L)) + } + for _, name := range pt.GetPartitionColumnNames() { + if strings.EqualFold(name.L, colName.L) { + return errors.Trace(dbterror.ErrDependentByPartitionFunctional.GenWithStackByArgs(colName.L)) + } + } + return nil +} + func checkVisibleColumnCnt(t table.Table, addCnt, dropCnt int) error { tblInfo := t.Meta() visibleColumCnt := 0 @@ -4706,9 +4727,15 @@ func GetModifiableColumnJob( for _, name := range pt.GetPartitionColumnNames() { if strings.EqualFold(name.L, col.Name.L) { isPartitioningColumn = true + break } } if isPartitioningColumn { + // TODO: update the partitioning columns with new names if column is renamed + // Would be an extension from MySQL which does not support it. + if col.Name.L != newCol.Name.L { + return nil, dbterror.ErrDependentByPartitionFunctional.GenWithStackByArgs(col.Name.L) + } if !isColTypeAllowedAsPartitioningCol(newCol.FieldType) { return nil, dbterror.ErrNotAllowedTypeInPartition.GenWithStackByArgs(newCol.Name.O) } @@ -4752,7 +4779,6 @@ func GetModifiableColumnJob( newTblInfo.Columns = newCols var buf bytes.Buffer - // TODO: update the partitioning columns with new names if column is renamed AppendPartitionInfo(tblInfo.GetPartitionInfo(), &buf, mysql.ModeNone) // The parser supports ALTER TABLE ... PARTITION BY ... 
even if the ddl code does not yet :) // Ignoring warnings @@ -5084,6 +5110,11 @@ func (d *ddl) RenameColumn(ctx sessionctx.Context, ident ast.Ident, spec *ast.Al } } + err = checkDropColumnWithPartitionConstraint(tbl, oldColName) + if err != nil { + return errors.Trace(err) + } + tzName, tzOffset := ddlutil.GetTimeZone(ctx) newCol := oldCol.Clone() diff --git a/ddl/ddl_api_test.go b/ddl/ddl_api_test.go index f4010015f5456..9f36cc95f806c 100644 --- a/ddl/ddl_api_test.go +++ b/ddl/ddl_api_test.go @@ -115,73 +115,6 @@ func TestGetDDLJobsIsSort(t *testing.T) { require.NoError(t, err) } -func TestGetHistoryDDLJobs(t *testing.T) { - store := testkit.CreateMockStore(t) - - // delete the internal DDL record. - err := kv.RunInNewTxn(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), store, false, func(ctx context.Context, txn kv.Transaction) error { - return meta.NewMeta(txn).ClearAllHistoryJob() - }) - require.NoError(t, err) - testkit.NewTestKit(t, store).MustExec("delete from mysql.tidb_ddl_history") - - tk := testkit.NewTestKit(t, store) - sess := tk.Session() - tk.MustExec("begin") - - txn, err := sess.Txn(true) - require.NoError(t, err) - - m := meta.NewMeta(txn) - cnt := 11 - jobs := make([]*model.Job, cnt) - for i := 0; i < cnt; i++ { - jobs[i] = &model.Job{ - ID: int64(i), - SchemaID: 1, - Type: model.ActionCreateTable, - } - err = ddl.AddHistoryDDLJobForTest(sess, m, jobs[i], true) - require.NoError(t, err) - - historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, ddl.DefNumHistoryJobs) - require.NoError(t, err) - - if i+1 > ddl.MaxHistoryJobs { - require.Len(t, historyJobs, ddl.MaxHistoryJobs) - } else { - require.Len(t, historyJobs, i+1) - } - } - - delta := cnt - ddl.MaxHistoryJobs - historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, ddl.DefNumHistoryJobs) - require.NoError(t, err) - require.Len(t, historyJobs, ddl.MaxHistoryJobs) - - l := len(historyJobs) - 1 - for i, job := range historyJobs { - require.Equal(t, jobs[delta+l-i].ID, job.ID) - require.Equal(t, int64(1), job.SchemaID) - require.Equal(t, model.ActionCreateTable, job.Type) - } - - var historyJobs2 []*model.Job - err = ddl.IterHistoryDDLJobs(txn, func(jobs []*model.Job) (b bool, e error) { - for _, job := range jobs { - historyJobs2 = append(historyJobs2, job) - if len(historyJobs2) == ddl.DefNumHistoryJobs { - return true, nil - } - } - return false, nil - }) - require.NoError(t, err) - require.Equal(t, historyJobs, historyJobs2) - - tk.MustExec("rollback") -} - func TestIsJobRollbackable(t *testing.T) { cases := []struct { tp model.ActionType diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index a2e75119e4d12..6b210d2445c26 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -19,7 +19,6 @@ import ( "testing" "time" - "github.com/pingcap/failpoint" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -271,109 +270,6 @@ func TestBuildJobDependence(t *testing.T) { require.NoError(t, err) } -func TestNotifyDDLJob(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/NoDDLDispatchLoop", `return(true)`)) - defer require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/NoDDLDispatchLoop")) - - getFirstNotificationAfterStartDDL := func(d *ddl) { - select { - case <-d.workers[addIdxWorker].ddlJobCh: - default: - // The notification may be received by the worker. 
- } - select { - case <-d.workers[generalWorker].ddlJobCh: - default: - // The notification may be received by the worker. - } - - select { - case <-d.ddlJobCh: - default: - } - } - - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - getFirstNotificationAfterStartDDL(d) - // Ensure that the notification is not handled in workers `start` function. - d.cancel() - for _, worker := range d.workers { - worker.Close() - } - - job := &model.Job{ - SchemaID: 1, - TableID: 2, - Type: model.ActionCreateTable, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{}, - } - // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB. - // This DDL request is a general DDL job. - d.asyncNotifyWorker(job) - select { - case <-d.workers[generalWorker].ddlJobCh: - case <-d.ddlJobCh: - default: - require.FailNow(t, "do not get the general job notification") - } - // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB. - // This DDL request is a add index DDL job. - job.Type = model.ActionAddIndex - d.asyncNotifyWorker(job) - select { - case <-d.workers[addIdxWorker].ddlJobCh: - case <-d.ddlJobCh: - default: - require.FailNow(t, "do not get the add index job notification") - } - - // Test the notification mechanism that the owner and the server receiving the DDL request are not on the same TiDB. - // And the etcd client is nil. - d1, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d1.Stop()) - }() - getFirstNotificationAfterStartDDL(d1) - // Ensure that the notification is not handled by worker's "start". 
- d1.cancel() - for _, worker := range d1.workers { - worker.Close() - } - d1.ownerManager.RetireOwner() - d1.asyncNotifyWorker(job) - job.Type = model.ActionCreateTable - d1.asyncNotifyWorker(job) - require.False(t, d1.OwnerManager().IsOwner()) - select { - case <-d1.workers[addIdxWorker].ddlJobCh: - require.FailNow(t, "should not get the add index job notification") - case <-d1.workers[generalWorker].ddlJobCh: - require.FailNow(t, "should not get the general job notification") - case <-d1.ddlJobCh: - require.FailNow(t, "should not get the job notification") - default: - } -} - func TestError(t *testing.T) { kvErrs := []*terror.Error{ dbterror.ErrDDLJobNotFound, diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 89e515db8e1bc..7843fac34a69e 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -97,8 +97,6 @@ type worker struct { logCtx context.Context lockSeqNum bool - concurrentDDL bool - *ddlCtx } @@ -119,11 +117,11 @@ func NewJobContext() *JobContext { cacheSQL: "", cacheNormalizedSQL: "", cacheDigest: nil, - tp: "unknown", + tp: "", } } -func newWorker(ctx context.Context, tp workerType, sessPool *sessionPool, delRangeMgr delRangeManager, dCtx *ddlCtx, concurrentDDL bool) *worker { +func newWorker(ctx context.Context, tp workerType, sessPool *sessionPool, delRangeMgr delRangeManager, dCtx *ddlCtx) *worker { worker := &worker{ id: ddlWorkerID.Add(1), tp: tp, @@ -132,7 +130,6 @@ func newWorker(ctx context.Context, tp workerType, sessPool *sessionPool, delRan ddlCtx: dCtx, sessPool: sessPool, delRangeManager: delRangeMgr, - concurrentDDL: concurrentDDL, } worker.addingDDLJobKey = addingDDLJobPrefix + worker.typeStr() worker.logCtx = logutil.WithKeyValue(context.Background(), "worker", worker.String()) @@ -165,59 +162,6 @@ func (w *worker) Close() { logutil.Logger(w.logCtx).Info("[ddl] DDL worker closed", zap.Duration("take time", time.Since(startTime))) } -// start is used for async online schema changing, it will try to become the owner firstly, -// then wait or pull the job queue to handle a schema change job. -func (w *worker) start(d *ddlCtx) { - logutil.Logger(w.logCtx).Info("[ddl] start DDL worker") - defer w.wg.Done() - defer tidbutil.Recover( - metrics.LabelDDLWorker, - fmt.Sprintf("DDL ID %s, %s start", d.uuid, w), - nil, true, - ) - - // We use 4 * lease time to check owner's timeout, so here, we will update owner's status - // every 2 * lease time. If lease is 0, we will use default 1s. - // But we use etcd to speed up, normally it takes less than 1s now, so we use 1s as the max value. 
- checkTime := chooseLeaseTime(2*d.lease, 1*time.Second) - - ticker := time.NewTicker(checkTime) - defer ticker.Stop() - var notifyDDLJobByEtcdCh clientv3.WatchChan - if d.etcdCli != nil { - notifyDDLJobByEtcdCh = d.etcdCli.Watch(context.Background(), w.addingDDLJobKey) - } - - rewatchCnt := 0 - for { - ok := true - select { - case <-ticker.C: - logutil.Logger(w.logCtx).Debug("[ddl] wait to check DDL status again", zap.Duration("interval", checkTime)) - case <-w.ddlJobCh: - case _, ok = <-notifyDDLJobByEtcdCh: - case <-w.ctx.Done(): - return - } - - if !ok { - logutil.Logger(w.logCtx).Warn("[ddl] start worker watch channel closed", zap.String("watch key", w.addingDDLJobKey)) - notifyDDLJobByEtcdCh = d.etcdCli.Watch(context.Background(), w.addingDDLJobKey) - rewatchCnt++ - if rewatchCnt > 10 { - time.Sleep(time.Duration(rewatchCnt) * time.Second) - } - continue - } - - rewatchCnt = 0 - err := w.handleDDLJobQueue(d) - if err != nil { - logutil.Logger(w.logCtx).Warn("[ddl] handle DDL job failed", zap.Error(err)) - } - } -} - func (d *ddl) asyncNotifyByEtcd(addingDDLJobKey string, job *model.Job) { if d.etcdCli == nil { return @@ -239,37 +183,6 @@ func asyncNotify(ch chan struct{}) { } } -// buildJobDependence sets the curjob's dependency-ID. -// The dependency-job's ID must less than the current job's ID, and we need the largest one in the list. -func buildJobDependence(t *meta.Meta, curJob *model.Job) error { - // Jobs in the same queue are ordered. If we want to find a job's dependency-job, we need to look for - // it from the other queue. So if the job is "ActionAddIndex" job, we need find its dependency-job from DefaultJobList. - jobListKey := meta.DefaultJobListKey - if !curJob.MayNeedReorg() { - jobListKey = meta.AddIndexJobListKey - } - jobs, err := t.GetAllDDLJobsInQueue(jobListKey) - if err != nil { - return errors.Trace(err) - } - - for _, job := range jobs { - if curJob.ID < job.ID { - continue - } - isDependent, err := curJob.IsDependentOn(job) - if err != nil { - return errors.Trace(err) - } - if isDependent { - logutil.BgLogger().Info("[ddl] current DDL job depends on other job", zap.String("currentJob", curJob.String()), zap.String("dependentJob", job.String())) - curJob.DependencyID = job.ID - break - } - } - return nil -} - func (d *ddl) limitDDLJobs() { defer tidbutil.Recover(metrics.LabelDDL, "limitDDLJobs", nil, true) @@ -295,7 +208,7 @@ func (d *ddl) addBatchDDLJobs(tasks []*limitJobTask) { startTime := time.Now() var err error // DDLForce2Queue is a flag to tell DDL worker to always push the job to the DDL queue. - toTable := variable.EnableConcurrentDDL.Load() && !variable.DDLForce2Queue.Load() + toTable := !variable.DDLForce2Queue.Load() if toTable { err = d.addBatchDDLJobs2Table(tasks) } else { @@ -315,6 +228,37 @@ func (d *ddl) addBatchDDLJobs(tasks []*limitJobTask) { } } +// buildJobDependence sets the curjob's dependency-ID. +// The dependency-job's ID must less than the current job's ID, and we need the largest one in the list. +func buildJobDependence(t *meta.Meta, curJob *model.Job) error { + // Jobs in the same queue are ordered. If we want to find a job's dependency-job, we need to look for + // it from the other queue. So if the job is "ActionAddIndex" job, we need find its dependency-job from DefaultJobList. 
+ jobListKey := meta.DefaultJobListKey + if !curJob.MayNeedReorg() { + jobListKey = meta.AddIndexJobListKey + } + jobs, err := t.GetAllDDLJobsInQueue(jobListKey) + if err != nil { + return errors.Trace(err) + } + + for _, job := range jobs { + if curJob.ID < job.ID { + continue + } + isDependent, err := curJob.IsDependentOn(job) + if err != nil { + return errors.Trace(err) + } + if isDependent { + logutil.BgLogger().Info("[ddl] current DDL job depends on other job", zap.String("currentJob", curJob.String()), zap.String("dependentJob", job.String())) + curJob.DependencyID = job.ID + break + } + } + return nil +} + func (d *ddl) addBatchDDLJobs2Queue(tasks []*limitJobTask) error { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) return kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { @@ -444,13 +388,6 @@ func injectFailPointForGetJob(job *model.Job) { }) } -// getFirstDDLJob gets the first DDL job form DDL queue. -func (w *worker) getFirstDDLJob(t *meta.Meta) (*model.Job, error) { - job, err := t.GetDDLJobByIdx(0) - injectFailPointForGetJob(job) - return job, errors.Trace(err) -} - // handleUpdateJobError handles the too large DDL job. func (w *worker) handleUpdateJobError(t *meta.Meta, job *model.Job, err error) error { if err == nil { @@ -471,7 +408,7 @@ func (w *worker) handleUpdateJobError(t *meta.Meta, job *model.Job, err error) e // updateDDLJob updates the DDL job information. // Every time we enter another state except final state, we must call this function. -func (w *worker) updateDDLJob(t *meta.Meta, job *model.Job, meetErr bool) error { +func (w *worker) updateDDLJob(job *model.Job, meetErr bool) error { failpoint.Inject("mockErrEntrySizeTooLarge", func(val failpoint.Value) { if val.(bool) { failpoint.Return(kv.ErrEntryTooLarge) @@ -482,13 +419,7 @@ func (w *worker) updateDDLJob(t *meta.Meta, job *model.Job, meetErr bool) error logutil.Logger(w.logCtx).Info("[ddl] meet something wrong before update DDL job, shouldn't update raw args", zap.String("job", job.String())) } - var err error - if w.concurrentDDL { - err = updateDDLJob2Table(w.sess, job, updateRawArgs) - } else { - err = t.UpdateDDLJob(0, job, updateRawArgs) - } - return errors.Trace(err) + return errors.Trace(updateDDLJob2Table(w.sess, job, updateRawArgs)) } // registerMDLInfo registers metadata lock info. 
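
Context for the relocated buildJobDependence above: the rule itself is unchanged — only jobs already queued with a smaller ID are candidates, and the scan stops at the first one the current job depends on. Below is a minimal, self-contained sketch of that rule; the job struct, the same-table isDependentOn predicate, and buildDependence are simplified stand-ins for model.Job, Job.IsDependentOn, and buildJobDependence, not the actual TiDB types.

package main

import "fmt"

// job is a simplified stand-in for model.Job.
type job struct {
	ID           int64
	TableID      int64
	DependencyID int64
}

// isDependentOn is a simplified stand-in for Job.IsDependentOn:
// here, two jobs conflict when they touch the same table.
func isDependentOn(a, b *job) bool { return a.TableID == b.TableID }

// buildDependence mirrors the shape of the relocated buildJobDependence:
// only jobs with a smaller ID are candidates, and the scan stops at the
// first dependent one found in queue order.
func buildDependence(queued []*job, cur *job) {
	for _, j := range queued {
		if cur.ID < j.ID {
			continue // never depend on a newer job
		}
		if isDependentOn(cur, j) {
			cur.DependencyID = j.ID
			break
		}
	}
}

func main() {
	queued := []*job{{ID: 1, TableID: 10}, {ID: 2, TableID: 20}, {ID: 3, TableID: 10}}
	cur := &job{ID: 4, TableID: 10}
	buildDependence(queued, cur)
	fmt.Println(cur.DependencyID) // 1: the first older job on the same table
}
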
@@ -631,11 +562,7 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) { if err != nil { return errors.Trace(err) } - if w.concurrentDDL { - err = w.deleteDDLJob(job) - } else { - _, err = t.DeQueueDDLJob() - } + err = w.deleteDDLJob(job) if err != nil { return errors.Trace(err) } @@ -650,7 +577,7 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) { } w.writeDDLSeqNum(job) w.removeJobCtx(job) - err = AddHistoryDDLJob(w.sess, t, job, updateRawArgs, w.concurrentDDL) + err = AddHistoryDDLJob(w.sess, t, job, updateRawArgs) return errors.Trace(err) } @@ -714,13 +641,6 @@ func isDependencyJobDone(t *meta.Meta, job *model.Job) (bool, error) { return true, nil } -func newMetaWithQueueTp(txn kv.Transaction, tp workerType) *meta.Meta { - if tp == addIdxWorker { - return meta.NewMeta(txn, meta.AddIndexJobListKey) - } - return meta.NewMeta(txn) -} - func (w *JobContext) setDDLLabelForTopSQL(job *model.Job) { if !topsqlstate.TopSQLEnabled() || job == nil { return @@ -761,6 +681,9 @@ func getDDLRequestSource(job *model.Job) string { } func (w *JobContext) setDDLLabelForDiagnosis(job *model.Job) { + if w.tp != "" { + return + } w.tp = getDDLRequestSource(job) w.ddlJobCtx = kv.WithInternalSourceType(w.ddlJobCtx, w.ddlJobSourceType()) } @@ -794,7 +717,7 @@ func (w *worker) HandleDDLJobTable(d *ddlCtx, job *model.Job) (int64, error) { if err != nil { return 0, err } - if !variable.EnableConcurrentDDL.Load() || d.waiting.Load() { + if d.waiting.Load() { w.sess.rollback() return 0, nil } @@ -864,7 +787,7 @@ func (w *worker) HandleDDLJobTable(d *ddlCtx, job *model.Job) (int64, error) { d.unlockSchemaVersion(job.ID) return 0, err } - err = w.updateDDLJob(t, job, runJobErr != nil) + err = w.updateDDLJob(job, runJobErr != nil) if err = w.handleUpdateJobError(t, job, err); err != nil { w.sess.rollback() d.unlockSchemaVersion(job.ID) @@ -908,152 +831,6 @@ func (w *JobContext) ddlJobSourceType() string { return w.tp } -// handleDDLJobQueue handles DDL jobs in DDL Job queue. -func (w *worker) handleDDLJobQueue(d *ddlCtx) error { - once := true - waitDependencyJobCnt := 0 - for { - if isChanClosed(w.ctx.Done()) { - return nil - } - - var ( - job *model.Job - schemaVer int64 - runJobErr error - ) - waitTime := 2 * d.lease - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) - err := kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { - d.runningJobs.Lock() - // We are not owner, return and retry checking later. - if !d.isOwner() || variable.EnableConcurrentDDL.Load() || d.waiting.Load() { - d.runningJobs.Unlock() - return nil - } - - var err error - t := newMetaWithQueueTp(txn, w.tp) - - // We become the owner. Get the first job and run it. - job, err = w.getFirstDDLJob(t) - if job == nil || err != nil { - d.runningJobs.Unlock() - return errors.Trace(err) - } - d.runningJobs.ids[job.ID] = struct{}{} - d.runningJobs.Unlock() - - defer d.deleteRunningDDLJobMap(job.ID) - - // only general ddls allowed to be executed when TiKV is disk full. 
- if w.tp == addIdxWorker && job.IsRunning() { - txn.SetDiskFullOpt(kvrpcpb.DiskFullOpt_NotAllowedOnFull) - } - - w.setDDLLabelForTopSQL(job) - w.setDDLSourceForDiagnosis(job) - jobContext := w.jobContext(job) - if tagger := w.getResourceGroupTaggerForTopSQL(job); tagger != nil { - txn.SetOption(kv.ResourceGroupTagger, tagger) - } - if isDone, err1 := isDependencyJobDone(t, job); err1 != nil || !isDone { - return errors.Trace(err1) - } - - if once { - err = waitSchemaSynced(d, job, waitTime) - if err == nil { - once = false - } - return err - } - - if job.IsDone() || job.IsRollbackDone() { - if !job.IsRollbackDone() { - job.State = model.JobStateSynced - } - err = w.finishDDLJob(t, job) - return errors.Trace(err) - } - - d.mu.RLock() - d.mu.hook.OnJobRunBefore(job) - d.mu.RUnlock() - - // set request source type to DDL type - txn.SetOption(kv.RequestSourceType, jobContext.ddlJobSourceType()) - // If running job meets error, we will save this error in job Error - // and retry later if the job is not cancelled. - schemaVer, runJobErr = w.runDDLJob(d, t, job) - if job.IsCancelled() { - txn.Reset() - err = w.finishDDLJob(t, job) - return errors.Trace(err) - } - if runJobErr != nil && !job.IsRollingback() && !job.IsRollbackDone() { - // If the running job meets an error - // and the job state is rolling back, it means that we have already handled this error. - // Some DDL jobs (such as adding indexes) may need to update the table info and the schema version, - // then shouldn't discard the KV modification. - // And the job state is rollback done, it means the job was already finished, also shouldn't discard too. - // Otherwise, we should discard the KV modification when running job. - txn.Reset() - // If error happens after updateSchemaVersion(), then the schemaVer is updated. - // Result in the retry duration is up to 2 * lease. - schemaVer = 0 - } - err = w.updateDDLJob(t, job, runJobErr != nil) - if err = w.handleUpdateJobError(t, job, err); err != nil { - return errors.Trace(err) - } - writeBinlog(d.binlogCli, txn, job) - return nil - }) - - if runJobErr != nil { - // wait a while to retry again. If we don't wait here, DDL will retry this job immediately, - // which may act like a deadlock. - logutil.Logger(w.logCtx).Info("[ddl] run DDL job failed, sleeps a while then retries it.", - zap.Duration("waitTime", GetWaitTimeWhenErrorOccurred()), zap.Error(runJobErr)) - time.Sleep(GetWaitTimeWhenErrorOccurred()) - } - if job != nil { - d.unlockSchemaVersion(job.ID) - } - - if err != nil { - w.unlockSeqNum(err) - return errors.Trace(err) - } else if job == nil { - // No job now, return and retry getting later. - return nil - } - w.unlockSeqNum(err) - w.waitDependencyJobFinished(job, &waitDependencyJobCnt) - - // Here means the job enters another state (delete only, write only, public, etc...) or is cancelled. - // If the job is done or still running or rolling back, we will wait 2 * lease time to guarantee other servers to update - // the newest schema. - waitSchemaChanged(context.Background(), d, waitTime, schemaVer, job) - - if RunInGoTest { - // d.mu.hook is initialed from domain / test callback, which will force the owner host update schema diff synchronously. 
- d.mu.RLock() - d.mu.hook.OnSchemaStateChanged(schemaVer) - d.mu.RUnlock() - } - - d.mu.RLock() - d.mu.hook.OnJobUpdated(job) - d.mu.RUnlock() - - if job.IsSynced() || job.IsCancelled() || job.IsRollbackDone() { - asyncNotify(d.ddlJobDoneCh) - } - } -} - func skipWriteBinlog(job *model.Job) bool { switch job.Type { // ActionUpdateTiFlashReplicaStatus is a TiDB internal DDL, @@ -1606,6 +1383,11 @@ func updateSchemaVersion(d *ddlCtx, t *meta.Meta, job *model.Job, multiInfos ... OldTableID: recoverTabsInfo[i].TableInfo.ID, } } + case model.ActionFlashbackCluster: + diff.TableID = -1 + if job.SchemaState == model.StatePublic { + diff.RegenerateSchemaMap = true + } default: diff.TableID = job.TableID } diff --git a/ddl/ddl_workerpool_test.go b/ddl/ddl_workerpool_test.go index e9f324ce9dff8..d8768507b8102 100644 --- a/ddl/ddl_workerpool_test.go +++ b/ddl/ddl_workerpool_test.go @@ -26,7 +26,7 @@ import ( func TestDDLWorkerPool(t *testing.T) { f := func() func() (pools.Resource, error) { return func() (pools.Resource, error) { - wk := newWorker(nil, addIdxWorker, nil, nil, nil, true) + wk := newWorker(nil, addIdxWorker, nil, nil, nil) return wk, nil } } diff --git a/ddl/fktest/foreign_key_test.go b/ddl/fktest/foreign_key_test.go index 349d9b39ea682..df461fa048e5c 100644 --- a/ddl/fktest/foreign_key_test.go +++ b/ddl/fktest/foreign_key_test.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "sync" "testing" "time" @@ -667,6 +668,28 @@ func TestCreateTableWithForeignKeyError(t *testing.T) { create: "create table t2 (id int key, constraint fk foreign key (id) references t1(name5678901234567890123456789012345678901234567890123456789012345));", err: "[ddl:1059]Identifier name 'name5678901234567890123456789012345678901234567890123456789012345' is too long", }, + // Test foreign key with temporary table + { + refer: "create temporary table t1 (id int key);", + create: "create table t2 (id int key, constraint fk foreign key (id) references t1(id));", + err: "[schema:1824]Failed to open the referenced table 't1'", + }, + { + refer: "create table t1 (id int key);", + create: "create temporary table t2 (id int key, constraint fk foreign key (id) references t1(id));", + err: "[schema:1215]Cannot add foreign key constraint", + }, + // Test foreign key with partition table + { + refer: "create table t1 (id int key) partition by hash(id) partitions 3;", + create: "create table t2 (id int key, constraint fk foreign key (id) references t1(id));", + err: "[schema:1506]Foreign key clause is not yet supported in conjunction with partitioning", + }, + { + refer: "create table t1 (id int key);", + create: "create table t2 (id int key, constraint fk foreign key (id) references t1(id)) partition by hash(id) partitions 3;", + err: "[schema:1506]Foreign key clause is not yet supported in conjunction with partitioning", + }, } for _, ca := range cases { tk.MustExec("drop table if exists t2") @@ -1415,6 +1438,40 @@ func TestAlterTableAddForeignKeyError(t *testing.T) { alter: "alter table t2 add constraint name5678901234567890123456789012345678901234567890123456789012345 foreign key (b) references t1(id)", err: "[ddl:1059]Identifier name 'name5678901234567890123456789012345678901234567890123456789012345' is too long", }, + // Test foreign key with temporary table. 
+ { + prepares: []string{ + "create temporary table t1 (id int key);", + "create table t2 (a int, b int unique);", + }, + alter: "alter table t2 add constraint fk foreign key (b) references t1(id)", + err: "[schema:1824]Failed to open the referenced table 't1'", + }, + { + prepares: []string{ + "create table t1 (id int key);", + "create temporary table t2 (a int, b int unique);", + }, + alter: "alter table t2 add constraint fk foreign key (b) references t1(id)", + err: "[ddl:8200]TiDB doesn't support ALTER TABLE for local temporary table", + }, + // Test foreign key with partition table + { + prepares: []string{ + "create table t1 (id int key) partition by hash(id) partitions 3;", + "create table t2 (id int key);", + }, + alter: "alter table t2 add constraint fk foreign key (id) references t1(id)", + err: "[schema:1506]Foreign key clause is not yet supported in conjunction with partitioning", + }, + { + prepares: []string{ + "create table t1 (id int key);", + "create table t2 (id int key) partition by hash(id) partitions 3;;", + }, + alter: "alter table t2 add constraint fk foreign key (id) references t1(id)", + err: "[schema:1506]Foreign key clause is not yet supported in conjunction with partitioning", + }, } for i, ca := range cases { tk.MustExec("drop table if exists t2") @@ -1608,3 +1665,148 @@ func TestAddForeignKeyInBigTable(t *testing.T) { tk.MustExec("alter table employee add foreign key fk_1(pid) references employee(id)") require.Less(t, time.Since(start), time.Minute) } + +func TestForeignKeyWithCacheTable(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("set @@foreign_key_checks=1;") + tk.MustExec("use test") + // Test foreign key refer cache table. + tk.MustExec("create table t1 (id int key);") + tk.MustExec("insert into t1 values (1),(2),(3),(4)") + tk.MustExec("alter table t1 cache;") + tk.MustExec("create table t2 (b int);") + tk.MustExec("alter table t2 add constraint fk foreign key (b) references t1(id) on delete cascade on update cascade") + tk.MustExec("insert into t2 values (1),(2),(3),(4)") + tk.MustGetDBError("insert into t2 values (5)", plannercore.ErrNoReferencedRow2) + tk.MustExec("update t1 set id = id+10 where id=1") + tk.MustExec("delete from t1 where id<10") + tk.MustQuery("select * from t1").Check(testkit.Rows("11")) + tk.MustQuery("select * from t2").Check(testkit.Rows("11")) + tk.MustExec("alter table t1 nocache;") + tk.MustExec("drop table t1,t2;") + + // Test add foreign key on cache table. + tk.MustExec("create table t1 (id int key);") + tk.MustExec("create table t2 (b int);") + tk.MustExec("alter table t2 add constraint fk foreign key (b) references t1(id) on delete cascade on update cascade") + tk.MustExec("alter table t2 cache;") + tk.MustExec("insert into t1 values (1),(2),(3),(4)") + tk.MustExec("insert into t2 values (1),(2),(3),(4)") + tk.MustGetDBError("insert into t2 values (5)", plannercore.ErrNoReferencedRow2) + tk.MustExec("update t1 set id = id+10 where id=1") + tk.MustExec("delete from t1 where id<10") + tk.MustQuery("select * from t1").Check(testkit.Rows("11")) + tk.MustQuery("select * from t2").Check(testkit.Rows("11")) + tk.MustExec("alter table t2 nocache;") + tk.MustExec("drop table t1,t2;") +} + +func TestForeignKeyAndConcurrentDDL(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("set @@foreign_key_checks=1;") + tk.MustExec("use test") + // Test foreign key refer cache table. 
+ tk.MustExec("create table t1 (a int, b int, c int, index(a), index(b), index(c));") + tk.MustExec("create table t2 (a int, b int, c int, index(a), index(b), index(c));") + tk2 := testkit.NewTestKit(t, store) + tk2.MustExec("set @@foreign_key_checks=1;") + tk2.MustExec("use test") + passCases := []struct { + prepare []string + ddl1 string + ddl2 string + }{ + { + ddl1: "alter table t2 add constraint fk_1 foreign key (a) references t1(a)", + ddl2: "alter table t2 add constraint fk_2 foreign key (b) references t1(b)", + }, + { + ddl1: "alter table t2 drop foreign key fk_1", + ddl2: "alter table t2 drop foreign key fk_2", + }, + { + prepare: []string{ + "alter table t2 drop index a", + }, + ddl1: "alter table t2 add index(a)", + ddl2: "alter table t2 add constraint fk_1 foreign key (a) references t1(a)", + }, + { + ddl1: "alter table t2 drop index c", + ddl2: "alter table t2 add constraint fk_2 foreign key (b) references t1(b)", + }, + } + for _, ca := range passCases { + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tk.MustExec(ca.ddl1) + }() + go func() { + defer wg.Done() + tk2.MustExec(ca.ddl2) + }() + wg.Wait() + } + errorCases := []struct { + prepare []string + ddl1 string + err1 string + ddl2 string + err2 string + }{ + { + ddl1: "alter table t2 add constraint fk foreign key (a) references t1(a)", + err1: "[ddl:1826]Duplicate foreign key constraint name 'fk'", + ddl2: "alter table t2 add constraint fk foreign key (b) references t1(b)", + err2: "[ddl:1826]Duplicate foreign key constraint name 'fk'", + }, + { + prepare: []string{ + "alter table t2 add constraint fk_1 foreign key (a) references t1(a)", + }, + ddl1: "alter table t2 drop foreign key fk_1", + err1: "[schema:1091]Can't DROP 'fk_1'; check that column/key exists", + ddl2: "alter table t2 drop foreign key fk_1", + err2: "[schema:1091]Can't DROP 'fk_1'; check that column/key exists", + }, + { + ddl1: "alter table t2 drop index a", + err1: "[ddl:1553]Cannot drop index 'a': needed in a foreign key constraint", + ddl2: "alter table t2 add constraint fk_1 foreign key (a) references t1(a)", + err2: "[ddl:-1]Failed to add the foreign key constraint. Missing index for 'fk_1' foreign key columns in the table 't2'", + }, + } + tk.MustExec("drop table t1,t2") + tk.MustExec("create table t1 (a int, b int, c int, index(a), index(b), index(c));") + tk.MustExec("create table t2 (a int, b int, c int, index(a), index(b), index(c));") + for i, ca := range errorCases { + for _, sql := range ca.prepare { + tk.MustExec(sql) + } + var wg sync.WaitGroup + var err1, err2 error + wg.Add(2) + go func() { + defer wg.Done() + err1 = tk.ExecToErr(ca.ddl1) + }() + go func() { + defer wg.Done() + err2 = tk2.ExecToErr(ca.ddl2) + }() + wg.Wait() + if (err1 == nil && err2 == nil) || (err1 != nil && err2 != nil) { + require.Failf(t, "both ddl1 and ddl2 execute success, but expect 1 error", fmt.Sprintf("idx: %v, err1: %v, err2: %v", i, err1, err2)) + } + if err1 != nil { + require.Equal(t, ca.err1, err1.Error()) + } + if err2 != nil { + require.Equal(t, ca.err2, err2.Error()) + } + } +} diff --git a/ddl/foreign_key.go b/ddl/foreign_key.go index e2506770e27c4..1a06719cb404b 100644 --- a/ddl/foreign_key.go +++ b/ddl/foreign_key.go @@ -266,6 +266,9 @@ func checkTableForeignKey(referTblInfo, tblInfo *model.TableInfo, fkInfo *model. 
if referTblInfo.TTLInfo != nil { return dbterror.ErrUnsupportedTTLReferencedByFK } + if referTblInfo.GetPartitionInfo() != nil || tblInfo.GetPartitionInfo() != nil { + return infoschema.ErrForeignKeyOnPartitioned + } // check refer columns in parent table. for i := range fkInfo.RefCols { diff --git a/ddl/index.go b/ddl/index.go index d3b37f093409b..88bbb899684ee 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -703,15 +703,18 @@ func pickBackfillType(w *worker, job *model.Job) model.ReorgType { return job.ReorgMeta.ReorgTp } if IsEnableFastReorg() { - canUseIngest := canUseIngest(w) - if ingest.LitInitialized && canUseIngest { - job.ReorgMeta.ReorgTp = model.ReorgTypeLitMerge - return model.ReorgTypeLitMerge + var useIngest bool + if ingest.LitInitialized { + useIngest = canUseIngest(w) + if useIngest { + job.ReorgMeta.ReorgTp = model.ReorgTypeLitMerge + return model.ReorgTypeLitMerge + } } // The lightning environment is unavailable, but we can still use the txn-merge backfill. logutil.BgLogger().Info("[ddl] fallback to txn-merge backfill process", zap.Bool("lightning env initialized", ingest.LitInitialized), - zap.Bool("can use ingest", canUseIngest)) + zap.Bool("can use ingest", useIngest)) job.ReorgMeta.ReorgTp = model.ReorgTypeTxnMerge return model.ReorgTypeTxnMerge } @@ -877,7 +880,7 @@ func doReorgWorkForCreateIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Jo func runReorgJobAndHandleErr(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table, indexInfo *model.IndexInfo, mergingTmpIdx bool) (done bool, ver int64, err error) { elements := []*meta.Element{{ID: indexInfo.ID, TypeKey: meta.IndexElementKey}} - rh := newReorgHandler(t, w.sess, w.concurrentDDL) + rh := newReorgHandler(t, w.sess) dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { return false, ver, errors.Trace(err) diff --git a/ddl/job_table.go b/ddl/job_table.go index 771a83b8f8264..06f745506145a 100644 --- a/ddl/job_table.go +++ b/ddl/job_table.go @@ -174,7 +174,7 @@ func (d *ddl) startDispatchLoop() { if isChanClosed(d.ctx.Done()) { return } - if !variable.EnableConcurrentDDL.Load() || !d.isOwner() || d.waiting.Load() { + if !d.isOwner() || d.waiting.Load() { d.once.Store(true) time.Sleep(time.Second) continue @@ -396,7 +396,8 @@ func updateDDLJob2Table(sctx *session, job *model.Job, updateRawArgs bool) error // getDDLReorgHandle gets DDL reorg handle. func getDDLReorgHandle(sess *session, job *model.Job) (element *meta.Element, startKey, endKey kv.Key, physicalTableID int64, err error) { sql := fmt.Sprintf("select ele_id, ele_type, start_key, end_key, physical_id from mysql.tidb_ddl_reorg where job_id = %d", job.ID) - rows, err := sess.execute(context.Background(), sql, "get_handle") + ctx := kv.WithInternalSourceType(context.Background(), getDDLRequestSource(job)) + rows, err := sess.execute(ctx, sql, "get_handle") if err != nil { return nil, nil, nil, 0, err } @@ -546,6 +547,19 @@ func addBackfillJobs(sess *session, tableName string, backfillJobs []*BackfillJo }) } +func runInTxn(se *session, f func(*session) error) (err error) { + err = se.begin() + if err != nil { + return err + } + err = f(se) + if err != nil { + se.rollback() + return + } + return errors.Trace(se.commit()) +} + // GetBackfillJobsForOneEle batch gets the backfill jobs in the tblName table that contains only one element. 
func GetBackfillJobsForOneEle(sess *session, batch int, excludedJobIDs []int64, lease time.Duration) ([]*BackfillJob, error) { eJobIDsBuilder := strings.Builder{} @@ -717,135 +731,3 @@ func updateBackfillJob(sess *session, tableName string, backfillJob *BackfillJob _, err = sess.execute(context.Background(), sql, label) return err } - -// MoveJobFromQueue2Table move existing DDLs in queue to table. -func (d *ddl) MoveJobFromQueue2Table(inBootstrap bool) error { - sess, err := d.sessPool.get() - if err != nil { - return err - } - defer d.sessPool.put(sess) - return runInTxn(newSession(sess), func(se *session) error { - txn, err := se.txn() - if err != nil { - return errors.Trace(err) - } - t := meta.NewMeta(txn) - isConcurrentDDL, err := t.IsConcurrentDDL() - if !inBootstrap && (isConcurrentDDL || err != nil) { - return errors.Trace(err) - } - systemDBID, err := t.GetSystemDBID() - if err != nil { - return errors.Trace(err) - } - for _, tp := range []workerType{addIdxWorker, generalWorker} { - t := newMetaWithQueueTp(txn, tp) - jobs, err := t.GetAllDDLJobsInQueue() - if err != nil { - return errors.Trace(err) - } - for _, job := range jobs { - // In bootstrap, we can ignore the internal DDL. - if inBootstrap && job.SchemaID == systemDBID { - continue - } - err = insertDDLJobs2Table(se, false, job) - if err != nil { - return errors.Trace(err) - } - if tp == generalWorker { - // General job do not have reorg info. - continue - } - element, start, end, pid, err := t.GetDDLReorgHandle(job) - if meta.ErrDDLReorgElementNotExist.Equal(err) { - continue - } - if err != nil { - return errors.Trace(err) - } - err = initDDLReorgHandle(se, job.ID, start, end, pid, element) - if err != nil { - return errors.Trace(err) - } - } - } - - if err = t.ClearALLDDLJob(); err != nil { - return errors.Trace(err) - } - if err = t.ClearAllDDLReorgHandle(); err != nil { - return errors.Trace(err) - } - return t.SetConcurrentDDL(true) - }) -} - -// MoveJobFromTable2Queue move existing DDLs in table to queue. -func (d *ddl) MoveJobFromTable2Queue() error { - sess, err := d.sessPool.get() - if err != nil { - return err - } - defer d.sessPool.put(sess) - return runInTxn(newSession(sess), func(se *session) error { - txn, err := se.txn() - if err != nil { - return errors.Trace(err) - } - t := meta.NewMeta(txn) - isConcurrentDDL, err := t.IsConcurrentDDL() - if !isConcurrentDDL || err != nil { - return errors.Trace(err) - } - jobs, err := getJobsBySQL(se, "tidb_ddl_job", "1 order by job_id") - if err != nil { - return errors.Trace(err) - } - - for _, job := range jobs { - jobListKey := meta.DefaultJobListKey - if job.MayNeedReorg() { - jobListKey = meta.AddIndexJobListKey - } - if err := t.EnQueueDDLJobNoUpdate(job, jobListKey); err != nil { - return errors.Trace(err) - } - } - - reorgHandle, err := se.execute(context.Background(), "select job_id, start_key, end_key, physical_id, ele_id, ele_type from mysql.tidb_ddl_reorg", "get_handle") - if err != nil { - return errors.Trace(err) - } - for _, row := range reorgHandle { - if err := t.UpdateDDLReorgHandle(row.GetInt64(0), row.GetBytes(1), row.GetBytes(2), row.GetInt64(3), &meta.Element{ID: row.GetInt64(4), TypeKey: row.GetBytes(5)}); err != nil { - return errors.Trace(err) - } - } - - // clean up these 2 tables. 
- _, err = se.execute(context.Background(), "delete from mysql.tidb_ddl_job", "delete_old_ddl") - if err != nil { - return errors.Trace(err) - } - _, err = se.execute(context.Background(), "delete from mysql.tidb_ddl_reorg", "delete_old_reorg") - if err != nil { - return errors.Trace(err) - } - return t.SetConcurrentDDL(false) - }) -} - -func runInTxn(se *session, f func(*session) error) (err error) { - err = se.begin() - if err != nil { - return err - } - err = f(se) - if err != nil { - se.rollback() - return - } - return errors.Trace(se.commit()) -} diff --git a/ddl/job_table_test.go b/ddl/job_table_test.go index ca30cf903107d..8948796e73243 100644 --- a/ddl/job_table_test.go +++ b/ddl/job_table_test.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" @@ -40,9 +39,6 @@ import ( // This test checks the chosen job records to see if there are wrong scheduling, if job A and job B cannot run concurrently, // then the all the record of job A must before or after job B, no cross record between these 2 jobs should be in between. func TestDDLScheduling(t *testing.T) { - if !variable.EnableConcurrentDDL.Load() { - t.Skipf("test requires concurrent ddl") - } store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) diff --git a/ddl/main_test.go b/ddl/main_test.go index 3418d16a23ece..6a8642ae34380 100644 --- a/ddl/main_test.go +++ b/ddl/main_test.go @@ -64,6 +64,8 @@ func TestMain(m *testing.M) { goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), + goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"), } goleak.VerifyTestMain(m, opts...) 
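For readers unfamiliar with the pattern extended in the ddl/main_test.go hunk above, here is a minimal sketch of how go.uber.org/goleak ignore options are typically wired into a package TestMain. The package name is hypothetical; the two ignored top functions mirror the ones added above, which whitelist known-benign background goroutines so leak detection does not fail the suite.

```go
// Minimal TestMain with goroutine-leak verification via go.uber.org/goleak.
// Illustrative sketch only; the package name is a placeholder.
package ddltest_test

import (
	"testing"

	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	opts := []goleak.Option{
		// Ignore goroutines parked in these well-known top frames.
		goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
		goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"),
	}
	// Run the package's tests, then fail if any unexpected goroutine is still alive.
	goleak.VerifyTestMain(m, opts...)
}
```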
diff --git a/ddl/metadatalocktest/mdl_test.go b/ddl/metadatalocktest/mdl_test.go index 64bdf77d55707..fd307968cad73 100644 --- a/ddl/metadatalocktest/mdl_test.go +++ b/ddl/metadatalocktest/mdl_test.go @@ -257,6 +257,47 @@ func TestMDLBasicBatchPointGet(t *testing.T) { require.Less(t, ts1, ts2) } +func TestMDLAddForeignKey(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + sv := server.CreateMockServer(t, store) + + sv.SetDomain(dom) + dom.InfoSyncer().SetSessionManager(sv) + defer sv.Close() + + conn1 := server.CreateMockConn(t, sv) + tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session) + conn2 := server.CreateMockConn(t, sv) + tkDDL := testkit.NewTestKitWithSession(t, store, conn2.Context().Session) + tk.MustExec("use test") + tk.MustExec("set global tidb_enable_metadata_lock=1") + tk.MustExec("create table t1(id int key);") + tk.MustExec("create table t2(id int key);") + + tk.MustExec("begin") + tk.MustExec("insert into t2 values(1);") + + var wg sync.WaitGroup + var ddlErr error + wg.Add(1) + var ts2 time.Time + go func() { + defer wg.Done() + ddlErr = tkDDL.ExecToErr("alter table test.t2 add foreign key (id) references t1(id)") + ts2 = time.Now() + }() + + time.Sleep(2 * time.Second) + + ts1 := time.Now() + tk.MustExec("commit") + + wg.Wait() + require.Error(t, ddlErr) + require.Equal(t, "[ddl:1452]Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `fk_1` FOREIGN KEY (`id`) REFERENCES `t1` (`id`))", ddlErr.Error()) + require.Less(t, ts1, ts2) +} + func TestMDLRRUpdateSchema(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) sv := server.CreateMockServer(t, store) diff --git a/ddl/multi_schema_change_test.go b/ddl/multi_schema_change_test.go index e211e9d51ca77..9f84ca6aeeed8 100644 --- a/ddl/multi_schema_change_test.go +++ b/ddl/multi_schema_change_test.go @@ -1263,7 +1263,7 @@ func (c *cancelOnceHook) OnJobUpdated(job *model.Job) { return } c.triggered = true - errs, err := ddl.CancelJobs(c.s, c.store, []int64{job.ID}) + errs, err := ddl.CancelJobs(c.s, []int64{job.ID}) if errs[0] != nil { c.cancelErr = errs[0] return diff --git a/ddl/partition.go b/ddl/partition.go index 982fc216788c3..9e55693da4feb 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -1761,7 +1761,7 @@ func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) ( elements = append(elements, &meta.Element{ID: idxInfo.ID, TypeKey: meta.IndexElementKey}) } } - rh := newReorgHandler(t, w.sess, w.concurrentDDL) + rh := newReorgHandler(t, w.sess) reorgInfo, err := getReorgInfoFromPartitions(d.jobContext(job), d, rh, job, dbInfo, pt, physicalTableIDs, elements) if err != nil || reorgInfo.first { @@ -2161,7 +2161,7 @@ func (w *worker) onExchangeTablePartition(d *ddlCtx, t *meta.Meta, job *model.Jo func doPartitionReorgWork(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table, physTblIDs []int64) (done bool, ver int64, err error) { job.ReorgMeta.ReorgTp = model.ReorgTypeTxn - rh := newReorgHandler(t, w.sess, w.concurrentDDL) + rh := newReorgHandler(t, w.sess) elements := BuildElements(tbl.Meta().Columns[0], tbl.Meta().Indices) partTbl, ok := tbl.(table.PartitionedTable) if !ok { diff --git a/ddl/placement/common.go b/ddl/placement/common.go index cd02622dd0562..7c77ead97e30e 100644 --- a/ddl/placement/common.go +++ b/ddl/placement/common.go @@ -54,4 +54,8 @@ const ( // EngineLabelTiKV is the label value used in some tests. 
And possibly TiKV will // set the engine label with a value of EngineLabelTiKV. EngineLabelTiKV = "tikv" + + // EngineLabelTiFlashCompute is for disaggregated tiflash mode, + // it's the lable of tiflash_compute nodes. + EngineLabelTiFlashCompute = "tiflash_compute" ) diff --git a/ddl/reorg.go b/ddl/reorg.go index 85cd00e47ae11..6f050a9b3fecd 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -34,7 +34,6 @@ import ( "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" @@ -276,8 +275,7 @@ func (w *worker) runReorgJob(rh *reorgHandler, reorgInfo *reorgInfo, tblInfo *mo // Update a reorgInfo's handle. // Since daemon-worker is triggered by timer to store the info half-way. // you should keep these infos is read-only (like job) / atomic (like doneKey & element) / concurrent safe. - err := rh.UpdateDDLReorgStartHandle(job, currentElement, doneKey) - + err := updateDDLReorgStartHandle(rh.s, job, currentElement, doneKey) logutil.BgLogger().Info("[ddl] run reorg job wait timeout", zap.Duration("wait time", waitTimeout), zap.ByteString("element type", currentElement.TypeKey), @@ -677,7 +675,7 @@ func getReorgInfo(ctx *JobContext, d *ddlCtx, rh *reorgHandler, job *model.Job, // We'll try to remove it in the next major TiDB version. if meta.ErrDDLReorgElementNotExist.Equal(err) { job.SnapshotVer = 0 - logutil.BgLogger().Warn("[ddl] get reorg info, the element does not exist", zap.String("job", job.String()), zap.Bool("enableConcurrentDDL", rh.enableConcurrentDDL)) + logutil.BgLogger().Warn("[ddl] get reorg info, the element does not exist", zap.String("job", job.String())) } return &info, errors.Trace(err) } @@ -777,8 +775,8 @@ func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key, pool *sessionPool) (err err sess.rollback() return err } - rh := newReorgHandler(meta.NewMeta(txn), sess, variable.EnableConcurrentDDL.Load()) - err = rh.UpdateDDLReorgHandle(r.Job, startKey, r.EndKey, r.PhysicalTableID, r.currElement) + rh := newReorgHandler(meta.NewMeta(txn), sess) + err = updateDDLReorgHandle(rh.s, r.Job.ID, startKey, r.EndKey, r.PhysicalTableID, r.currElement) err1 := sess.commit() if err == nil { err = err1 @@ -790,63 +788,33 @@ func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key, pool *sessionPool) (err err type reorgHandler struct { m *meta.Meta s *session - - enableConcurrentDDL bool } // NewReorgHandlerForTest creates a new reorgHandler, only used in test. func NewReorgHandlerForTest(t *meta.Meta, sess sessionctx.Context) *reorgHandler { - return newReorgHandler(t, newSession(sess), variable.EnableConcurrentDDL.Load()) + return newReorgHandler(t, newSession(sess)) } -func newReorgHandler(t *meta.Meta, sess *session, enableConcurrentDDL bool) *reorgHandler { - return &reorgHandler{m: t, s: sess, enableConcurrentDDL: enableConcurrentDDL} -} - -// UpdateDDLReorgStartHandle saves the job reorganization latest processed element and start handle for later resuming. -func (r *reorgHandler) UpdateDDLReorgStartHandle(job *model.Job, element *meta.Element, startKey kv.Key) error { - if r.enableConcurrentDDL { - return updateDDLReorgStartHandle(r.s, job, element, startKey) - } - return r.m.UpdateDDLReorgStartHandle(job, element, startKey) -} - -// UpdateDDLReorgHandle saves the job reorganization latest processed information for later resuming. 
-func (r *reorgHandler) UpdateDDLReorgHandle(job *model.Job, startKey, endKey kv.Key, physicalTableID int64, element *meta.Element) error { - if r.enableConcurrentDDL { - return updateDDLReorgHandle(r.s, job.ID, startKey, endKey, physicalTableID, element) - } - return r.m.UpdateDDLReorgHandle(job.ID, startKey, endKey, physicalTableID, element) +func newReorgHandler(t *meta.Meta, sess *session) *reorgHandler { + return &reorgHandler{m: t, s: sess} } // InitDDLReorgHandle initializes the job reorganization information. func (r *reorgHandler) InitDDLReorgHandle(job *model.Job, startKey, endKey kv.Key, physicalTableID int64, element *meta.Element) error { - if r.enableConcurrentDDL { - return initDDLReorgHandle(r.s, job.ID, startKey, endKey, physicalTableID, element) - } - return r.m.UpdateDDLReorgHandle(job.ID, startKey, endKey, physicalTableID, element) + return initDDLReorgHandle(r.s, job.ID, startKey, endKey, physicalTableID, element) } // RemoveReorgElementFailPoint removes the element of the reorganization information. func (r *reorgHandler) RemoveReorgElementFailPoint(job *model.Job) error { - if r.enableConcurrentDDL { - return removeReorgElement(r.s, job) - } - return r.m.RemoveReorgElement(job) + return removeReorgElement(r.s, job) } // RemoveDDLReorgHandle removes the job reorganization related handles. func (r *reorgHandler) RemoveDDLReorgHandle(job *model.Job, elements []*meta.Element) error { - if r.enableConcurrentDDL { - return removeDDLReorgHandle(r.s, job, elements) - } - return r.m.RemoveDDLReorgHandle(job, elements) + return removeDDLReorgHandle(r.s, job, elements) } // GetDDLReorgHandle gets the latest processed DDL reorganize position. func (r *reorgHandler) GetDDLReorgHandle(job *model.Job) (element *meta.Element, startKey, endKey kv.Key, physicalTableID int64, err error) { - if r.enableConcurrentDDL { - return getDDLReorgHandle(r.s, job) - } - return r.m.GetDDLReorgHandle(job) + return getDDLReorgHandle(r.s, job) } diff --git a/ddl/schema_test.go b/ddl/schema_test.go index 70206ed2f179f..3be4fb4e4d278 100644 --- a/ddl/schema_test.go +++ b/ddl/schema_test.go @@ -163,13 +163,13 @@ func testDropSchema(t *testing.T, ctx sessionctx.Context, d ddl.DDL, dbInfo *mod return job, ver } -func isDDLJobDone(test *testing.T, t *meta.Meta) bool { - job, err := t.GetDDLJobByIdx(0) - require.NoError(test, err) - if job == nil { +func isDDLJobDone(test *testing.T, t *meta.Meta, store kv.Storage) bool { + tk := testkit.NewTestKit(test, store) + rows := tk.MustQuery("select * from mysql.tidb_ddl_job").Rows() + + if len(rows) == 0 { return true } - time.Sleep(testLease) return false } @@ -185,7 +185,7 @@ func testCheckSchemaState(test *testing.T, store kv.Storage, dbInfo *model.DBInf require.NoError(test, err) if state == model.StateNone { - isDropped = isDDLJobDone(test, t) + isDropped = isDDLJobDone(test, t, store) if !isDropped { return nil } diff --git a/ddl/schematracker/checker.go b/ddl/schematracker/checker.go index b1533d0246fb1..6e38f6c8bdb79 100644 --- a/ddl/schematracker/checker.go +++ b/ddl/schematracker/checker.go @@ -541,16 +541,6 @@ func (d Checker) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { return d.realDDL.DoDDLJob(ctx, job) } -// MoveJobFromQueue2Table implements the DDL interface. -func (d Checker) MoveJobFromQueue2Table(bool) error { - panic("implement me") -} - -// MoveJobFromTable2Queue implements the DDL interface. 
-func (d Checker) MoveJobFromTable2Queue() error { - panic("implement me") -} - // StorageDDLInjector wraps kv.Storage to inject checker to domain's DDL in bootstrap time. type StorageDDLInjector struct { kv.Storage diff --git a/ddl/schematracker/dm_tracker.go b/ddl/schematracker/dm_tracker.go index 75f8fa35b429d..5d3f693deaa0f 100644 --- a/ddl/schematracker/dm_tracker.go +++ b/ddl/schematracker/dm_tracker.go @@ -1256,13 +1256,3 @@ func (SchemaTracker) GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infosc func (SchemaTracker) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { return nil } - -// MoveJobFromQueue2Table implements the DDL interface, it's no-op in DM's case. -func (SchemaTracker) MoveJobFromQueue2Table(b bool) error { - panic("implement me") -} - -// MoveJobFromTable2Queue implements the DDL interface, it's no-op in DM's case. -func (SchemaTracker) MoveJobFromTable2Queue() error { - panic("implement me") -} diff --git a/ddl/stat_test.go b/ddl/stat_test.go index 556b9eb5dadc7..db8abc45be30c 100644 --- a/ddl/stat_test.go +++ b/ddl/stat_test.go @@ -25,14 +25,12 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" @@ -152,20 +150,13 @@ func TestGetDDLInfo(t *testing.T) { } func addDDLJobs(sess session.Session, txn kv.Transaction, job *model.Job) error { - if variable.EnableConcurrentDDL.Load() { - b, err := job.Encode(true) - if err != nil { - return err - } - _, err = sess.Execute(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), fmt.Sprintf("insert into mysql.tidb_ddl_job(job_id, reorg, schema_ids, table_ids, job_meta, type, processing) values (%d, %t, %s, %s, %s, %d, %t)", - job.ID, job.MayNeedReorg(), strconv.Quote(strconv.FormatInt(job.SchemaID, 10)), strconv.Quote(strconv.FormatInt(job.TableID, 10)), wrapKey2String(b), job.Type, false)) + b, err := job.Encode(true) + if err != nil { return err } - m := meta.NewMeta(txn) - if job.MayNeedReorg() { - return m.EnQueueDDLJob(job, meta.AddIndexJobListKey) - } - return m.EnQueueDDLJob(job) + _, err = sess.Execute(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), fmt.Sprintf("insert into mysql.tidb_ddl_job(job_id, reorg, schema_ids, table_ids, job_meta, type, processing) values (%d, %t, %s, %s, %s, %d, %t)", + job.ID, job.MayNeedReorg(), strconv.Quote(strconv.FormatInt(job.SchemaID, 10)), strconv.Quote(strconv.FormatInt(job.TableID, 10)), wrapKey2String(b), job.Type, false)) + return err } func wrapKey2String(key []byte) string { diff --git a/ddl/tiflashtest/BUILD.bazel b/ddl/tiflashtest/BUILD.bazel index 8854778def892..2a803cf03c5af 100644 --- a/ddl/tiflashtest/BUILD.bazel +++ b/ddl/tiflashtest/BUILD.bazel @@ -28,6 +28,7 @@ go_test( "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/metapb", "@com_github_stretchr_testify//require", + "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//testutils", "@org_uber_go_goleak//:goleak", "@org_uber_go_zap//:zap", diff --git a/ddl/tiflashtest/ddl_tiflash_test.go b/ddl/tiflashtest/ddl_tiflash_test.go 
index 8922f7211f8b7..d1d0368138b18 100644 --- a/ddl/tiflashtest/ddl_tiflash_test.go +++ b/ddl/tiflashtest/ddl_tiflash_test.go @@ -45,6 +45,7 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/logutil" "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/testutils" "go.uber.org/zap" ) @@ -438,6 +439,44 @@ func TestTiFlashDropPartition(t *testing.T) { CheckTableAvailableWithTableName(s.dom, t, 1, []string{}, "test", "ddltiflash") } +func TestTiFlashFlashbackCluster(t *testing.T) { + s, teardown := createTiFlashContext(t) + defer teardown() + tk := testkit.NewTestKit(t, s.store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + tk.MustExec("insert into t values (1), (2), (3)") + + ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) + require.NoError(t, err) + + tk.MustExec("alter table t set tiflash replica 1") + time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable) + CheckTableAvailableWithTableName(s.dom, t, 1, []string{}, "test", "t") + + injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(10 * time.Second)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockFlashbackTest", `return(true)`)) + require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + + ChangeGCSafePoint(tk, time.Now().Add(-10*time.Second), "true", "10m0s") + defer func() { + ChangeGCSafePoint(tk, time.Now(), "true", "10m0s") + }() + + errorMsg := fmt.Sprintf("[ddl:-1]Detected unsupported DDL job type(%s) during [%s, now), can't do flashback", + model.ActionSetTiFlashReplica.String(), oracle.GetTimeFromTS(ts).String()) + tk.MustGetErrMsg(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)), errorMsg) + + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockFlashbackTest")) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) + require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) +} + func CheckTableAvailableWithTableName(dom *domain.Domain, t *testing.T, count uint64, labels []string, db string, table string) { tb, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table)) require.NoError(t, err) diff --git a/distsql/distsql.go b/distsql/distsql.go index 1f18d084eb0b2..3c65205f3d331 100644 --- a/distsql/distsql.go +++ b/distsql/distsql.go @@ -38,11 +38,11 @@ import ( ) // DispatchMPPTasks dispatches all tasks and returns an iterator. 
-func DispatchMPPTasks(ctx context.Context, sctx sessionctx.Context, tasks []*kv.MPPDispatchRequest, fieldTypes []*types.FieldType, planIDs []int, rootID int, startTs uint64) (SelectResult, error) { +func DispatchMPPTasks(ctx context.Context, sctx sessionctx.Context, tasks []*kv.MPPDispatchRequest, fieldTypes []*types.FieldType, planIDs []int, rootID int, startTs uint64, mppQueryID kv.MPPQueryID) (SelectResult, error) { ctx = WithSQLKvExecCounterInterceptor(ctx, sctx.GetSessionVars().StmtCtx) _, allowTiFlashFallback := sctx.GetSessionVars().AllowFallbackToTiKV[kv.TiFlash] ctx = SetTiFlashMaxThreadsInContext(ctx, sctx) - resp := sctx.GetMPPClient().DispatchMPPTasks(ctx, sctx.GetSessionVars().KVVars, tasks, allowTiFlashFallback, startTs) + resp := sctx.GetMPPClient().DispatchMPPTasks(ctx, sctx.GetSessionVars().KVVars, tasks, allowTiFlashFallback, startTs, mppQueryID) if resp == nil { return nil, errors.New("client returns nil response") } diff --git a/docs/design/2022-09-29-reorganize-partition.md b/docs/design/2022-09-29-reorganize-partition.md new file mode 100644 index 0000000000000..56e380826efa7 --- /dev/null +++ b/docs/design/2022-09-29-reorganize-partition.md @@ -0,0 +1,180 @@ +# TiDB Design Documents + +- Author(s): [Mattias Jonsson](http://github.com/mjonss) +- Discussion PR: https://github.com/pingcap/tidb/issues/38535 +- Tracking Issue: https://github.com/pingcap/tidb/issues/15000 + +## Table of Contents + +* [Introduction](#introduction) +* [Motivation or Background](#motivation-or-background) +* [Detailed Design](#detailed-design) + * [Schema change states for REORGANIZE PARTITION](#schema-change-states-for-reorganize-partition) + * [Error Handling](#error-handling) + * [Notes](#notes) +* [Test Design](#test-design) + * [Benchmark Tests](#benchmark-tests) +* [Impacts & Risks](#impacts--risks) + +## Introduction + +Support ALTER TABLE t REORGANIZE PARTITION p1,p2 INTO (partition pNew1 values...) + +## Motivation or Background + +TiDB is currently lacking the support of changing the partitions of a partitioned table, it only supports adding and dropping LIST/RANGE partitions. +Supporting REORGANIZE PARTITIONs will allow RANGE partitioned tables to have a MAXVALUE partition to catch all values and split it into new ranges. Similar with LIST partitions where one can split or merge different partitions. + +When this is implemented, it will also allow future PRs transforming a non-partitioned table into a partitioned table as well as remove partitioning and make a partitioned table a normal non-partitioned table, as well as COALESCE PARTITION and ADD PARTITION for HASH partitioned tables, which is different ALTER statements but can use the same implementation as REORGANIZE PARTITION + +The operation should be online, and must handle multiple partitions as well as large data sets. + +Possible usage scenarios: +- Full table copy + - merging all partitions to a single table (ALTER TABLE t REMOVE PARTITIONING) + - splitting data from many to many partitions, like change the number of HASH partitions + - splitting a table to many partitions (ALTER TABLE t PARTITION BY ...) 
+- Partial table copy (not full table/all partitions) + - split one or more partitions + - merge two or more partitions + +These different use cases can have different optimizations, but the generic form must still be solved: +- N partitions, where each partition has M indexes + +The first implementation should be based on merge-txn transactional batches (row-by-row batch read, update of the record key with the new physical table ID, write), and then create the indexes in batches, index by index and partition by partition. +Later we can implement the ingest (lightning-style) optimization, since the DDL module is evolving to run reorg tasks more efficiently. + +## Detailed Design + +There are two parts of the design: +- Schema change states throughout the operation +- Reorganization implementation, which will be handled in the StateWriteReorganization state. + +The schema change states clarify which steps are done in which schema state transitions. + +### Schema change states for REORGANIZE PARTITION + +Since this operation will: +- create new partitions +- copy data from dropped partitions to new partitions and create their indexes +- change the partition definitions +- drop existing partitions + +it will use all these schema change stages: + + // StateNone means this schema element is absent and can't be used. + StateNone SchemaState = iota + - Check if the table structure after the ALTER is valid + - Use the pre-generated physical table IDs for each new partition (they were already generated by the client sending the ALTER command). + - Update the metadata with the new partitions (AddingDefinitions) and the partitions to be dropped (DroppingDefinitions), so that new transactions can double write. + - Set placement rules + - Set TiFlash Replicas + - Set legacy Bundles (non-sql placement) + - Set the state to StateDeleteOnly + + // StateDeleteOnly means we can only delete items for this schema element (the new partition). + StateDeleteOnly + - Set the state to StateWriteOnly + + // StateWriteOnly means we can use any write operation on this schema element, + // but outer can't read the changed data. + StateWriteOnly + - Set the state to StateWriteReorganization + + // StateWriteReorganization means we are re-organizing whole data after write only state. + StateWriteReorganization + - Copy the data from the partitions to be dropped (one at a time) and insert it into the new partitions. This needs a new backfillWorker implementation. + - Recreate the indexes one by one for the new partitions (one partition at a time), creating an element for each index and reusing the addIndexWorker. (Note: this can be optimized in the future, either with the new fast add-index implementation based on lightning, or by writing the index entries at the same time as the records in the previous step, or when the partitioning columns are included in the index or handle.) + - Replace the old partitions with the new partitions in the metadata when the data copying is done + - Set the state to StateDeleteReorganization + + // StateDeleteReorganization means we are re-organizing whole data after delete only state. + StateDeleteReorganization - we are using this state in a slightly different way than the comment above says. + This state is needed since we cannot directly move from StateWriteReorganization to StatePublic.
+ Imagine that StateWriteReorganization is complete and we are updating the schema version. If a transaction seeing the new schema version writes to the new partitions, those changes need to be written to the old partitions as well, so that new transactions in other nodes still using the older schema version can see them. + - Remove the notion of new partitions (AddingDefinitions) and partitions to be dropped (DroppingDefinitions); double writing will stop when the state goes to StatePublic. + - Register the range delete of the old partition data (in finishJob / deleteRange). + - Set the state to StatePublic + + // StatePublic means this schema element is ok for all write and read operations. + StatePublic + - Table structure is now complete and the table is ready to use with its new partitioning scheme + - Note that there is a background job for the GCWorker to do in its deleteRange function. + +While the reorganization happens in the background, the normal write path needs to check whether there are any new partitions in the metadata and whether the updated/deleted/inserted row would match a new partition; if so, it must perform the same operation in the new partition as well, just like the adding-index and modify-column operations currently do (to be implemented in `(*partitionedTable) AddRecord/UpdateRecord/RemoveRecord`; see the illustrative sketch after the example below). + +Example of why an extra state between StateWriteReorganize and StatePublic is needed: + +```sql +-- table: +CREATE TABLE t (a int) PARTITION BY LIST (a) (PARTITION p0 VALUES IN (1,2,3,4,5), PARTITION p1 VALUES IN (6,7,8,9,10)); +-- during alter operation: +ALTER TABLE t REORGANIZE PARTITION p0 INTO (PARTITION p0a VALUES IN (1,2,3), PARTITION p0b VALUES IN (4,5)); +``` + +A partition shown within parentheses, like `(p0a [1] p0b [0])`, is hidden or waiting to be deleted by GC/DeleteRange. The values in the brackets after a partition name, like `p0a [2]`, are the rows currently visible in that partition.
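+Before walking through the timeline tables below, here is a minimal, self-contained sketch of the double-write rule described above. It is illustrative only: `partition`, `partitionedTable`, and `addRecord` are hypothetical stand-ins and do not correspond to TiDB's real types or to the actual `(*partitionedTable) AddRecord` implementation.
+
+```go
+// Hypothetical sketch of the double-write rule; names are simplified and illustrative.
+package main
+
+import "fmt"
+
+type partition struct {
+	name    string
+	matches func(v int) bool // stand-in for the real range/list partition pruning
+}
+
+type partitionedTable struct {
+	public []partition // partitions in the currently visible definition
+	adding []partition // AddingDefinitions while REORGANIZE PARTITION is in flight
+}
+
+// addRecord writes the row to its public partition and, if a reorganization is
+// ongoing and the row also maps to one of the new partitions, double-writes it
+// there so that readers on either schema version observe the change.
+func (t *partitionedTable) addRecord(v int) {
+	for _, p := range t.public {
+		if p.matches(v) {
+			fmt.Printf("write %d to %s\n", v, p.name)
+			break
+		}
+	}
+	for _, p := range t.adding {
+		if p.matches(v) {
+			fmt.Printf("double-write %d to new partition %s\n", v, p.name)
+			break
+		}
+	}
+}
+
+func main() {
+	t := &partitionedTable{
+		public: []partition{
+			{name: "p0", matches: func(v int) bool { return v <= 5 }},
+			{name: "p1", matches: func(v int) bool { return v > 5 }},
+		},
+		adding: []partition{
+			{name: "p0a", matches: func(v int) bool { return v <= 3 }},
+			{name: "p0b", matches: func(v int) bool { return v > 3 && v <= 5 }},
+		},
+	}
+	t.addRecord(2) // lands in p0 and is double-written to p0a
+}
+```
+
+The same mirroring applies in the opposite direction during StateDeleteReorganization, where writes seen through the new definition must also reach the old partitions, as the tables below show.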
+ +If we go directly from StateWriteReorganize to StatePublic, then clients one schema version behind will not see changes to the new partitions: + +| Data (TiKV/Unistore) | TiDB client 1 | TiDB client 2 | +| --------------------------------------- | ------------------------------------ | ------------------------------------------------------------ | +| p0 [] p1 [] StateWriteReorganize | | | +| p0 [] p1 [] (p0a [] p0b []) | | | +| (p0 []) p1 [] p0a [] p0b [] StatePublic | | | +| (p0 []) p1 [] p0a [2] p0b [] | StatePublic INSERT INTO T VALUES (2) | | +| (p0 []) p1 [] p0a [2] p0b [] | | StateWriteReorganize SELECT * FROM t => [] (only sees p0,p1) | + + +But if we add a state between StateWriteReorganize and StatePublic and double write to the old partitions during that state it works: + + +| Data (TiKV/Unistore) | TiDB client 1 | TiDB client 2 | +| ------------------------------------------------- | ---------------------------------------------- | -------------------------------------------------------------------- | +| p0 [] p1 [] (p0a [] p0b []) StateWriteReorganize | | | +| (p0 []) p1 [] p0a [] p0b [] StateDeleteReorganize | | | +| (p0 [2]) p1 [] p0a [2] p0b [] | StateDeleteReorganize INSERT INTO T VALUES (2) | | +| (p0 [2]) p1 [] p0a [2] p0b [] | | StateWriteReorganize SELECT * FROM t => [2] (only sees p0,p1) | +| (p0 [2]) p1 [] p0a [2] p0b [] StatePublic | | | +| (p0 [2]) p1 [] p0a [2] p0b [4] | StatePublic INSERT INTO T VALUES (4) | | +| (p0 [2]) p1 [] p0a [2] p0b [4] | | StateDeleteReorganize SELECT * FROM t => [2,4] (sees p0a,p0b,p1) | + + +### Error handling + +If any non-retryable error occurs, we will call onDropTablePartition and adjust the logic in that function to also handle the roll back of reorganize partition, in a similar way as it does with model.ActionAddTablePartition. + +### Notes + +Note that parser support already exists. +There should be no issues with upgrading, and downgrade will not be supported during the DDL. + +Notes: +- statistics should be removed from the old partitions. +- statistics will not be generated for the new partitions (future optimization possible, to get statistics during the data copying?) +- the global statistics (table level) will remain the same, since the data has not changed. +- this DDL will be online, while MySQL is blocking on MDL. + +## Test Design + +Re-use tests from other DDLs like Modify column, but adjust them for Reorganize partition. +A separate test plan will be created and a test report will be written and signed off when the tests are completed. + +### Benchmark Tests + +Correctness and functionality is higher priority than performance. + +## Impacts & Risks + +Impacts: +- better usability of partitioned tables +- online alter in TiDB, where MySQL is blocking +- all affected data needs to be read (CPU/IO/Network load on TiDB/PD/TiKV), even multiple times in case of indexes. +- all data needs to be writted (duplicated, both row-data and indexes), including transaction logs (more disk space on TiKV, CPU/IO/Network load on TiDB/PD/TiKV and TiFlash if configured on the table). 
+ +Risks: +- introduction of bugs + - in the DDL code + - in the write path (double writing the changes for transactions running during the DDL) +- out of disk space +- out of memory +- general resource usage, resulting in lower performance of the cluster diff --git a/domain/domain.go b/domain/domain.go index 08f49ed018799..5827aa8d528d7 100644 --- a/domain/domain.go +++ b/domain/domain.go @@ -188,6 +188,7 @@ func (do *Domain) loadInfoSchema(startTS uint64) (infoschema.InfoSchema, bool, i // 1. Not first time bootstrap loading, which needs a full load. // 2. It is newer than the current one, so it will be "the current one" after this function call. // 3. There are less 100 diffs. + // 4. No regenrated schema diff. startTime := time.Now() if currentSchemaVersion != 0 && neededSchemaVersion > currentSchemaVersion && neededSchemaVersion-currentSchemaVersion < 100 { is, relatedChanges, err := do.tryLoadSchemaDiffs(m, currentSchemaVersion, neededSchemaVersion) @@ -347,6 +348,9 @@ func (do *Domain) tryLoadSchemaDiffs(m *meta.Meta, usedVersion, newVersion int64 if err != nil { return nil, nil, err } + if diff.RegenerateSchemaMap { + return nil, nil, errors.Errorf("Meets a schema diff with RegenerateSchemaMap flag") + } if canSkipSchemaCheckerDDL(diff.Type) { continue } @@ -1062,10 +1066,6 @@ func (do *Domain) Init( return err } - do.wg.Run(func() { - do.runTTLJobManager(ctx) - }) - return nil } @@ -2457,18 +2457,21 @@ func (do *Domain) serverIDKeeper() { } } -func (do *Domain) runTTLJobManager(ctx context.Context) { - ttlJobManager := ttlworker.NewJobManager(do.ddl.GetID(), do.sysSessionPool, do.store) - ttlJobManager.Start() - do.ttlJobManager = ttlJobManager +// StartTTLJobManager creates and starts the ttl job manager +func (do *Domain) StartTTLJobManager() { + do.wg.Run(func() { + ttlJobManager := ttlworker.NewJobManager(do.ddl.GetID(), do.sysSessionPool, do.store) + do.ttlJobManager = ttlJobManager + ttlJobManager.Start() - <-do.exit + <-do.exit - ttlJobManager.Stop() - err := ttlJobManager.WaitStopped(ctx, 30*time.Second) - if err != nil { - logutil.BgLogger().Warn("fail to wait until the ttl job manager stop", zap.Error(err)) - } + ttlJobManager.Stop() + err := ttlJobManager.WaitStopped(context.Background(), 30*time.Second) + if err != nil { + logutil.BgLogger().Warn("fail to wait until the ttl job manager stop", zap.Error(err)) + } + }) } // TTLJobManager returns the ttl job manager on this domain diff --git a/domain/plan_replayer.go b/domain/plan_replayer.go index c0f3231223c74..fc54d30759057 100644 --- a/domain/plan_replayer.go +++ b/domain/plan_replayer.go @@ -174,7 +174,7 @@ type planReplayerHandle struct { } // SendTask send dumpTask in background task handler -func (h *planReplayerHandle) SendTask(task *PlanReplayerDumpTask) { +func (h *planReplayerHandle) SendTask(task *PlanReplayerDumpTask) bool { select { case h.planReplayerTaskDumpHandle.taskCH <- task: // we directly remove the task key if we put task in channel successfully, if the task was failed to dump, @@ -182,11 +182,13 @@ func (h *planReplayerHandle) SendTask(task *PlanReplayerDumpTask) { if !task.IsContinuesCapture { h.planReplayerTaskCollectorHandle.removeTask(task.PlanReplayerTaskKey) } + return true default: // TODO: add metrics here // directly discard the task if the task channel is full in order not to block the query process - logutil.BgLogger().Info("discard one plan replayer dump task", - zap.String("sql digest", task.SQLDigest), zap.String("plan digest", task.PlanDigest)) + logutil.BgLogger().Warn("discard one 
plan replayer dump task", + zap.String("sql-digest", task.SQLDigest), zap.String("plan-digest", task.PlanDigest)) + return false } } @@ -209,9 +211,13 @@ func (h *planReplayerTaskCollectorHandle) CollectPlanReplayerTask() error { for _, key := range allKeys { unhandled, err := checkUnHandledReplayerTask(h.ctx, h.sctx, key) if err != nil { + logutil.BgLogger().Warn("[plan-replayer-task] collect plan replayer task failed", zap.Error(err)) return err } if unhandled { + logutil.BgLogger().Debug("[plan-replayer-task] collect plan replayer task success", + zap.String("sql-digest", key.SQLDigest), + zap.String("plan-digest", key.PlanDigest)) tasks = append(tasks, key) } } @@ -351,16 +357,36 @@ type planReplayerTaskDumpWorker struct { func (w *planReplayerTaskDumpWorker) run() { for task := range w.taskCH { + w.handleTask(task) + } +} + +func (w *planReplayerTaskDumpWorker) handleTask(task *PlanReplayerDumpTask) { + sqlDigest := task.SQLDigest + planDigest := task.PlanDigest + check := true + occupy := true + handleTask := true + defer func() { + logutil.BgLogger().Debug("[plan-replayer-capture] handle task", + zap.String("sql-digest", sqlDigest), + zap.String("plan-digest", planDigest), + zap.Bool("check", check), + zap.Bool("occupy", occupy), + zap.Bool("handle", handleTask)) + }() + if task.IsContinuesCapture { if w.status.checkTaskKeyFinishedBefore(task) { - continue + check = false + return } - successOccupy := w.status.occupyRunningTaskKey(task) - if !successOccupy { - continue - } - w.HandleTask(task) - w.status.releaseRunningTaskKey(task) } + occupy = w.status.occupyRunningTaskKey(task) + if !occupy { + return + } + handleTask = w.HandleTask(task) + w.status.releaseRunningTaskKey(task) } // HandleTask handled task @@ -373,7 +399,7 @@ func (w *planReplayerTaskDumpWorker) HandleTask(task *PlanReplayerDumpTask) (suc taskKey := task.PlanReplayerTaskKey unhandled, err := checkUnHandledReplayerTask(w.ctx, w.sctx, taskKey) if err != nil { - logutil.BgLogger().Warn("check plan replayer capture task failed", + logutil.BgLogger().Warn("[plan-replayer-capture] check task failed", zap.String("sqlDigest", taskKey.SQLDigest), zap.String("planDigest", taskKey.PlanDigest), zap.Error(err)) @@ -386,7 +412,7 @@ func (w *planReplayerTaskDumpWorker) HandleTask(task *PlanReplayerDumpTask) (suc file, fileName, err := replayer.GeneratePlanReplayerFile(task.IsContinuesCapture) if err != nil { - logutil.BgLogger().Warn("generate plan replayer capture task file failed", + logutil.BgLogger().Warn("[plan-replayer-capture] generate task file failed", zap.String("sqlDigest", taskKey.SQLDigest), zap.String("planDigest", taskKey.PlanDigest), zap.Error(err)) @@ -409,7 +435,7 @@ func (w *planReplayerTaskDumpWorker) HandleTask(task *PlanReplayerDumpTask) (suc } r, err := handle.GenJSONTableFromStats(schema.Name.String(), tbl.Meta(), stat.(*statistics.Table)) if err != nil { - logutil.BgLogger().Warn("generate plan replayer capture task json stats failed", + logutil.BgLogger().Warn("[plan-replayer-capture] generate task json stats failed", zap.String("sqlDigest", taskKey.SQLDigest), zap.String("planDigest", taskKey.PlanDigest), zap.Error(err)) @@ -421,7 +447,7 @@ func (w *planReplayerTaskDumpWorker) HandleTask(task *PlanReplayerDumpTask) (suc } err = DumpPlanReplayerInfo(w.ctx, w.sctx, task) if err != nil { - logutil.BgLogger().Warn("dump plan replayer capture task result failed", + logutil.BgLogger().Warn("[plan-replayer-capture] dump task result failed", zap.String("sqlDigest", taskKey.SQLDigest), zap.String("planDigest", 
taskKey.PlanDigest), zap.Error(err)) diff --git a/domain/plan_replayer_dump.go b/domain/plan_replayer_dump.go index 264631b6fbdb3..0dd4945873e58 100644 --- a/domain/plan_replayer_dump.go +++ b/domain/plan_replayer_dump.go @@ -66,6 +66,10 @@ const ( PlanReplayerTaskMetaIsCapture = "isCapture" // PlanReplayerTaskMetaIsContinues indicates whether this task is continues task PlanReplayerTaskMetaIsContinues = "isContinues" + // PlanReplayerTaskMetaSQLDigest indicates the sql digest of this task + PlanReplayerTaskMetaSQLDigest = "sqlDigest" + // PlanReplayerTaskMetaPlanDigest indicates the plan digest of this task + PlanReplayerTaskMetaPlanDigest = "planDigest" ) type tableNamePair struct { @@ -180,25 +184,53 @@ func DumpPlanReplayerInfo(ctx context.Context, sctx sessionctx.Context, execStmts := task.ExecStmts zw := zip.NewWriter(zf) var records []PlanReplayerStatusRecord + sqls := make([]string, 0) + for _, execStmt := range task.ExecStmts { + sqls = append(sqls, execStmt.Text()) + } + if task.IsCapture { + logutil.BgLogger().Info("[plan-replayer-dump] start to dump plan replayer result", + zap.String("sql-digest", task.SQLDigest), + zap.String("plan-digest", task.PlanDigest), + zap.Strings("sql", sqls), + zap.Bool("isContinues", task.IsContinuesCapture)) + } else { + logutil.BgLogger().Info("[plan-replayer-dump] start to dump plan replayer result", + zap.Strings("sqls", sqls)) + } defer func() { + errMsg := "" if err != nil { - logutil.BgLogger().Error("dump plan replayer failed", zap.Error(err)) + if task.IsCapture { + logutil.BgLogger().Info("[plan-replayer-dump] dump file failed", + zap.String("sql-digest", task.SQLDigest), + zap.String("plan-digest", task.PlanDigest), + zap.Strings("sql", sqls), + zap.Bool("isContinues", task.IsContinuesCapture)) + } else { + logutil.BgLogger().Info("[plan-replayer-dump] start to dump plan replayer result", + zap.Strings("sqls", sqls)) + } + errMsg = err.Error() } - err = zw.Close() - if err != nil { - logutil.BgLogger().Error("Closing zip writer failed", zap.Error(err), zap.String("filename", fileName)) + err1 := zw.Close() + if err1 != nil { + logutil.BgLogger().Error("[plan-replayer-dump] Closing zip writer failed", zap.Error(err), zap.String("filename", fileName)) + errMsg = errMsg + "," + err1.Error() } - err = zf.Close() - if err != nil { - logutil.BgLogger().Error("Closing zip file failed", zap.Error(err), zap.String("filename", fileName)) + err2 := zf.Close() + if err2 != nil { + logutil.BgLogger().Error("[plan-replayer-dump] Closing zip file failed", zap.Error(err), zap.String("filename", fileName)) + errMsg = errMsg + "," + err2.Error() + } + if len(errMsg) > 0 { for i, record := range records { - record.FailedReason = err.Error() + record.FailedReason = errMsg records[i] = record } } insertPlanReplayerStatus(ctx, sctx, records) }() - // Dump SQLMeta if err = dumpSQLMeta(zw, task); err != nil { return err @@ -299,6 +331,8 @@ func dumpSQLMeta(zw *zip.Writer, task *PlanReplayerDumpTask) error { varMap[PlanReplayerSQLMetaStartTS] = strconv.FormatUint(task.StartTS, 10) varMap[PlanReplayerTaskMetaIsCapture] = strconv.FormatBool(task.IsCapture) varMap[PlanReplayerTaskMetaIsContinues] = strconv.FormatBool(task.IsContinuesCapture) + varMap[PlanReplayerTaskMetaSQLDigest] = task.SQLDigest + varMap[PlanReplayerTaskMetaPlanDigest] = task.PlanDigest if err := toml.NewEncoder(cf).Encode(varMap); err != nil { return errors.AddStack(err) } diff --git a/domain/schema_validator.go b/domain/schema_validator.go index eb933adbc899c..4d7cf2e9b814a 100644 --- 
a/domain/schema_validator.go +++ b/domain/schema_validator.go @@ -185,7 +185,7 @@ func (s *schemaValidator) isRelatedTablesChanged(currVer int64, tableIDs []int64 affected := false for i, tblID := range item.relatedIDs { for _, relatedTblID := range tableIDs { - if tblID == relatedTblID { + if tblID == relatedTblID || relatedTblID == -1 { // if actionType >= 64, the value of left shift equals 0, and it will not impact amend txn changedTblMap[tblID] |= 1 << item.relatedActions[i] affected = true diff --git a/dumpling/tests/e2e_foreign_key/conf/diff_config.toml b/dumpling/tests/e2e_foreign_key/conf/diff_config.toml new file mode 100644 index 0000000000000..634c8416891eb --- /dev/null +++ b/dumpling/tests/e2e_foreign_key/conf/diff_config.toml @@ -0,0 +1,29 @@ +# diff Configuration. + +check-thread-count = 4 + +export-fix-sql = true + +check-struct-only = false + +[task] + output-dir = "./output" + + source-instances = ["mysql1"] + + target-instance = "tidb0" + + target-check-tables = ["e2e_foreign_key.parent", "e2e_foreign_key.child"] + +[data-sources] +[data-sources.mysql1] +host = "127.0.0.1" +port = 3306 +user = "root" +password = "" + +[data-sources.tidb0] +host = "127.0.0.1" +port = 4000 +user = "root" +password = "" diff --git a/dumpling/tests/e2e_foreign_key/conf/lightning.toml b/dumpling/tests/e2e_foreign_key/conf/lightning.toml new file mode 100644 index 0000000000000..e96fdaaf0daeb --- /dev/null +++ b/dumpling/tests/e2e_foreign_key/conf/lightning.toml @@ -0,0 +1,20 @@ +### tidb-lightning config + +[lightning] +server-mode = false +level = "error" +check-requirements = false + +[tikv-importer] +backend = "tidb" +on-duplicate = "error" + +[mydumper] +data-source-dir = "/tmp/dumpling_test_result/sql_res.e2e_foreign_key" + +[tidb] +host = "127.0.0.1" +port = 4000 +user = "root" +password = "" +status-port = 10080 diff --git a/dumpling/tests/e2e_foreign_key/data/e2e_foreign_key.sql b/dumpling/tests/e2e_foreign_key/data/e2e_foreign_key.sql new file mode 100644 index 0000000000000..4b8ff0b1df203 --- /dev/null +++ b/dumpling/tests/e2e_foreign_key/data/e2e_foreign_key.sql @@ -0,0 +1,8 @@ +create database e2e_foreign_key DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +use e2e_foreign_key; +create table parent (id int key); +create table child (id int key, pid int, constraint fk_1 foreign key (pid) references parent(id)); +insert into parent values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); +insert into child values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +set foreign_key_checks=0; +insert into child values (100,100); diff --git a/dumpling/tests/e2e_foreign_key/run.sh b/dumpling/tests/e2e_foreign_key/run.sh new file mode 100644 index 0000000000000..99285d627fdf7 --- /dev/null +++ b/dumpling/tests/e2e_foreign_key/run.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ +set -eu +cur=$(cd `dirname $0`; pwd) + +DB_NAME="e2e_foreign_key" + +# drop database on tidb +export DUMPLING_TEST_PORT=4000 +run_sql "drop database if exists $DB_NAME;" + +# drop database on mysql +export DUMPLING_TEST_PORT=3306 +run_sql "drop database if exists $DB_NAME;" + +# build data on mysql +run_sql_file "$DUMPLING_BASE_NAME/data/e2e_foreign_key.sql" + +# dumping +export DUMPLING_TEST_DATABASE=$DB_NAME +run_dumpling + +cat "$cur/conf/lightning.toml" +# use lightning import data to tidb +run_lightning $cur/conf/lightning.toml + +# check mysql and tidb data +check_sync_diff $cur/conf/diff_config.toml diff --git a/errno/errcode.go b/errno/errcode.go index de4cebc55e144..bc9a73bbdc0c9 100644 --- a/errno/errcode.go +++ b/errno/errcode.go @@ -915,6 +915,7 @@ const ( ErrFunctionalIndexRowValueIsNotAllowed = 3800 ErrDependentByFunctionalIndex = 3837 ErrCannotConvertString = 3854 + ErrDependentByPartitionFunctional = 3855 ErrInvalidJSONValueForFuncIndex = 3903 ErrJSONValueOutOfRangeForFuncIndex = 3904 ErrFunctionalIndexDataIsTooLong = 3907 diff --git a/errno/errname.go b/errno/errname.go index 93b08fbe0e1a4..b7f02df1c13f5 100644 --- a/errno/errname.go +++ b/errno/errname.go @@ -908,6 +908,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrFKIncompatibleColumns: mysql.Message("Referencing column '%s' and referenced column '%s' in foreign key constraint '%s' are incompatible.", nil), ErrFunctionalIndexRowValueIsNotAllowed: mysql.Message("Expression of expression index '%s' cannot refer to a row value", nil), ErrDependentByFunctionalIndex: mysql.Message("Column '%s' has an expression index dependency and cannot be dropped or renamed", nil), + ErrDependentByPartitionFunctional: mysql.Message("Column '%s' has a partitioning function dependency and cannot be dropped or renamed", nil), ErrCannotConvertString: mysql.Message("Cannot convert string '%.64s' from %s to %s", nil), ErrInvalidJSONValueForFuncIndex: mysql.Message("Invalid JSON value for CAST for expression index '%s'", nil), ErrJSONValueOutOfRangeForFuncIndex: mysql.Message("Out of range JSON value for CAST for expression index '%s'", nil), diff --git a/errors.toml b/errors.toml index 7d5be840c0432..8d656b718ec05 100644 --- a/errors.toml +++ b/errors.toml @@ -1176,6 +1176,11 @@ error = ''' Column '%s' has an expression index dependency and cannot be dropped or renamed ''' +["ddl:3855"] +error = ''' +Column '%s' has a partitioning function dependency and cannot be dropped or renamed +''' + ["ddl:4135"] error = ''' Sequence '%-.64s.%-.64s' has run out @@ -1696,6 +1701,21 @@ error = ''' Invalid data type for JSON data in argument %d to function %s; a JSON string or JSON type is required. ''' +["expression:3752"] +error = ''' +Value is out of range for expression index '%s' at row %d +''' + +["expression:3903"] +error = ''' +Invalid JSON value for CAST for expression index '%s' +''' + +["expression:3907"] +error = ''' +Data too long for expression index '%s' +''' + ["expression:8128"] error = ''' Invalid TABLESAMPLE: %s @@ -2411,6 +2431,11 @@ error = ''' Changing schema from '%-.192s' to '%-.192s' is not allowed. ''' +["schema:1506"] +error = ''' +Foreign key clause is not yet supported in conjunction with partitioning +''' + ["schema:1822"] error = ''' Failed to add the foreign key constraint. 
Missing index for constraint '%s' in the referenced table '%s' diff --git a/executor/BUILD.bazel b/executor/BUILD.bazel index 46ae254888db6..e1ecaca57456f 100644 --- a/executor/BUILD.bazel +++ b/executor/BUILD.bazel @@ -249,7 +249,7 @@ go_library( go_test( name = "executor_test", - timeout = "moderate", + timeout = "short", srcs = [ "adapter_test.go", "admin_test.go", @@ -332,7 +332,6 @@ go_test( "utils_test.go", "window_test.go", "write_concurrent_test.go", - "write_test.go", ], data = glob(["testdata/**"]), embed = [":executor"], diff --git a/executor/adapter.go b/executor/adapter.go index 5e12cce1ccc69..c087a50e5f5f0 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "math" "runtime/trace" "strconv" "strings" @@ -295,8 +296,12 @@ func (a *ExecStmt) PointGet(ctx context.Context) (*recordSet, error) { } a.Ctx.GetSessionVars().StmtCtx.Priority = kv.PriorityHigh + var pointExecutor *PointGetExecutor + useMaxTS := startTs == math.MaxUint64 + // try to reuse point get executor - if a.PsStmt.Executor != nil { + // We should only use the cached the executor when the startTS is MaxUint64 + if a.PsStmt.Executor != nil && useMaxTS { exec, ok := a.PsStmt.Executor.(*PointGetExecutor) if !ok { logutil.Logger(ctx).Error("invalid executor type, not PointGetExecutor for point get path") @@ -306,17 +311,21 @@ func (a *ExecStmt) PointGet(ctx context.Context) (*recordSet, error) { pointGetPlan := a.PsStmt.PreparedAst.CachedPlan.(*plannercore.PointGetPlan) exec.Init(pointGetPlan) a.PsStmt.Executor = exec + pointExecutor = exec } } - if a.PsStmt.Executor == nil { + + if pointExecutor == nil { b := newExecutorBuilder(a.Ctx, a.InfoSchema, a.Ti) - newExecutor := b.build(a.Plan) + pointExecutor = b.build(a.Plan).(*PointGetExecutor) if b.err != nil { return nil, b.err } - a.PsStmt.Executor = newExecutor + + if useMaxTS { + a.PsStmt.Executor = pointExecutor + } } - pointExecutor := a.PsStmt.Executor.(*PointGetExecutor) if err = pointExecutor.Open(ctx); err != nil { terror.Call(pointExecutor.Close) @@ -1407,6 +1416,10 @@ func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults boo sessVars.DurationParse = 0 // Clean the stale read flag when statement execution finish sessVars.StmtCtx.IsStaleness = false + // Clean the MPP query info + sessVars.StmtCtx.MPPQueryInfo.QueryID.Store(0) + sessVars.StmtCtx.MPPQueryInfo.QueryTS.Store(0) + sessVars.StmtCtx.MPPQueryInfo.AllocatedMPPTaskID.Store(0) if sessVars.StmtCtx.ReadFromTableCache { metrics.ReadFromTableCacheCounter.Inc() diff --git a/executor/admin.go b/executor/admin.go index a0484ce957b30..21378b21b1677 100644 --- a/executor/admin.go +++ b/executor/admin.go @@ -163,7 +163,7 @@ func (e *CheckIndexRangeExec) constructIndexScanPB() *tipb.Executor { idxExec := &tipb.IndexScan{ TableId: e.table.ID, IndexId: e.index.ID, - Columns: util.ColumnsToProto(e.cols, e.table.PKIsHandle), + Columns: util.ColumnsToProto(e.cols, e.table.PKIsHandle, true), } return &tipb.Executor{Tp: tipb.ExecType_TypeIndexScan, IdxScan: idxExec} } @@ -814,7 +814,7 @@ func (e *CleanupIndexExec) constructIndexScanPB() *tipb.Executor { idxExec := &tipb.IndexScan{ TableId: e.physicalID, IndexId: e.index.Meta().ID, - Columns: util.ColumnsToProto(e.columns, e.table.Meta().PKIsHandle), + Columns: util.ColumnsToProto(e.columns, e.table.Meta().PKIsHandle, true), PrimaryColumnIds: tables.TryGetCommonPkColumnIds(e.table.Meta()), } return &tipb.Executor{Tp: tipb.ExecType_TypeIndexScan, IdxScan: idxExec} diff --git 
a/executor/aggfuncs/BUILD.bazel b/executor/aggfuncs/BUILD.bazel index 5c01950eef836..a1d4a57dde1f5 100644 --- a/executor/aggfuncs/BUILD.bazel +++ b/executor/aggfuncs/BUILD.bazel @@ -89,7 +89,7 @@ go_test( embed = [":aggfuncs"], flaky = True, race = "on", - shard_count = 10, + shard_count = 20, deps = [ "//expression", "//expression/aggregation", diff --git a/executor/builder.go b/executor/builder.go index f60a7a78f5a52..b3ef5dfc9b52d 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -2580,7 +2580,7 @@ func (b *executorBuilder) buildAnalyzeSamplingPushdown(task plannercore.AnalyzeC SampleSize: int64(opts[ast.AnalyzeOptNumSamples]), SampleRate: sampleRate, SketchSize: maxSketchSize, - ColumnsInfo: util.ColumnsToProto(task.ColsInfo, task.TblInfo.PKIsHandle), + ColumnsInfo: util.ColumnsToProto(task.ColsInfo, task.TblInfo.PKIsHandle, false), ColumnGroups: colGroups, } if task.TblInfo != nil { @@ -2741,7 +2741,7 @@ func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plannercore.AnalyzeCo BucketSize: int64(opts[ast.AnalyzeOptNumBuckets]), SampleSize: MaxRegionSampleSize, SketchSize: maxSketchSize, - ColumnsInfo: util.ColumnsToProto(cols, task.HandleCols != nil && task.HandleCols.IsInt()), + ColumnsInfo: util.ColumnsToProto(cols, task.HandleCols != nil && task.HandleCols.IsInt(), false), CmsketchDepth: &depth, CmsketchWidth: &width, } @@ -3402,6 +3402,7 @@ func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) Exe is: b.is, originalPlan: v.GetTablePlan(), startTS: startTs, + mppQueryID: kv.MPPQueryID{QueryTs: getMPPQueryTS(b.ctx), LocalQueryID: getMPPQueryID(b.ctx), ServerID: domain.GetDomain(b.ctx).ServerID()}, } return gather } @@ -3409,10 +3410,6 @@ func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) Exe // buildTableReader builds a table reader executor. It first build a no range table reader, // and then update it ranges from table scan plan. 
func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) Executor { - if v.StoreType != kv.TiKV && b.isStaleness { - b.err = errors.New("stale requests require tikv backend") - return nil - } failpoint.Inject("checkUseMPP", func(val failpoint.Value) { if !b.ctx.GetSessionVars().InRestrictedSQL && val.(bool) != useMPPExecution(b.ctx, v) { if val.(bool) { diff --git a/executor/compiler.go b/executor/compiler.go index e2c2a29794d1d..821561899f4e7 100644 --- a/executor/compiler.go +++ b/executor/compiler.go @@ -157,20 +157,22 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (_ *ExecS } } } + + if err = sessiontxn.OptimizeWithPlanAndThenWarmUp(c.Ctx, stmt.Plan); err != nil { + return nil, err + } + if c.Ctx.GetSessionVars().IsPlanReplayerCaptureEnabled() && !c.Ctx.GetSessionVars().InRestrictedSQL { - if _, ok := stmtNode.(*ast.SelectStmt); ok { - startTS, err := sessiontxn.GetTxnManager(c.Ctx).GetStmtReadTS() - if err != nil { - return nil, err - } - if c.Ctx.GetSessionVars().EnablePlanReplayedContinuesCapture { - checkPlanReplayerContinuesCapture(c.Ctx, stmtNode, startTS) - } else { - checkPlanReplayerCaptureTask(c.Ctx, stmtNode, startTS) - } + startTS, err := sessiontxn.GetTxnManager(c.Ctx).GetStmtReadTS() + if err != nil { + return nil, err + } + if c.Ctx.GetSessionVars().EnablePlanReplayedContinuesCapture { + checkPlanReplayerContinuesCapture(c.Ctx, stmtNode, startTS) + } else { + checkPlanReplayerCaptureTask(c.Ctx, stmtNode, startTS) } } - return stmt, nil } @@ -183,9 +185,17 @@ func checkPlanReplayerCaptureTask(sctx sessionctx.Context, stmtNode ast.StmtNode if handle == nil { return } + captured := false tasks := handle.GetTasks() _, sqlDigest := sctx.GetSessionVars().StmtCtx.SQLDigest() _, planDigest := getPlanDigest(sctx.GetSessionVars().StmtCtx) + defer func() { + logutil.BgLogger().Debug("[plan-replayer-capture] check capture task", + zap.String("sql-digest", sqlDigest.String()), + zap.String("plan-digest", planDigest.String()), + zap.Int("tasks", len(tasks)), + zap.Bool("captured", captured)) + }() key := replayer.PlanReplayerTaskKey{ SQLDigest: sqlDigest.String(), PlanDigest: planDigest.String(), @@ -193,7 +203,7 @@ func checkPlanReplayerCaptureTask(sctx sessionctx.Context, stmtNode ast.StmtNode for _, task := range tasks { if task.SQLDigest == sqlDigest.String() { if task.PlanDigest == "*" || task.PlanDigest == planDigest.String() { - sendPlanReplayerDumpTask(key, sctx, stmtNode, startTS, false) + captured = sendPlanReplayerDumpTask(key, sctx, stmtNode, startTS, false) return } } @@ -215,16 +225,26 @@ func checkPlanReplayerContinuesCapture(sctx sessionctx.Context, stmtNode ast.Stm SQLDigest: sqlDigest.String(), PlanDigest: planDigest.String(), } + captured := false + defer func() { + logutil.BgLogger().Debug("[plan-replayer-capture] check continues capture task", + zap.String("sql-digest", sqlDigest.String()), + zap.String("plan-digest", planDigest.String()), + zap.Bool("captured", captured)) + }() + existed := sctx.GetSessionVars().CheckPlanReplayerFinishedTaskKey(key) if existed { return } - sendPlanReplayerDumpTask(key, sctx, stmtNode, startTS, true) - sctx.GetSessionVars().AddPlanReplayerFinishedTaskKey(key) + captured = sendPlanReplayerDumpTask(key, sctx, stmtNode, startTS, true) + if captured { + sctx.GetSessionVars().AddPlanReplayerFinishedTaskKey(key) + } } func sendPlanReplayerDumpTask(key replayer.PlanReplayerTaskKey, sctx sessionctx.Context, stmtNode ast.StmtNode, - startTS uint64, isContinuesCapture bool) { + startTS 
uint64, isContinuesCapture bool) bool { stmtCtx := sctx.GetSessionVars().StmtCtx handle := sctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) dumpTask := &domain.PlanReplayerDumpTask{ @@ -239,7 +259,7 @@ func sendPlanReplayerDumpTask(key replayer.PlanReplayerTaskKey, sctx sessionctx. IsCapture: true, IsContinuesCapture: isContinuesCapture, } - domain.GetDomain(sctx).GetPlanReplayerHandle().SendTask(dumpTask) + return domain.GetDomain(sctx).GetPlanReplayerHandle().SendTask(dumpTask) } // needLowerPriority checks whether it's needed to lower the execution priority diff --git a/executor/executor.go b/executor/executor.go index 9f95e63aaed20..603996ad7764f 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -353,7 +353,7 @@ func (e *CancelDDLJobsExec) Open(ctx context.Context) error { if err != nil { return err } - e.errs, err = ddl.CancelJobs(newSess, e.ctx.GetStore(), e.jobIDs) + e.errs, err = ddl.CancelJobs(newSess, e.jobIDs) e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), newSess) return err } diff --git a/executor/executor_required_rows_test.go b/executor/executor_required_rows_test.go index cbca9914b5bc2..c3ac762050d24 100644 --- a/executor/executor_required_rows_test.go +++ b/executor/executor_required_rows_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/parser/ast" @@ -211,6 +212,7 @@ func defaultCtx() sessionctx.Context { ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, ctx.GetSessionVars().MemQuotaQuery) ctx.GetSessionVars().StmtCtx.DiskTracker = disk.NewTracker(-1, -1) ctx.GetSessionVars().SnapshotTS = uint64(1) + domain.BindDomain(ctx, domain.NewMockDomain()) return ctx } diff --git a/executor/explainfor_test.go b/executor/explainfor_test.go index 89245fa6b237d..ddb0578338c6f 100644 --- a/executor/explainfor_test.go +++ b/executor/explainfor_test.go @@ -551,9 +551,9 @@ func TestIssue28259(t *testing.T) { ps = []*util.ProcessInfo{tkProcess} tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps}) res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Len(t, res.Rows(), 4) - require.Regexp(t, ".*Selection.*", res.Rows()[0][0]) - require.Regexp(t, ".*IndexFullScan.*", res.Rows()[3][0]) + require.Len(t, res.Rows(), 3) + require.Regexp(t, ".*Selection.*", res.Rows()[1][0]) + require.Regexp(t, ".*IndexFullScan.*", res.Rows()[2][0]) res = tk.MustQuery("explain format = 'brief' select col1 from UK_GCOL_VIRTUAL_18588 use index(UK_COL1) " + "where col1 between -1696020282760139948 and -2619168038882941276 or col1 < -4004648990067362699;") @@ -589,11 +589,9 @@ func TestIssue28259(t *testing.T) { ps = []*util.ProcessInfo{tkProcess} tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps}) res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Len(t, res.Rows(), 5) - require.Regexp(t, ".*Selection.*", res.Rows()[1][0]) - require.Equal(t, "lt(test.t.b, 1), or(and(ge(test.t.a, 2), le(test.t.a, 1)), lt(test.t.a, 1))", res.Rows()[1][4]) - require.Regexp(t, ".*IndexReader.*", res.Rows()[2][0]) - require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[4][0]) + require.Len(t, res.Rows(), 4) + require.Regexp(t, ".*Selection.*", res.Rows()[2][0]) + require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[3][0]) res = tk.MustQuery("explain format = 'brief' select a from t 
use index(idx) " + "where (a between 0 and 2 or a < 2) and b < 1;") @@ -636,12 +634,11 @@ func TestIssue28259(t *testing.T) { ps = []*util.ProcessInfo{tkProcess} tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps}) res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Len(t, res.Rows(), 6) - require.Regexp(t, ".*Selection.*", res.Rows()[1][0]) - require.Regexp(t, ".*IndexLookUp.*", res.Rows()[2][0]) - require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[3][0]) - require.Regexp(t, ".*Selection.*", res.Rows()[4][0]) - require.Regexp(t, ".*TableRowIDScan.*", res.Rows()[5][0]) + require.Len(t, res.Rows(), 5) + require.Regexp(t, ".*IndexLookUp.*", res.Rows()[1][0]) + require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[2][0]) + require.Regexp(t, ".*Selection.*", res.Rows()[3][0]) + require.Regexp(t, ".*TableRowIDScan.*", res.Rows()[4][0]) res = tk.MustQuery("explain format = 'brief' select /*+ USE_INDEX(t, idx) */ a from t use index(idx) " + "where (a between 0 and 2 or a < 2) and b < 1;") @@ -860,7 +857,7 @@ func TestIndexMerge4PlanCache(t *testing.T) { tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=? or (b=? and (a >= ? and a <= ?));';") tk.MustQuery("execute stmt using @a, @a, @b, @a").Check(testkit.Rows("10 10 10")) tk.MustQuery("execute stmt using @b, @b, @b, @b").Check(testkit.Rows("11 11 11")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=10 or (a >=? and a <= ?);';") tk.MustExec("set @a=9, @b=10, @c=11;") diff --git a/executor/fktest/BUILD.bazel b/executor/fktest/BUILD.bazel index f245bba152c59..2c9f00dfa0624 100644 --- a/executor/fktest/BUILD.bazel +++ b/executor/fktest/BUILD.bazel @@ -15,6 +15,7 @@ go_test( "//infoschema", "//kv", "//meta/autoid", + "//parser", "//parser/ast", "//parser/auth", "//parser/format", diff --git a/executor/fktest/foreign_key_test.go b/executor/fktest/foreign_key_test.go index 8d6442f39fad4..1dc92d6954a2e 100644 --- a/executor/fktest/foreign_key_test.go +++ b/executor/fktest/foreign_key_test.go @@ -21,6 +21,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" @@ -28,6 +29,7 @@ import ( "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/format" @@ -2643,3 +2645,91 @@ func TestForeignKeyOnReplaceInto(t *testing.T) { tk.MustExec("replace into t1 values (1, 'new-boss', null)") tk.MustQuery("select id from t1 order by id").Check(testkit.Rows("1")) } + +func TestForeignKeyLargeTxnErr(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("set @@foreign_key_checks=1") + tk.MustExec("use test") + tk.MustExec("create table t1 (id int auto_increment key, pid int, name varchar(200), index(pid));") + tk.MustExec("insert into t1 (name) values ('abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890');") + for i := 0; i < 8; i++ { + tk.MustExec("insert into t1 (name) select name from t1;") + } + tk.MustQuery("select count(*) from t1").Check(testkit.Rows("256")) + tk.MustExec("update t1 set pid=1 
where id>1") + tk.MustExec("alter table t1 add foreign key (pid) references t1 (id) on update cascade") + originLimit := atomic.LoadUint64(&kv.TxnTotalSizeLimit) + defer func() { + atomic.StoreUint64(&kv.TxnTotalSizeLimit, originLimit) + }() + // Set the limitation to a small value, make it easier to reach the limitation. + atomic.StoreUint64(&kv.TxnTotalSizeLimit, 10240) + tk.MustQuery("select sum(id) from t1").Check(testkit.Rows("32896")) + // foreign key cascade behaviour will cause ErrTxnTooLarge. + tk.MustGetDBError("update t1 set id=id+100000 where id=1", kv.ErrTxnTooLarge) + tk.MustQuery("select sum(id) from t1").Check(testkit.Rows("32896")) + tk.MustGetDBError("update t1 set id=id+100000 where id=1", kv.ErrTxnTooLarge) + tk.MustQuery("select id,pid from t1 where id<3 order by id").Check(testkit.Rows("1 ", "2 1")) + tk.MustExec("set @@foreign_key_checks=0") + tk.MustExec("update t1 set id=id+100000 where id=1") + tk.MustQuery("select id,pid from t1 where id<3 or pid is null order by id").Check(testkit.Rows("2 1", "100001 ")) +} + +func TestForeignKeyAndLockView(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (id int key)") + tk.MustExec("create table t2 (id int key, foreign key (id) references t1(id) ON DELETE CASCADE ON UPDATE CASCADE)") + tk.MustExec("insert into t1 values (1)") + tk.MustExec("insert into t2 values (1)") + tk.MustExec("begin pessimistic") + tk.MustExec("set @@foreign_key_checks=0") + tk.MustExec("update t2 set id=2") + + tk2 := testkit.NewTestKit(t, store) + tk2.MustExec("set @@foreign_key_checks=1") + tk2.MustExec("use test") + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + tk2.MustExec("begin pessimistic") + tk2.MustExec("update t1 set id=2 where id=1") + tk2.MustExec("commit") + }() + time.Sleep(time.Millisecond * 200) + _, digest := parser.NormalizeDigest("update t1 set id=2 where id=1") + tk.MustQuery("select CURRENT_SQL_DIGEST from information_schema.tidb_trx where state='LockWaiting' and db='test'").Check(testkit.Rows(digest.String())) + tk.MustGetErrMsg("update t1 set id=2", "[executor:1213]Deadlock found when trying to get lock; try restarting transaction") + wg.Wait() +} + +func TestForeignKeyAndMemoryTracker(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("set @@foreign_key_checks=1") + tk.MustExec("use test") + tk.MustExec("create table t1 (id int auto_increment key, pid int, name varchar(200), index(pid));") + tk.MustExec("insert into t1 (name) values ('abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz');") + for i := 0; i < 8; i++ { + tk.MustExec("insert into t1 (name) select name from t1;") + } + tk.MustQuery("select count(*) from t1").Check(testkit.Rows("256")) + tk.MustExec("update t1 set pid=1 where id>1") + tk.MustExec("alter table t1 add foreign key (pid) references t1 (id) on update cascade") + tk.MustQuery("select sum(id) from t1").Check(testkit.Rows("32896")) + defer tk.MustExec("SET GLOBAL tidb_mem_oom_action = DEFAULT") + tk.MustExec("SET GLOBAL tidb_mem_oom_action='CANCEL'") + tk.MustExec("set @@tidb_mem_quota_query=40960;") + // foreign key cascade behaviour will exceed memory quota. 
+ err := tk.ExecToErr("update t1 set id=id+100000 where id=1") + require.Error(t, err) + require.Contains(t, err.Error(), "Out Of Memory Quota!") + tk.MustQuery("select id,pid from t1 where id = 1").Check(testkit.Rows("1 ")) + tk.MustExec("set @@foreign_key_checks=0") + // After disabling foreign_key_checks, the following DML will execute successfully. + tk.MustExec("update t1 set id=id+100000 where id=1") + tk.MustQuery("select id,pid from t1 where id<3 or pid is null order by id").Check(testkit.Rows("2 1", "100001 ")) +} diff --git a/executor/insert_common.go b/executor/insert_common.go index d321f02a2f4fd..8440242f1dad5 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -388,7 +388,7 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression e.evalBuffer.SetDatum(offset, val1) } // Row may lack of generated column, autoIncrement column, empty column here. - return e.fillRow(ctx, row, hasValue) + return e.fillRow(ctx, row, hasValue, rowIdx) } var emptyRow chunk.Row @@ -422,7 +422,7 @@ func (e *InsertValues) fastEvalRow(ctx context.Context, list []expression.Expres offset := e.insertColumns[i].Offset row[offset], hasValue[offset] = val1, true } - return e.fillRow(ctx, row, hasValue) + return e.fillRow(ctx, row, hasValue, rowIdx) } // setValueForRefColumn set some default values for the row to eval the row value with other columns, @@ -562,7 +562,7 @@ func (e *InsertValues) getRow(ctx context.Context, vals []types.Datum) ([]types. hasValue[offset] = true } - return e.fillRow(ctx, row, hasValue) + return e.fillRow(ctx, row, hasValue, 0) } // getColDefaultValue gets the column default value. @@ -647,7 +647,7 @@ func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx // `insert|replace values` can guarantee consecutive autoID in a batch. // Other statements like `insert select from` don't guarantee consecutive autoID. 
// https://dev.mysql.com/doc/refman/8.0/en/innodb-auto-increment-handling.html -func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue []bool) ([]types.Datum, error) { +func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue []bool, rowIdx int) ([]types.Datum, error) { gCols := make([]*table.Column, 0) tCols := e.Table.Cols() if e.hasExtraHandle { @@ -693,6 +693,9 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue for i, gCol := range gCols { colIdx := gCol.ColumnInfo.Offset val, err := e.GenExprs[i].Eval(chunk.MutRowFromDatums(row).ToRow()) + if err != nil && gCol.FieldType.IsArray() { + return nil, completeError(tbl, gCol.Offset, rowIdx, err) + } if e.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) != nil { return nil, err } @@ -708,6 +711,29 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue return row, nil } +func completeError(tbl *model.TableInfo, offset int, rowIdx int, err error) error { + name := "expression_index" + for _, idx := range tbl.Indices { + for _, column := range idx.Columns { + if column.Offset == offset { + name = idx.Name.O + break + } + } + } + + if expression.ErrInvalidJSONForFuncIndex.Equal(err) { + return expression.ErrInvalidJSONForFuncIndex.GenWithStackByArgs(name) + } + if types.ErrOverflow.Equal(err) { + return expression.ErrDataOutOfRangeFuncIndex.GenWithStackByArgs(name, rowIdx+1) + } + if types.ErrDataTooLong.Equal(err) { + return expression.ErrFuncIndexDataIsTooLong.GenWithStackByArgs(name) + } + return err +} + // isAutoNull can help judge whether a datum is AutoIncrement Null quickly. // This used to help lazyFillAutoIncrement to find consecutive N datum backwards for batch autoID alloc. func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column) bool { diff --git a/executor/mpp_gather.go b/executor/mpp_gather.go index 42526774dbdd5..eee019bd0de47 100644 --- a/executor/mpp_gather.go +++ b/executor/mpp_gather.go @@ -16,6 +16,7 @@ package executor import ( "context" + "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -38,6 +39,18 @@ func useMPPExecution(ctx sessionctx.Context, tr *plannercore.PhysicalTableReader return ok } +func getMPPQueryID(ctx sessionctx.Context) uint64 { + mppQueryInfo := &ctx.GetSessionVars().StmtCtx.MPPQueryInfo + mppQueryInfo.QueryID.CompareAndSwap(0, plannercore.AllocMPPQueryID()) + return mppQueryInfo.QueryID.Load() +} + +func getMPPQueryTS(ctx sessionctx.Context) uint64 { + mppQueryInfo := &ctx.GetSessionVars().StmtCtx.MPPQueryInfo + mppQueryInfo.QueryTS.CompareAndSwap(0, uint64(time.Now().UnixNano())) + return mppQueryInfo.QueryTS.Load() +} + // MPPGather dispatch MPP tasks and read data from root tasks. 
type MPPGather struct { // following fields are construct needed @@ -45,6 +58,7 @@ type MPPGather struct { is infoschema.InfoSchema originalPlan plannercore.PhysicalPlan startTS uint64 + mppQueryID kv.MPPQueryID mppReqs []*kv.MPPDispatchRequest @@ -78,17 +92,19 @@ func (e *MPPGather) appendMPPDispatchReq(pf *plannercore.Fragment) error { return errors.Trace(err) } logutil.BgLogger().Info("Dispatch mpp task", zap.Uint64("timestamp", mppTask.StartTs), - zap.Int64("ID", mppTask.ID), zap.String("address", mppTask.Meta.GetAddress()), + zap.Int64("ID", mppTask.ID), zap.Uint64("QueryTs", mppTask.MppQueryID.QueryTs), zap.Uint64("LocalQueryId", mppTask.MppQueryID.LocalQueryID), + zap.Uint64("ServerID", mppTask.MppQueryID.ServerID), zap.String("address", mppTask.Meta.GetAddress()), zap.String("plan", plannercore.ToString(pf.ExchangeSender))) req := &kv.MPPDispatchRequest{ - Data: pbData, - Meta: mppTask.Meta, - ID: mppTask.ID, - IsRoot: pf.IsRoot, - Timeout: 10, - SchemaVar: e.is.SchemaMetaVersion(), - StartTs: e.startTS, - State: kv.MppTaskReady, + Data: pbData, + Meta: mppTask.Meta, + ID: mppTask.ID, + IsRoot: pf.IsRoot, + Timeout: 10, + SchemaVar: e.is.SchemaMetaVersion(), + StartTs: e.startTS, + MppQueryID: mppTask.MppQueryID, + State: kv.MppTaskReady, } e.mppReqs = append(e.mppReqs, req) } @@ -109,7 +125,7 @@ func (e *MPPGather) Open(ctx context.Context) (err error) { // TODO: Move the construct tasks logic to planner, so we can see the explain results. sender := e.originalPlan.(*plannercore.PhysicalExchangeSender) planIDs := collectPlanIDS(e.originalPlan, nil) - frags, err := plannercore.GenerateRootMPPTasks(e.ctx, e.startTS, sender, e.is) + frags, err := plannercore.GenerateRootMPPTasks(e.ctx, e.startTS, e.mppQueryID, sender, e.is) if err != nil { return errors.Trace(err) } @@ -124,7 +140,7 @@ func (e *MPPGather) Open(ctx context.Context) (err error) { failpoint.Return(errors.Errorf("The number of tasks is not right, expect %d tasks but actually there are %d tasks", val.(int), len(e.mppReqs))) } }) - e.respIter, err = distsql.DispatchMPPTasks(ctx, e.ctx, e.mppReqs, e.retFieldTypes, planIDs, e.id, e.startTS) + e.respIter, err = distsql.DispatchMPPTasks(ctx, e.ctx, e.mppReqs, e.retFieldTypes, planIDs, e.id, e.startTS, e.mppQueryID) if err != nil { return errors.Trace(err) } diff --git a/executor/point_get_test.go b/executor/point_get_test.go index c615c3a75cb1a..8f13675457481 100644 --- a/executor/point_get_test.go +++ b/executor/point_get_test.go @@ -825,3 +825,20 @@ func TestPointGetIssue25167(t *testing.T) { tk.MustExec("insert into t values (1)") tk.MustQuery("select * from t as of timestamp @a where a = 1").Check(testkit.Rows()) } + +func TestPointGetIssue40194(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1(id int primary key, v int)") + tk.MustExec("insert into t1 values(1, 10)") + tk.MustExec("prepare s from 'select * from t1 where id=1'") + tk.MustExec("set @@tidb_enable_plan_replayer_capture=1") + tk.MustQuery("execute s").Check(testkit.Rows("1 10")) + tk.MustQuery("execute s").Check(testkit.Rows("1 10")) + + tk2 := testkit.NewTestKit(t, store) + tk2.MustExec("use test") + tk2.MustExec("update t1 set v=v+1") + tk.MustQuery("execute s").Check(testkit.Rows("1 11")) +} diff --git a/executor/recover_test.go b/executor/recover_test.go index aad1c93d9fb87..a7d26f247c952 100644 --- a/executor/recover_test.go +++ b/executor/recover_test.go @@ -332,10 +332,12 @@ func 
TestRecoverClusterMeetError(t *testing.T) { newTk.MustGetErrCode(fmt.Sprintf("flashback cluster to timestamp '%s'", time.Now().Add(0-30*time.Second)), errno.ErrPrivilegeCheckFail) tk.MustExec("drop user 'testflashback'@'localhost';") - // Flashback failed because of ddl history. - tk.MustExec("use test;") - tk.MustExec("create table t(a int);") - tk.MustMatchErrMsg(fmt.Sprintf("flashback cluster to timestamp '%s'", flashbackTs), "Detected schema change due to another DDL job during \\[.*, now\\), can't do flashback") + // update tidb_server_version + nowTS, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) + require.NoError(t, err) + tk.MustExec("update mysql.tidb set VARIABLE_VALUE=VARIABLE_VALUE+1 where VARIABLE_NAME='tidb_server_version'") + errorMsg := fmt.Sprintf("[ddl:-1]Detected TiDB upgrade during [%s, now), can't do flashback", oracle.GetTimeFromTS(nowTS).String()) + tk.MustGetErrMsg(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(nowTS)), errorMsg) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) diff --git a/executor/simple_test.go b/executor/simple_test.go index 61688519b55d5..a67933bbbca92 100644 --- a/executor/simple_test.go +++ b/executor/simple_test.go @@ -21,13 +21,13 @@ import ( "testing" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/server" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/util" "github.com/stretchr/testify/require" - tikvutil "github.com/tikv/client-go/v2/util" ) func TestKillStmt(t *testing.T) { @@ -86,7 +86,7 @@ func TestKillStmt(t *testing.T) { func TestUserAttributes(t *testing.T) { store, _ := testkit.CreateMockStoreAndDomain(t) rootTK := testkit.NewTestKit(t, store) - ctx := context.WithValue(context.Background(), tikvutil.RequestSourceKey, tikvutil.RequestSource{RequestSourceInternal: true}) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // https://dev.mysql.com/doc/refman/8.0/en/create-user.html#create-user-comments-attributes rootTK.MustExec(`CREATE USER testuser COMMENT '1234'`) diff --git a/executor/tiflashtest/BUILD.bazel b/executor/tiflashtest/BUILD.bazel index 5223fa79cc2d9..c7678e569522d 100644 --- a/executor/tiflashtest/BUILD.bazel +++ b/executor/tiflashtest/BUILD.bazel @@ -16,6 +16,7 @@ go_test( "//executor", "//meta/autoid", "//parser/terror", + "//planner/core", "//store/mockstore", "//store/mockstore/unistore", "//testkit", diff --git a/executor/tiflashtest/tiflash_test.go b/executor/tiflashtest/tiflash_test.go index 222e1bfdaff0b..ca52fb48fd788 100644 --- a/executor/tiflashtest/tiflash_test.go +++ b/executor/tiflashtest/tiflash_test.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/parser/terror" + plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/mockstore/unistore" "github.com/pingcap/tidb/testkit" @@ -267,14 +268,9 @@ func TestMppExecution(t *testing.T) { tk.MustExec("begin") tk.MustQuery("select count(*) from ( select * from t2 group by a, b) A group by A.b").Check(testkit.Rows("3")) tk.MustQuery("select count(*) from t1 where t1.a+100 > ( select count(*) from t2 where t1.a=t2.a and t1.b=t2.b) group by 
t1.b").Check(testkit.Rows("4")) - txn, err := tk.Session().Txn(true) - require.NoError(t, err) - ts := txn.StartTS() - taskID := tk.Session().GetSessionVars().AllocMPPTaskID(ts) - require.Equal(t, int64(6), taskID) - tk.MustExec("commit") - taskID = tk.Session().GetSessionVars().AllocMPPTaskID(ts + 1) + taskID := plannercore.AllocMPPTaskID(tk.Session()) require.Equal(t, int64(1), taskID) + tk.MustExec("commit") failpoint.Enable("github.com/pingcap/tidb/executor/checkTotalMPPTasks", `return(3)`) // all the data is related to one store, so there are three tasks. @@ -1043,7 +1039,7 @@ func TestTiFlashPartitionTableBroadcastJoin(t *testing.T) { } } -func TestForbidTiflashDuringStaleRead(t *testing.T) { +func TestTiflashSupportStaleRead(t *testing.T) { store := testkit.CreateMockStore(t, withMockTiFlash(2)) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") @@ -1075,8 +1071,8 @@ func TestForbidTiflashDuringStaleRead(t *testing.T) { fmt.Fprintf(resBuff, "%s\n", row) } res = resBuff.String() - require.NotContains(t, res, "tiflash") - require.Contains(t, res, "tikv") + require.Contains(t, res, "tiflash") + require.NotContains(t, res, "tikv") } func TestForbidTiFlashIfExtraPhysTableIDIsNeeded(t *testing.T) { @@ -1281,3 +1277,35 @@ func TestDisaggregatedTiFlash(t *testing.T) { }) tk.MustQuery("select * from t;").Check(testkit.Rows()) } + +func TestDisaggregatedTiFlashQuery(t *testing.T) { + config.UpdateGlobal(func(conf *config.Config) { + conf.DisaggregatedTiFlash = true + }) + defer config.UpdateGlobal(func(conf *config.Config) { + conf.DisaggregatedTiFlash = false + }) + + store := testkit.CreateMockStore(t, withMockTiFlash(2)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists tbl_1") + tk.MustExec(`create table tbl_1 ( col_1 bigint not null default -1443635317331776148, + col_2 text ( 176 ) collate utf8mb4_bin not null, + col_3 decimal ( 8, 3 ), + col_4 varchar ( 128 ) collate utf8mb4_bin not null, + col_5 varchar ( 377 ) collate utf8mb4_bin, + col_6 double, + col_7 varchar ( 459 ) collate utf8mb4_bin, + col_8 tinyint default -88 ) charset utf8mb4 collate utf8mb4_bin ;`) + tk.MustExec("alter table tbl_1 set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "tbl_1") + err := domain.GetDomain(tk.Session()).DDL().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") + + needCheckTiFlashComputeNode := "false" + failpoint.Enable("github.com/pingcap/tidb/planner/core/testDisaggregatedTiFlashQuery", fmt.Sprintf("return(%s)", needCheckTiFlashComputeNode)) + defer failpoint.Disable("github.com/pingcap/tidb/planner/core/testDisaggregatedTiFlashQuery") + tk.MustExec("explain select max( tbl_1.col_1 ) as r0 , sum( tbl_1.col_1 ) as r1 , sum( tbl_1.col_8 ) as r2 from tbl_1 where tbl_1.col_8 != 68 or tbl_1.col_3 between null and 939 order by r0,r1,r2;") +} diff --git a/executor/writetest/BUILD.bazel b/executor/writetest/BUILD.bazel new file mode 100644 index 0000000000000..2ddc46c29ad82 --- /dev/null +++ b/executor/writetest/BUILD.bazel @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "writetest_test", + srcs = [ + "main_test.go", + "write_test.go", + ], + flaky = True, + race = "on", + shard_count = 50, + deps = [ + "//config", + "//executor", + "//kv", + "//meta/autoid", + "//parser/model", + "//parser/mysql", + "//planner/core", + "//session", + "//sessionctx", + "//sessionctx/stmtctx", + 
"//sessionctx/variable", + "//sessiontxn", + "//store/mockstore", + "//table", + "//table/tables", + "//testkit", + "//types", + "//util", + "//util/mock", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//require", + "@com_github_tikv_client_go_v2//tikv", + "@io_opencensus_go//stats/view", + "@org_uber_go_goleak//:goleak", + ], +) diff --git a/executor/writetest/main_test.go b/executor/writetest/main_test.go new file mode 100644 index 0000000000000..075e1f91b488a --- /dev/null +++ b/executor/writetest/main_test.go @@ -0,0 +1,60 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writetest + +import ( + "fmt" + "testing" + + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/testkit" + "github.com/tikv/client-go/v2/tikv" + "go.opencensus.io/stats/view" + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + autoid.SetStep(5000) + config.UpdateGlobal(func(conf *config.Config) { + conf.Log.SlowThreshold = 30000 // 30s + conf.TiKVClient.AsyncCommit.SafeWindow = 0 + conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 + conf.Experimental.AllowsExpressionIndex = true + }) + tikv.EnableFailpoints() + + opts := []goleak.Option{ + goleak.Cleanup(func(_ int) { + view.Stop() + }), + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), + goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), + goleak.IgnoreTopFunction("gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun"), + goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), + } + + goleak.VerifyTestMain(m, opts...) +} + +func fillData(tk *testkit.TestKit, table string) { + tk.MustExec("use test") + tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table)) + + // insert data + tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table)) + tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table)) +} diff --git a/executor/write_test.go b/executor/writetest/write_test.go similarity index 99% rename from executor/write_test.go rename to executor/writetest/write_test.go index 32aa261c5518d..ebeaaa388e269 100644 --- a/executor/write_test.go +++ b/executor/writetest/write_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package executor_test +package writetest import ( "context" diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go index e6257c4dd058c..545abd497a2da 100644 --- a/expression/builtin_cast.go +++ b/expression/builtin_cast.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -420,7 +421,7 @@ func (c *castAsArrayFunctionClass) verifyArgs(args []Expression) error { } if args[0].GetType().EvalType() != types.ETJson { - return types.ErrInvalidJSONData.GenWithStackByArgs("1", "cast_as_array") + return ErrInvalidTypeForJSON.GenWithStackByArgs(1, "cast_as_array") } return nil @@ -467,9 +468,80 @@ func (b *castJSONAsArrayFunctionSig) evalJSON(row chunk.Row) (res types.BinaryJS return types.BinaryJSON{}, false, ErrNotSupportedYet.GenWithStackByArgs("CAST-ing Non-JSON Array type to array") } - // TODO: impl the cast(... as ... array) function + arrayVals := make([]any, 0, len(b.args)) + ft := b.tp.ArrayType() + f := convertJSON2Tp(ft.EvalType()) + if f == nil { + return types.BinaryJSON{}, false, ErrNotSupportedYet.GenWithStackByArgs("CAS-ing JSON to the target type") + } + sc := b.ctx.GetSessionVars().StmtCtx + originalOverflowAsWarning := sc.OverflowAsWarning + originIgnoreTruncate := sc.IgnoreTruncate + originTruncateAsWarning := sc.TruncateAsWarning + sc.OverflowAsWarning = false + sc.IgnoreTruncate = false + sc.TruncateAsWarning = false + defer func() { + sc.OverflowAsWarning = originalOverflowAsWarning + sc.IgnoreTruncate = originIgnoreTruncate + sc.TruncateAsWarning = originTruncateAsWarning + }() + for i := 0; i < val.GetElemCount(); i++ { + item, err := f(sc, val.ArrayGetElem(i), ft) + if err != nil { + return types.BinaryJSON{}, false, err + } + arrayVals = append(arrayVals, item) + } + return types.CreateBinaryJSON(arrayVals), false, nil +} - return types.BinaryJSON{}, false, nil +func convertJSON2Tp(eval types.EvalType) func(*stmtctx.StatementContext, types.BinaryJSON, *types.FieldType) (any, error) { + switch eval { + case types.ETString: + return func(sc *stmtctx.StatementContext, item types.BinaryJSON, tp *types.FieldType) (any, error) { + if item.TypeCode != types.JSONTypeCodeString { + return nil, ErrInvalidJSONForFuncIndex + } + return types.ProduceStrWithSpecifiedTp(string(item.GetString()), tp, sc, false) + } + case types.ETInt: + return func(sc *stmtctx.StatementContext, item types.BinaryJSON, tp *types.FieldType) (any, error) { + if item.TypeCode != types.JSONTypeCodeInt64 && item.TypeCode != types.JSONTypeCodeUint64 { + return nil, ErrInvalidJSONForFuncIndex + } + return types.ConvertJSONToInt(sc, item, mysql.HasUnsignedFlag(tp.GetFlag()), tp.GetType()) + } + case types.ETReal, types.ETDecimal: + return func(sc *stmtctx.StatementContext, item types.BinaryJSON, tp *types.FieldType) (any, error) { + if item.TypeCode != types.JSONTypeCodeInt64 && item.TypeCode != types.JSONTypeCodeUint64 && item.TypeCode != types.JSONTypeCodeFloat64 { + return nil, ErrInvalidJSONForFuncIndex + } + return types.ConvertJSONToFloat(sc, item) + } + case types.ETDatetime: + return func(sc *stmtctx.StatementContext, item types.BinaryJSON, tp *types.FieldType) (any, error) { + if (tp.GetType() == mysql.TypeDatetime && item.TypeCode != types.JSONTypeCodeDatetime) || (tp.GetType() == mysql.TypeDate && item.TypeCode != 
types.JSONTypeCodeDate) { + return nil, ErrInvalidJSONForFuncIndex + } + res := item.GetTime() + res.SetType(tp.GetType()) + if tp.GetType() == mysql.TypeDate { + // Truncate hh:mm:ss part if the type is Date. + res.SetCoreTime(types.FromDate(res.Year(), res.Month(), res.Day(), 0, 0, 0, 0)) + } + return res, nil + } + case types.ETDuration: + return func(sc *stmtctx.StatementContext, item types.BinaryJSON, tp *types.FieldType) (any, error) { + if item.TypeCode != types.JSONTypeCodeDuration { + return nil, ErrInvalidJSONForFuncIndex + } + return item.GetDuration(), nil + } + default: + return nil + } } type castAsJSONFunctionClass struct { diff --git a/expression/builtin_cast_test.go b/expression/builtin_cast_test.go index e670aad1e35ae..8208e0a0af0d6 100644 --- a/expression/builtin_cast_test.go +++ b/expression/builtin_cast_test.go @@ -1619,3 +1619,73 @@ func TestCastBinaryStringAsJSONSig(t *testing.T) { require.Equal(t, tt.resultStr, res.String()) } } + +func TestCastArrayFunc(t *testing.T) { + ctx := createContext(t) + tbl := []struct { + input interface{} + expected interface{} + tp *types.FieldType + success bool + buildFuncSuccess bool + }{ + { + []interface{}{int64(-1), int64(2), int64(3)}, + []interface{}{int64(-1), int64(2), int64(3)}, + types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).SetArray(true).BuildP(), + true, + true, + }, + { + []interface{}{int64(-1), int64(2), int64(3)}, + nil, + types.NewFieldTypeBuilder().SetType(mysql.TypeString).SetCharset(charset.CharsetUTF8MB4).SetCollate(charset.CollationUTF8MB4).SetArray(true).BuildP(), + false, + true, + }, + { + []interface{}{"1"}, + nil, + types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetCharset(charset.CharsetBin).SetCollate(charset.CharsetBin).SetArray(true).BuildP(), + false, + true, + }, + { + []interface{}{"1", "2"}, + nil, + types.NewFieldTypeBuilder().SetType(mysql.TypeDouble).SetCharset(charset.CharsetBin).SetCollate(charset.CharsetBin).SetArray(true).BuildP(), + false, + true, + }, + { + []interface{}{int64(-1), 2.1, int64(3)}, + []interface{}{int64(-1), 2.1, int64(3)}, + types.NewFieldTypeBuilder().SetType(mysql.TypeDouble).SetCharset(charset.CharsetBin).SetCollate(charset.CharsetBin).SetArray(true).BuildP(), + true, + true, + }, + } + for _, tt := range tbl { + f, err := BuildCastFunctionWithCheck(ctx, datumsToConstants(types.MakeDatums(types.CreateBinaryJSON(tt.input)))[0], tt.tp) + if tt.buildFuncSuccess { + require.NoError(t, err, tt.input) + } else { + require.Error(t, err, tt.input) + continue + } + + val, isNull, err := f.EvalJSON(ctx, chunk.Row{}) + if tt.success { + require.NoError(t, err, tt.input) + if tt.expected == nil { + require.True(t, isNull, tt.input) + } else { + j1 := types.CreateBinaryJSON(tt.expected) + cmp := types.CompareBinaryJSON(j1, val) + require.Equal(t, 0, cmp, tt.input) + } + } else { + require.Error(t, err, tt.input) + } + } +} diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go index 4411f4b6b90ff..dec5d06983679 100644 --- a/expression/builtin_compare.go +++ b/expression/builtin_compare.go @@ -1575,17 +1575,18 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express // To keep the result be compatible with MySQL, refine `int non-constant str constant` // here and skip this refine operation in all other cases for safety. 
if (arg0IsInt && !arg0IsCon && arg1IsString && arg1IsCon) || (arg1IsInt && !arg1IsCon && arg0IsString && arg0IsCon) { - ctx.GetSessionVars().StmtCtx.SkipPlanCache = true + var reason error if arg1IsString { - ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg1.String())) + reason = errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg1.String()) } else { // arg0IsString - ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg0.String())) + reason = errors.Errorf("skip plan-cache: '%v' may be converted to INT", arg0.String()) } + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(reason) RemoveMutableConst(ctx, args) } else { return args } - } else if ctx.GetSessionVars().StmtCtx.SkipPlanCache { + } else if !ctx.GetSessionVars().StmtCtx.UseCache { // We should remove the mutable constant for correctness, because its value may be changed. RemoveMutableConst(ctx, args) } diff --git a/expression/builtin_other.go b/expression/builtin_other.go index c62278c2bd101..c5bd6738aa9df 100644 --- a/expression/builtin_other.go +++ b/expression/builtin_other.go @@ -165,8 +165,7 @@ func (c *inFunctionClass) verifyArgs(ctx sessionctx.Context, args []Expression) case columnType.GetType() == mysql.TypeBit && constant.Value.Kind() == types.KindInt64: if constant.Value.GetInt64() < 0 { if MaybeOverOptimized4PlanCache(ctx, args) { - ctx.GetSessionVars().StmtCtx.SkipPlanCache = true - ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("skip plan-cache: Bit Column in (%v)", constant.Value.GetInt64())) + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: Bit Column in (%v)", constant.Value.GetInt64())) } continue } diff --git a/expression/errors.go b/expression/errors.go index c56737ec2fae3..62d5b89d547b9 100644 --- a/expression/errors.go +++ b/expression/errors.go @@ -38,6 +38,9 @@ var ( ErrInternal = dbterror.ClassOptimizer.NewStd(mysql.ErrInternal) ErrNoDB = dbterror.ClassOptimizer.NewStd(mysql.ErrNoDB) ErrNotSupportedYet = dbterror.ClassExpression.NewStd(mysql.ErrNotSupportedYet) + ErrInvalidJSONForFuncIndex = dbterror.ClassExpression.NewStd(mysql.ErrInvalidJSONValueForFuncIndex) + ErrDataOutOfRangeFuncIndex = dbterror.ClassExpression.NewStd(mysql.ErrDataOutOfRangeFunctionalIndex) + ErrFuncIndexDataIsTooLong = dbterror.ClassExpression.NewStd(mysql.ErrFunctionalIndexDataIsTooLong) // All the un-exported errors are defined here: errFunctionNotExists = dbterror.ClassExpression.NewStd(mysql.ErrSpDoesNotExist) diff --git a/expression/expr_to_pb_test.go b/expression/expr_to_pb_test.go index 1025f3c7fdcb1..341c03b552804 100644 --- a/expression/expr_to_pb_test.go +++ b/expression/expr_to_pb_test.go @@ -1239,6 +1239,11 @@ func TestExprPushDownToFlash(t *testing.T) { require.Equal(t, tipb.ScalarFuncSig_CastTimeAsDuration, function.(*ScalarFunction).Function.PbCode()) exprs = append(exprs, function) + // Unhex + function, err = NewFunction(mock.NewContext(), ast.Unhex, types.NewFieldType(mysql.TypeString), stringColumn) + require.NoError(t, err) + exprs = append(exprs, function) + pushed, remained = PushDownExprs(sc, exprs, client, kv.TiFlash) require.Len(t, pushed, len(exprs)) require.Len(t, remained, 0) diff --git a/expression/expression.go b/expression/expression.go index ee4ba67f1f854..00697c2df68ea 100644 --- a/expression/expression.go +++ b/expression/expression.go @@ -816,7 +816,7 @@ func SplitDNFItems(onExpr Expression) []Expression { // If the Expression is a 
non-constant value, it means the result is unknown. func EvaluateExprWithNull(ctx sessionctx.Context, schema *Schema, expr Expression) Expression { if MaybeOverOptimized4PlanCache(ctx, []Expression{expr}) { - return expr + ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.New("skip plan-cache: %v affects null check")) } if ctx.GetSessionVars().StmtCtx.InNullRejectCheck { expr, _ = evaluateExprWithNullInNullRejectCheck(ctx, schema, expr) @@ -1267,7 +1267,7 @@ func scalarExprSupportedByFlash(function *ScalarFunction) bool { } case ast.IsTruthWithNull, ast.IsTruthWithoutNull, ast.IsFalsity: return true - case ast.Hex, ast.Bin: + case ast.Hex, ast.Unhex, ast.Bin: return true case ast.GetFormat: return true diff --git a/expression/expression_test.go b/expression/expression_test.go index 79ee1970ba800..4a24cb5eef759 100644 --- a/expression/expression_test.go +++ b/expression/expression_test.go @@ -75,8 +75,9 @@ func TestEvaluateExprWithNullAndParameters(t *testing.T) { ltWithParam, err := newFunctionForTest(ctx, ast.LT, col0, param) require.NoError(t, err) res = EvaluateExprWithNull(ctx, schema, ltWithParam) - _, isScalarFunc := res.(*ScalarFunction) - require.True(t, isScalarFunc) // the expression with parameters is not evaluated + _, isConst := res.(*Constant) + require.True(t, isConst) // this expression is evaluated and skip-plan cache flag is set. + require.True(t, !ctx.GetSessionVars().StmtCtx.UseCache) } func TestEvaluateExprWithNullNoChangeRetType(t *testing.T) { diff --git a/expression/integration_serial_test.go b/expression/integration_serial_test.go index 77574b4e309a2..b70b7be4a5070 100644 --- a/expression/integration_serial_test.go +++ b/expression/integration_serial_test.go @@ -3762,16 +3762,6 @@ func TestSetVariables(t *testing.T) { _, err = tk.Exec("set @@global.max_prepared_stmt_count='';") require.Error(t, err) require.Error(t, err, variable.ErrWrongTypeForVar.GenWithStackByArgs("max_prepared_stmt_count").Error()) - - tk.MustExec("set @@global.tidb_enable_concurrent_ddl=1") - tk.MustQuery("select @@global.tidb_enable_concurrent_ddl").Check(testkit.Rows("1")) - require.True(t, variable.EnableConcurrentDDL.Load()) - tk.MustExec("set @@global.tidb_enable_metadata_lock=0") - tk.MustExec("set @@global.tidb_enable_concurrent_ddl=0") - tk.MustQuery("select @@global.tidb_enable_concurrent_ddl").Check(testkit.Rows("0")) - require.False(t, variable.EnableConcurrentDDL.Load()) - testkit.NewTestKit(t, store).MustQuery("select @@global.tidb_enable_concurrent_ddl").Check(testkit.Rows("0")) - tk.MustExec("set @@global.tidb_enable_concurrent_ddl=1") } func TestPreparePlanCache(t *testing.T) { diff --git a/expression/multi_valued_index_test.go b/expression/multi_valued_index_test.go index 788dadddab02c..97e59993e23b4 100644 --- a/expression/multi_valued_index_test.go +++ b/expression/multi_valued_index_test.go @@ -15,10 +15,20 @@ package expression_test import ( + "context" + "fmt" "testing" "github.com/pingcap/tidb/errno" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessiontxn" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/stretchr/testify/require" ) func TestMultiValuedIndexDDL(t *testing.T) { @@ -36,7 +46,7 @@ func TestMultiValuedIndexDDL(t *testing.T) { tk.MustExec("drop table t") tk.MustGetErrCode("CREATE TABLE t(x INT, KEY k ((1 AND CAST(JSON_ARRAY(x) AS 
UNSIGNED ARRAY))));", errno.ErrNotSupportedYet) tk.MustGetErrCode("CREATE TABLE t1 (f1 json, key mvi((cast(cast(f1 as unsigned array) as unsigned array))));", errno.ErrNotSupportedYet) - tk.MustGetErrCode("CREATE TABLE t1 (f1 json, key mvi((cast(f1->>'$[*]' as unsigned array))));", errno.ErrInvalidJSONData) + tk.MustGetErrCode("CREATE TABLE t1 (f1 json, key mvi((cast(f1->>'$[*]' as unsigned array))));", errno.ErrInvalidTypeForJSON) tk.MustGetErrCode("CREATE TABLE t1 (f1 json, key mvi((cast(f1->'$[*]' as year array))));", errno.ErrNotSupportedYet) tk.MustGetErrCode("CREATE TABLE t1 (f1 json, key mvi((cast(f1->'$[*]' as json array))));", errno.ErrNotSupportedYet) tk.MustGetErrCode("CREATE TABLE t1 (f1 json, key mvi((cast(f1->'$[*]' as char(10) charset gbk array))));", errno.ErrNotSupportedYet) @@ -56,3 +66,419 @@ func TestMultiValuedIndexDDL(t *testing.T) { tk.MustExec("drop table t") tk.MustExec("create table t(a json, b int, index idx3(b, (cast(a as signed array))));") } + +func TestMultiValuedIndexDML(t *testing.T) { + store := testkit.CreateMockStore(t) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("USE test;") + + mode := []string{`''`, `default`} + + for _, m := range mode { + tk.MustExec(fmt.Sprintf("set @@sql_mode=%s", m)) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as unsigned array))));`) + tk.MustExec(`insert into t values ('[1,2,3]');`) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrDataOutOfRangeFunctionalIndex) + tk.MustGetErrCode(`insert into t values ('["1"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["a"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as signed array))));`) + tk.MustExec(`insert into t values ('[1,2,3]');`) + tk.MustExec(`insert into t values ('[-1]');`) + tk.MustGetErrCode(`insert into t values ('["1"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["a"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, 
errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as double array))));`) + tk.MustExec(`insert into t values ('[1,2,3]');`) + tk.MustExec(`insert into t values ('[-1]');`) + tk.MustGetErrCode(`insert into t values ('["1"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["a"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values ('[1.2]');`) + tk.MustExec(`insert into t values ('[1.0]');`) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as decimal(10, 2) array))));`) + tk.MustExec(`insert into t values ('[1,2,3]');`) + tk.MustExec(`insert into t values ('[-1]');`) + tk.MustGetErrCode(`insert into t values ('["1"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["a"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values ('[1.2]');`) + tk.MustExec(`insert into t values ('[1.0]');`) + tk.MustExec(`insert into t values ('[1.1102]');`) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as char(1) array))));`) + tk.MustGetErrCode(`insert into t values ('[1,2,3]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values ('["1"]');`) + tk.MustExec(`insert into t values ('["a"]');`) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrFunctionalIndexDataIsTooLong) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t 
values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as char(2) array))));`) + tk.MustGetErrCode(`insert into t values ('[1,2,3]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values ('["1"]');`) + tk.MustExec(`insert into t values ('["a"]');`) + tk.MustExec(`insert into t values ('["汉字"]');`) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as binary(1) array))));`) + tk.MustGetErrCode(`insert into t values ('[1,2,3]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values ('["1"]');`) + tk.MustExec(`insert into t values ('["a"]');`) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrFunctionalIndexDataIsTooLong) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as binary(2) array))));`) + tk.MustGetErrCode(`insert into t values ('[1,2,3]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values ('["1"]');`) + tk.MustExec(`insert into t values ('["a"]');`) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrFunctionalIndexDataIsTooLong) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert 
into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as date array))));`) + tk.MustGetErrCode(`insert into t values ('[1,2,3]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["1"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["a"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values (json_array(cast("2022-02-02" as date)));`) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as time array))));`) + tk.MustGetErrCode(`insert into t values ('[1,2,3]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["1"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["a"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values (json_array(cast("11:00:00" as time)));`) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a json, index idx((cast(a as datetime array))));`) + tk.MustGetErrCode(`insert into t values ('[1,2,3]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[-1]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["1"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["a"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('["汉字"]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.2]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values ('[1.0]');`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values (json_array(cast("11:00:00" as time)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustGetErrCode(`insert into t values 
(json_array(cast("2022-02-02" as date)));`, errno.ErrInvalidJSONValueForFuncIndex) + tk.MustExec(`insert into t values (json_array(cast("2022-02-02 11:00:00" as datetime)));`) + tk.MustGetErrCode(`insert into t values (json_array(cast('{"a":1}' as json)));`, errno.ErrInvalidJSONValueForFuncIndex) + } +} + +func TestWriteMultiValuedIndex(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1(pk int primary key, a json, index idx((cast(a as signed array))))") + tk.MustExec("insert into t1 values (1, '[1,2,2,3]')") + tk.MustExec("insert into t1 values (2, '[1,2,3]')") + tk.MustExec("insert into t1 values (3, '[]')") + tk.MustExec("insert into t1 values (4, '[2,3,4]')") + tk.MustExec("insert into t1 values (5, null)") + + t1, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) + require.NoError(t, err) + for _, index := range t1.Indices() { + if index.Meta().MVIndex { + checkCount(t, t1.IndexPrefix(), index, store, 10) + checkKey(t, t1.IndexPrefix(), index, store, [][]types.Datum{ + {types.NewDatum(nil), types.NewIntDatum(5)}, + {types.NewIntDatum(1), types.NewIntDatum(1)}, + {types.NewIntDatum(1), types.NewIntDatum(2)}, + {types.NewIntDatum(2), types.NewIntDatum(1)}, + {types.NewIntDatum(2), types.NewIntDatum(2)}, + {types.NewIntDatum(2), types.NewIntDatum(4)}, + {types.NewIntDatum(3), types.NewIntDatum(1)}, + {types.NewIntDatum(3), types.NewIntDatum(2)}, + {types.NewIntDatum(3), types.NewIntDatum(4)}, + {types.NewIntDatum(4), types.NewIntDatum(4)}, + }) + } + } + tk.MustExec("delete from t1") + for _, index := range t1.Indices() { + if index.Meta().MVIndex { + checkCount(t, t1.IndexPrefix(), index, store, 0) + } + } + + tk.MustExec("drop table t1") + tk.MustExec("create table t1(pk int primary key, a json, index idx((cast(a as char(5) array))))") + tk.MustExec("insert into t1 values (1, '[\"abc\", \"abc \"]')") + tk.MustExec("insert into t1 values (2, '[\"b\"]')") + tk.MustExec("insert into t1 values (3, '[\"b \"]')") + + t1, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) + require.NoError(t, err) + for _, index := range t1.Indices() { + if index.Meta().MVIndex { + checkCount(t, t1.IndexPrefix(), index, store, 4) + checkKey(t, t1.IndexPrefix(), index, store, [][]types.Datum{ + {types.NewBytesDatum([]byte("abc")), types.NewIntDatum(1)}, + {types.NewBytesDatum([]byte("abc ")), types.NewIntDatum(1)}, + {types.NewBytesDatum([]byte("b")), types.NewIntDatum(2)}, + {types.NewBytesDatum([]byte("b ")), types.NewIntDatum(3)}, + }) + } + } + + tk.MustExec("update t1 set a = json_array_append(a, '$', 'bcd') where pk = 1") + tk.MustExec("update t1 set a = '[]' where pk = 2") + tk.MustExec("update t1 set a = '[\"abc\"]' where pk = 3") + + for _, index := range t1.Indices() { + if index.Meta().MVIndex { + checkCount(t, t1.IndexPrefix(), index, store, 4) + checkKey(t, t1.IndexPrefix(), index, store, [][]types.Datum{ + {types.NewBytesDatum([]byte("abc")), types.NewIntDatum(1)}, + {types.NewBytesDatum([]byte("abc")), types.NewIntDatum(3)}, + {types.NewBytesDatum([]byte("abc ")), types.NewIntDatum(1)}, + {types.NewBytesDatum([]byte("bcd")), types.NewIntDatum(1)}, + }) + } + } + + tk.MustExec("delete from t1") + for _, index := range t1.Indices() { + if index.Meta().MVIndex { + checkCount(t, t1.IndexPrefix(), index, store, 0) + } + } +} + +func TestWriteMultiValuedIndexPartitionTable(t *testing.T) { + store, dom := 
testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table t1 +( + pk int primary key, + a json, + index idx ((cast(a as signed array))) +) partition by range columns (pk) (partition p0 values less than (10), partition p1 values less than (20));`) + tk.MustExec("insert into t1 values (1, '[1,2,2,3]')") + tk.MustExec("insert into t1 values (11, '[1,2,3]')") + tk.MustExec("insert into t1 values (2, '[]')") + tk.MustExec("insert into t1 values (12, '[2,3,4]')") + tk.MustExec("insert into t1 values (3, null)") + tk.MustExec("insert into t1 values (13, null)") + + t1, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) + require.NoError(t, err) + + expect := map[string]struct { + count int + vals [][]types.Datum + }{ + "p0": {4, [][]types.Datum{ + {types.NewDatum(nil), types.NewIntDatum(3)}, + {types.NewIntDatum(1), types.NewIntDatum(1)}, + {types.NewIntDatum(2), types.NewIntDatum(1)}, + {types.NewIntDatum(3), types.NewIntDatum(1)}, + }}, + "p1": {7, [][]types.Datum{ + {types.NewDatum(nil), types.NewIntDatum(13)}, + {types.NewIntDatum(1), types.NewIntDatum(11)}, + {types.NewIntDatum(2), types.NewIntDatum(11)}, + {types.NewIntDatum(2), types.NewIntDatum(12)}, + {types.NewIntDatum(3), types.NewIntDatum(11)}, + {types.NewIntDatum(3), types.NewIntDatum(12)}, + {types.NewIntDatum(4), types.NewIntDatum(12)}, + }}, + } + + for _, def := range t1.Meta().GetPartitionInfo().Definitions { + partition := t1.(table.PartitionedTable).GetPartition(def.ID) + for _, index := range partition.Indices() { + if index.Meta().MVIndex { + checkCount(t, partition.IndexPrefix(), index, store, expect[def.Name.L].count) + checkKey(t, partition.IndexPrefix(), index, store, expect[def.Name.L].vals) + } + } + } + + tk.MustExec("delete from t1") + for _, def := range t1.Meta().GetPartitionInfo().Definitions { + partition := t1.(table.PartitionedTable).GetPartition(def.ID) + for _, index := range partition.Indices() { + if index.Meta().MVIndex { + checkCount(t, partition.IndexPrefix(), index, store, 0) + } + } + } +} + +func TestWriteMultiValuedIndexUnique(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1(pk int primary key, a json, unique index idx((cast(a as signed array))))") + tk.MustExec("insert into t1 values (1, '[1,2,2]')") + tk.MustGetErrCode("insert into t1 values (2, '[1]')", errno.ErrDupEntry) + tk.MustExec("insert into t1 values (3, '[3,3,4]')") + + t1, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) + require.NoError(t, err) + for _, index := range t1.Indices() { + if index.Meta().MVIndex { + checkCount(t, t1.IndexPrefix(), index, store, 4) + checkKey(t, t1.IndexPrefix(), index, store, [][]types.Datum{ + {types.NewIntDatum(1)}, + {types.NewIntDatum(2)}, + {types.NewIntDatum(3)}, + {types.NewIntDatum(4)}, + }) + } + } +} + +func TestWriteMultiValuedIndexComposite(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1(pk int primary key, a json, c int, d int, index idx(c, (cast(a as signed array)), d))") + tk.MustExec("insert into t1 values (1, '[1,2,2]', 1, 1)") + tk.MustExec("insert into t1 values (2, '[2,2,2]', 2, 2)") + tk.MustExec("insert into t1 values (3, '[3,3,4]', 3, 3)") + tk.MustExec("insert into t1 values (4, null, 4, 4)") + tk.MustExec("insert into t1 values 
(5, '[]', 5, 5)") + + t1, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) + require.NoError(t, err) + for _, index := range t1.Indices() { + if index.Meta().MVIndex { + checkCount(t, t1.IndexPrefix(), index, store, 6) + checkKey(t, t1.IndexPrefix(), index, store, [][]types.Datum{ + {types.NewIntDatum(1), types.NewIntDatum(1), types.NewIntDatum(1), types.NewIntDatum(1)}, + {types.NewIntDatum(1), types.NewIntDatum(2), types.NewIntDatum(1), types.NewIntDatum(1)}, + {types.NewIntDatum(2), types.NewIntDatum(2), types.NewIntDatum(2), types.NewIntDatum(2)}, + {types.NewIntDatum(3), types.NewIntDatum(3), types.NewIntDatum(3), types.NewIntDatum(3)}, + {types.NewIntDatum(3), types.NewIntDatum(4), types.NewIntDatum(3), types.NewIntDatum(3)}, + {types.NewIntDatum(4), types.NewDatum(nil), types.NewIntDatum(4), types.NewIntDatum(4)}, + }) + } + } +} + +func checkCount(t *testing.T, prefix kv.Key, index table.Index, store kv.Storage, except int) { + c := 0 + checkIndex(t, prefix, index, store, func(it kv.Iterator) { + c++ + }) + require.Equal(t, except, c) +} + +func checkKey(t *testing.T, prefix kv.Key, index table.Index, store kv.Storage, except [][]types.Datum) { + idx := 0 + checkIndex(t, prefix, index, store, func(it kv.Iterator) { + indexKey := decodeIndexKey(t, it.Key()) + require.Equal(t, except[idx], indexKey) + idx++ + }) +} + +func checkIndex(t *testing.T, prefix kv.Key, index table.Index, store kv.Storage, fn func(kv.Iterator)) { + startKey := codec.EncodeInt(prefix, index.Meta().ID) + prefix.Next() + se := testkit.NewTestKit(t, store).Session() + err := sessiontxn.NewTxn(context.Background(), se) + require.NoError(t, err) + txn, err := se.Txn(true) + require.NoError(t, err) + it, err := txn.Iter(startKey, prefix.PrefixNext()) + require.NoError(t, err) + for it.Valid() && it.Key().HasPrefix(prefix) { + fn(it) + err = it.Next() + require.NoError(t, err) + } + it.Close() + se.Close() +} + +func decodeIndexKey(t *testing.T, key kv.Key) []types.Datum { + var idLen = 8 + var prefixLen = 1 + idLen /*tableID*/ + 2 + _, _, isRecord, err := tablecodec.DecodeKeyHead(key) + require.NoError(t, err) + require.False(t, isRecord) + indexKey := key[prefixLen+idLen:] + var datumValues []types.Datum + for len(indexKey) > 0 { + remain, d, err := codec.DecodeOne(indexKey) + require.NoError(t, err) + datumValues = append(datumValues, d) + indexKey = remain + } + return datumValues +} diff --git a/expression/util.go b/expression/util.go index e19ec047e43d0..0de4253aec5f9 100644 --- a/expression/util.go +++ b/expression/util.go @@ -1241,7 +1241,7 @@ func ContainCorrelatedColumn(exprs []Expression) bool { // TODO: Do more careful check here. func MaybeOverOptimized4PlanCache(ctx sessionctx.Context, exprs []Expression) bool { // If we do not enable plan cache, all the optimization can work correctly. 
- if !ctx.GetSessionVars().StmtCtx.UseCache || ctx.GetSessionVars().StmtCtx.SkipPlanCache { + if !ctx.GetSessionVars().StmtCtx.UseCache { return false } return containMutableConst(ctx, exprs) diff --git a/go.mod b/go.mod index f4d3fb93f9e98..d87619f7dc67c 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,7 @@ require ( github.com/jingyugao/rowserrcheck v1.1.1 github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df github.com/kisielk/errcheck v1.6.2 - github.com/klauspost/compress v1.15.1 + github.com/klauspost/compress v1.15.13 github.com/kyoh86/exportloopref v0.1.8 github.com/lestrrat-go/jwx/v2 v2.0.6 github.com/mgechev/revive v1.2.4 @@ -66,19 +66,19 @@ require ( github.com/opentracing/basictracer-go v1.0.0 github.com/opentracing/opentracing-go v1.2.0 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 - github.com/pingcap/badger v1.5.1-0.20220314162537-ab58fbf40580 - github.com/pingcap/errors v0.11.5-0.20220729040631-518f63d66278 + github.com/pingcap/badger v1.5.1-0.20221229114011-ddffaa0fff7a + github.com/pingcap/errors v0.11.5-0.20221009092201-b66cddb77c32 github.com/pingcap/failpoint v0.0.0-20220423142525-ae43b7f4e5c3 github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 - github.com/pingcap/kvproto v0.0.0-20221130022225-6c56ac56fe5f + github.com/pingcap/kvproto v0.0.0-20221213093948-9ccc6beaf0aa github.com/pingcap/log v1.1.1-0.20221116035753-734d527bc87c github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4 github.com/pingcap/tidb/parser v0.0.0-20211011031125-9b13dc409c5e github.com/pingcap/tipb v0.0.0-20221123081521-2fb828910813 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.37.0 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 + github.com/prometheus/common v0.39.0 github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64 github.com/shirou/gopsutil/v3 v3.22.9 github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 @@ -89,7 +89,7 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tdakkota/asciicheck v0.1.1 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.3 + github.com/tikv/client-go/v2 v2.0.4-0.20221226080148-018c59dbd837 github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07 github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 github.com/twmb/murmur3 v1.1.3 @@ -107,11 +107,11 @@ require ( go.uber.org/atomic v1.10.0 go.uber.org/automaxprocs v1.4.0 go.uber.org/goleak v1.2.0 - go.uber.org/multierr v1.8.0 - go.uber.org/zap v1.23.0 + go.uber.org/multierr v1.9.0 + go.uber.org/zap v1.24.0 golang.org/x/exp v0.0.0-20221023144134-a1e5550cf13e golang.org/x/net v0.4.0 - golang.org/x/oauth2 v0.2.0 + golang.org/x/oauth2 v0.3.0 golang.org/x/sync v0.1.0 golang.org/x/sys v0.3.0 golang.org/x/term v0.3.0 @@ -140,7 +140,7 @@ require ( github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect github.com/cockroachdb/redact v1.0.8 // indirect @@ -195,7 +195,7 @@ require ( 
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect @@ -209,7 +209,7 @@ require ( github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/prometheus/tsdb v0.8.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect @@ -219,6 +219,7 @@ require ( github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/tiancaiamao/gp v0.0.0-20221221095600-1a473d1f9b4b // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect diff --git a/go.sum b/go.sum index 8c21c0326306d..efeb0a0fb5537 100644 --- a/go.sum +++ b/go.sum @@ -151,8 +151,9 @@ github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 h1:E7LT642ysztPWE0dfz43cWOvMiF42DyTRC+eZIaO4yI= @@ -317,7 +318,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -614,8 +614,8 @@ github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress 
v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0= +github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= @@ -674,8 +674,9 @@ github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxatome/go-testdeep v1.11.0 h1:Tgh5efyCYyJFGUYiT0qxBSIDeXw0F5zSoatlou685kk= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= @@ -760,8 +761,8 @@ github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rK github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pingcap/badger v1.5.1-0.20220314162537-ab58fbf40580 h1:MKVFZuqFvAMiDtv3AbihOQ6rY5IE8LWflI1BuZ/hF0Y= -github.com/pingcap/badger v1.5.1-0.20220314162537-ab58fbf40580/go.mod h1:upwDfet29M5y5koWilbWWA6ca3Lr0YVuzwX/DK58Vdk= +github.com/pingcap/badger v1.5.1-0.20221229114011-ddffaa0fff7a h1:QB16qn8wx5X4SRn3/5axrjPMNS3WRt87+5Bfrnmt6IA= +github.com/pingcap/badger v1.5.1-0.20221229114011-ddffaa0fff7a/go.mod h1:p8QnkZnmyV8L/M/jzYb8rT7kv3bz9m7bn1Ju94wDifs= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1qi//SaqxJVI4inOqljg/Aj5/390= @@ -770,8 +771,8 @@ github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTw github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod 
h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= -github.com/pingcap/errors v0.11.5-0.20220729040631-518f63d66278 h1:3Dm0DWeQlwV8LbpQxP2tojHhxd9aY59KI+QN0ns6bBo= -github.com/pingcap/errors v0.11.5-0.20220729040631-518f63d66278/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= +github.com/pingcap/errors v0.11.5-0.20221009092201-b66cddb77c32 h1:m5ZsBa5o/0CkzZXfXLaThzKuR85SnHHetqBCpzQ30h8= +github.com/pingcap/errors v0.11.5-0.20221009092201-b66cddb77c32/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/failpoint v0.0.0-20220423142525-ae43b7f4e5c3 h1:kJolJWbyadVeL8RKBlqmXQR7FRKPsIeU85TUYyhbhiQ= github.com/pingcap/failpoint v0.0.0-20220423142525-ae43b7f4e5c3/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= @@ -781,11 +782,11 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20221026112947-f8d61344b172/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= -github.com/pingcap/kvproto v0.0.0-20221130022225-6c56ac56fe5f h1:46ZD6xzQWJ8Jkeal/U7SqkX030Mgs8DAn6QV/9zbqOQ= -github.com/pingcap/kvproto v0.0.0-20221130022225-6c56ac56fe5f/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= +github.com/pingcap/kvproto v0.0.0-20221213093948-9ccc6beaf0aa h1:v0Z0nC0knwWHn3e9br8EMNfLBB14QDULn142UGjiTMQ= +github.com/pingcap/kvproto v0.0.0-20221213093948-9ccc6beaf0aa/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= +github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/log v1.1.1-0.20221116035753-734d527bc87c h1:crhkw6DD+07Bg1wYhW5Piw+kYNKZqFQqfC2puUf6gMI= github.com/pingcap/log v1.1.1-0.20221116035753-734d527bc87c/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= @@ -811,31 +812,29 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= 
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64 h1:3DyLm+sTAJkfLyR/1pJ3L+fU2lFufWbpcgMFlGtqeyA= github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64/go.mod h1:oYrT4Vs22/NcnoVYXt5m4cIHP+znvgyusahVpyETKTw= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -932,8 +931,10 @@ github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpR github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= github.com/tiancaiamao/appdash 
v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= -github.com/tikv/client-go/v2 v2.0.3 h1:/glZOHs/K2pkCioDVae+aThUHFYRYQkEgY4NUTgfh+s= -github.com/tikv/client-go/v2 v2.0.3/go.mod h1:MDT4J9LzgS7Bj1DnEq6Gk/puy6mp8TgUC92zGEVVLLg= +github.com/tiancaiamao/gp v0.0.0-20221221095600-1a473d1f9b4b h1:4RNtqw1/tW67qP9fFgfQpTVd7DrfkaAWu4vsC18QmBo= +github.com/tiancaiamao/gp v0.0.0-20221221095600-1a473d1f9b4b/go.mod h1:h4xBhSNtOeEosLJ4P7JyKXX7Cabg7AVkWCK5gV2vOrM= +github.com/tikv/client-go/v2 v2.0.4-0.20221226080148-018c59dbd837 h1:m6glgBGCIds9QURbk8Mn+8mjLKDcv6nWrNwYh92fydQ= +github.com/tikv/client-go/v2 v2.0.4-0.20221226080148-018c59dbd837/go.mod h1:ptS8K+VBrEH2gIS3JxaiFSSLfDDyuS2xcdLozOtBWBw= github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07 h1:ckPpxKcl75mO2N6a4cJXiZH43hvcHPpqc9dh1TmH1nc= github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07/go.mod h1:CipBxPfxPUME+BImx9MUYXCnAVLS3VJUr3mnSJwh40A= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= @@ -1053,8 +1054,9 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1062,8 +1064,9 @@ go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.20.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1182,7 +1185,6 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net 
v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1210,8 +1212,8 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU= -golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs= +golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/infoschema/cluster_tables_test.go b/infoschema/cluster_tables_test.go index 73488028a26ec..89244bf984631 100644 --- a/infoschema/cluster_tables_test.go +++ b/infoschema/cluster_tables_test.go @@ -41,7 +41,6 @@ import ( "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/server" - "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/helper" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/mockstore/mockstorage" @@ -817,10 +816,6 @@ func (s *clusterTablesSuite) newTestKitWithRoot(t *testing.T) *testkit.TestKit { } func TestMDLView(t *testing.T) { - if !variable.EnableConcurrentDDL.Load() { - t.Skipf("test requires concurrent ddl") - } - // setup suite s := new(clusterTablesSuite) s.store, s.dom = testkit.CreateMockStoreAndDomain(t) diff --git a/infoschema/error.go b/infoschema/error.go index a7e4929a35bcc..d89d3e3dc8201 100644 --- a/infoschema/error.go +++ b/infoschema/error.go @@ -64,6 +64,8 @@ var ( ErrKeyNotExists = dbterror.ClassSchema.NewStd(mysql.ErrKeyDoesNotExist) // ErrCannotAddForeign returns for foreign key exists. ErrCannotAddForeign = dbterror.ClassSchema.NewStd(mysql.ErrCannotAddForeign) + // ErrForeignKeyOnPartitioned returns for foreign key on partition table. + ErrForeignKeyOnPartitioned = dbterror.ClassSchema.NewStd(mysql.ErrForeignKeyOnPartitioned) // ErrForeignKeyNotMatch returns for foreign key not match. ErrForeignKeyNotMatch = dbterror.ClassSchema.NewStd(mysql.ErrWrongFkDef) // ErrIndexExists returns for index already exists. 
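The ErrForeignKeyOnPartitioned error added above follows the same dbterror.ClassSchema pattern as its neighbours. Below is a minimal, self-contained sketch of the kind of guard that would surface it when a foreign key is declared on a partitioned table; the tableInfo shape and the checkForeignKeyOnPartition helper are illustrative stand-ins, not the actual DDL-layer code.

// Illustrative sketch only: shows how a DDL-side check could surface the new
// error for foreign keys on partitioned tables. The types below stand in for
// the real model.TableInfo and infoschema.ErrForeignKeyOnPartitioned.
package main

import (
	"errors"
	"fmt"
)

// stand-in for infoschema.ErrForeignKeyOnPartitioned (mysql.ErrForeignKeyOnPartitioned)
var errForeignKeyOnPartitioned = errors.New("foreign keys are not yet supported in conjunction with partitioning")

type tableInfo struct {
	Name        string
	Partitioned bool // true when the table has a PARTITION BY clause
	ForeignKeys int  // number of foreign key constraints being defined
}

// checkForeignKeyOnPartition is a hypothetical helper: reject the statement
// early if a partitioned table would carry foreign key constraints.
func checkForeignKeyOnPartition(tbl tableInfo) error {
	if tbl.Partitioned && tbl.ForeignKeys > 0 {
		return errForeignKeyOnPartitioned
	}
	return nil
}

func main() {
	fmt.Println(checkForeignKeyOnPartition(tableInfo{Name: "t1", Partitioned: true, ForeignKeys: 1}))
	fmt.Println(checkForeignKeyOnPartition(tableInfo{Name: "t2", Partitioned: false, ForeignKeys: 1}))
}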
diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index d4d4c4fa588f7..574529a822b13 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1670,10 +1670,6 @@ func TestVariablesInfo(t *testing.T) { tk := testkit.NewTestKit(t, store) - if !variable.EnableConcurrentDDL.Load() { - t.Skip("skip test when concurrent DDL is disabled") - } - tk.MustExec("use information_schema") tk.MustExec("SET GLOBAL innodb_compression_level = 8;") diff --git a/kv/mpp.go b/kv/mpp.go index 2e398af595650..32e9186506067 100644 --- a/kv/mpp.go +++ b/kv/mpp.go @@ -27,12 +27,20 @@ type MPPTaskMeta interface { GetAddress() string } +// MPPQueryID means the global unique id of a mpp query. +type MPPQueryID struct { + QueryTs uint64 // timestamp of query execution, used for TiFlash minTSO schedule + LocalQueryID uint64 // unique mpp query id in local tidb memory. + ServerID uint64 +} + // MPPTask means the minimum execution unit of a mpp computation job. type MPPTask struct { - Meta MPPTaskMeta // on which store this task will execute - ID int64 // mppTaskID - StartTs uint64 - TableID int64 // physical table id + Meta MPPTaskMeta // on which store this task will execute + ID int64 // mppTaskID + StartTs uint64 + MppQueryID MPPQueryID + TableID int64 // physical table id PartitionTableIDs []int64 } @@ -40,8 +48,11 @@ type MPPTask struct { // ToPB generates the pb structure. func (t *MPPTask) ToPB() *mpp.TaskMeta { meta := &mpp.TaskMeta{ - StartTs: t.StartTs, - TaskId: t.ID, + StartTs: t.StartTs, + QueryTs: t.MppQueryID.QueryTs, + LocalQueryId: t.MppQueryID.LocalQueryID, + ServerId: t.MppQueryID.ServerID, + TaskId: t.ID, } if t.ID != -1 { meta.Address = t.Meta.GetAddress() @@ -70,10 +81,11 @@ type MPPDispatchRequest struct { IsRoot bool // root task returns data to tidb directly. Timeout uint64 // If task is assigned but doesn't receive a connect request during timeout, the task should be destroyed. // SchemaVer is for any schema-ful storage (like tiflash) to validate schema correctness if necessary. - SchemaVar int64 - StartTs uint64 - ID int64 // identify a single task - State MppTaskStates + SchemaVar int64 + StartTs uint64 + MppQueryID MPPQueryID + ID int64 // identify a single task + State MppTaskStates } // MPPClient accepts and processes mpp requests. @@ -83,7 +95,7 @@ type MPPClient interface { ConstructMPPTasks(context.Context, *MPPBuildTasksRequest, time.Duration) ([]MPPTaskMeta, error) // DispatchMPPTasks dispatches ALL mpp requests at once, and returns an iterator that transfers the data. - DispatchMPPTasks(ctx context.Context, vars interface{}, reqs []*MPPDispatchRequest, needTriggerFallback bool, startTs uint64) Response + DispatchMPPTasks(ctx context.Context, vars interface{}, reqs []*MPPDispatchRequest, needTriggerFallback bool, startTs uint64, mppQueryID MPPQueryID) Response } // MPPBuildTasksRequest request the stores allocation for a mpp plan fragment. 
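The MPPQueryID introduced in kv/mpp.go above is what ties each dispatched task back to one globally unique MPP query: the (QueryTs, LocalQueryID, ServerID) triple is copied into the protobuf task meta by MPPTask.ToPB and travels with every MPPDispatchRequest. The following self-contained sketch mirrors that flow using local stand-in types rather than the real kv and mpp packages; the real ToPB additionally fills the address and partition table IDs.

// Standalone sketch: mirrors how MPPTask.ToPB threads the new MPPQueryID
// fields into the task meta. taskMeta stands in for mpp.TaskMeta.
package main

import "fmt"

type mppQueryID struct {
	QueryTs      uint64 // timestamp of query execution, used for TiFlash minTSO scheduling
	LocalQueryID uint64 // unique MPP query id within one tidb-server process
	ServerID     uint64 // identifies the tidb-server that issued the query
}

type taskMeta struct {
	StartTs      uint64
	QueryTs      uint64
	LocalQueryId uint64
	ServerId     uint64
	TaskId       int64
}

type mppTask struct {
	ID         int64
	StartTs    uint64
	MppQueryID mppQueryID
}

// toPB copies the query-level identifiers and the task id into the wire meta,
// matching the shape of the MPPTask.ToPB change in the hunk above.
func (t *mppTask) toPB() *taskMeta {
	return &taskMeta{
		StartTs:      t.StartTs,
		QueryTs:      t.MppQueryID.QueryTs,
		LocalQueryId: t.MppQueryID.LocalQueryID,
		ServerId:     t.MppQueryID.ServerID,
		TaskId:       t.ID,
	}
}

func main() {
	task := mppTask{
		ID:         1,
		StartTs:    438000000000000001,
		MppQueryID: mppQueryID{QueryTs: 1671600000000000000, LocalQueryID: 7, ServerID: 3},
	}
	fmt.Printf("%+v\n", *task.toPB())
}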
diff --git a/kv/txn.go b/kv/txn.go index d7828c7fb3138..035f2aa662eca 100644 --- a/kv/txn.go +++ b/kv/txn.go @@ -195,20 +195,22 @@ func BackOff(attempts uint) int { func setRequestSourceForInnerTxn(ctx context.Context, txn Transaction) { if source := ctx.Value(RequestSourceKey); source != nil { requestSource := source.(RequestSource) - if !requestSource.RequestSourceInternal { - logutil.Logger(ctx).Warn("`RunInNewTxn` should be used by inner txn only") + if requestSource.RequestSourceType != "" { + if !requestSource.RequestSourceInternal { + logutil.Logger(ctx).Warn("`RunInNewTxn` should be used by inner txn only") + } + txn.SetOption(RequestSourceInternal, requestSource.RequestSourceInternal) + txn.SetOption(RequestSourceType, requestSource.RequestSourceType) + return } - txn.SetOption(RequestSourceInternal, requestSource.RequestSourceInternal) - txn.SetOption(RequestSourceType, requestSource.RequestSourceType) + } + // panic in test mode in case there are requests without source in the future. + // log warnings in production mode. + if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { + panic("unexpected no source type context, if you see this error, " + + "the `RequestSourceTypeKey` is missing in your context") } else { - // panic in test mode in case there are requests without source in the future. - // log warnings in production mode. - if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { - panic("unexpected no source type context, if you see this error, " + - "the `RequestSourceTypeKey` is missing in your context") - } else { - logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, " + - "the `RequestSourceTypeKey` is missing in the context") - } + logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, " + + "the `RequestSourceTypeKey` is missing in the context") } } diff --git a/meta/BUILD.bazel b/meta/BUILD.bazel index 791662be8c215..c6c796a9771c1 100644 --- a/meta/BUILD.bazel +++ b/meta/BUILD.bazel @@ -16,10 +16,8 @@ go_library( "//parser/mysql", "//structure", "//util/dbterror", - "//util/logutil", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_kvproto//pkg/kvrpcpb", - "@org_uber_go_zap//:zap", ], ) @@ -33,12 +31,10 @@ go_test( embed = [":meta"], flaky = True, deps = [ - "//ddl", "//kv", "//parser/model", "//store/mockstore", "//testkit/testsetup", - "//testkit/testutil", "//util", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/meta/meta.go b/meta/meta.go index 9f262be8b464d..97f6756a582b2 100644 --- a/meta/meta.go +++ b/meta/meta.go @@ -19,7 +19,6 @@ import ( "encoding/binary" "encoding/json" "fmt" - "math" "strconv" "strings" "sync" @@ -34,8 +33,6 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/structure" "github.com/pingcap/tidb/util/dbterror" - "github.com/pingcap/tidb/util/logutil" - "go.uber.org/zap" ) var ( @@ -77,7 +74,6 @@ var ( mPolicyGlobalID = []byte("PolicyGlobalID") mPolicyMagicByte = CurrentMagicByteVer mDDLTableVersion = []byte("DDLTableVersion") - mConcurrentDDL = []byte("concurrentDDL") mMetaDataLock = []byte("metadataLock") ) @@ -129,17 +125,13 @@ type Meta struct { // NewMeta creates a Meta in transaction txn. // If the current Meta needs to handle a job, jobListKey is the type of the job's list. 
-func NewMeta(txn kv.Transaction, jobListKeys ...JobListKeyType) *Meta { +func NewMeta(txn kv.Transaction) *Meta { txn.SetOption(kv.Priority, kv.PriorityHigh) txn.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) t := structure.NewStructure(txn, txn, mMetaPrefix) - listKey := DefaultJobListKey - if len(jobListKeys) != 0 { - listKey = jobListKeys[0] - } return &Meta{txn: t, StartTS: txn.StartTS(), - jobListKey: listKey, + jobListKey: DefaultJobListKey, } } @@ -622,27 +614,6 @@ func (m *Meta) CheckMDLTableExists() (bool, error) { return bytes.Equal(v, []byte("2")), nil } -// SetConcurrentDDL set the concurrent DDL flag. -func (m *Meta) SetConcurrentDDL(b bool) error { - var data []byte - if b { - data = []byte("1") - } else { - data = []byte("0") - } - return errors.Trace(m.txn.Set(mConcurrentDDL, data)) -} - -// IsConcurrentDDL returns true if the concurrent DDL flag is set. -func (m *Meta) IsConcurrentDDL() (bool, error) { - val, err := m.txn.Get(mConcurrentDDL) - if err != nil { - return false, errors.Trace(err) - } - - return len(val) == 0 || bytes.Equal(val, []byte("1")), nil -} - // SetMetadataLock sets the metadata lock. func (m *Meta) SetMetadataLock(b bool) error { var data []byte @@ -987,12 +958,8 @@ var ( mDDLJobListKey = []byte("DDLJobList") mDDLJobAddIdxList = []byte("DDLJobAddIdxList") mDDLJobHistoryKey = []byte("DDLJobHistory") - mDDLJobReorgKey = []byte("DDLJobReorg") ) -// JobListKeyType is a key type of the DDL job queue. -type JobListKeyType []byte - var ( // DefaultJobListKey keeps all actions of DDL jobs except "add index". DefaultJobListKey JobListKeyType = mDDLJobListKey @@ -1018,31 +985,8 @@ func (m *Meta) EnQueueDDLJob(job *model.Job, jobListKeys ...JobListKeyType) erro return m.enQueueDDLJob(listKey, job, true) } -// EnQueueDDLJobNoUpdate adds a DDL job to the list without update raw args. -func (m *Meta) EnQueueDDLJobNoUpdate(job *model.Job, jobListKeys ...JobListKeyType) error { - listKey := m.jobListKey - if len(jobListKeys) != 0 { - listKey = jobListKeys[0] - } - - return m.enQueueDDLJob(listKey, job, false) -} - -func (m *Meta) deQueueDDLJob(key []byte) (*model.Job, error) { - value, err := m.txn.LPop(key) - if err != nil || value == nil { - return nil, errors.Trace(err) - } - - job := &model.Job{} - err = job.Decode(value) - return job, errors.Trace(err) -} - -// DeQueueDDLJob pops a DDL job from the list. -func (m *Meta) DeQueueDDLJob() (*model.Job, error) { - return m.deQueueDDLJob(m.jobListKey) -} +// JobListKeyType is a key type of the DDL job queue. +type JobListKeyType []byte func (m *Meta) getDDLJob(key []byte, index int64) (*model.Job, error) { value, err := m.txn.LIndex(key, index) @@ -1063,61 +1007,6 @@ func (m *Meta) getDDLJob(key []byte, index int64) (*model.Job, error) { return job, errors.Trace(err) } -// GetDDLJobByIdx returns the corresponding DDL job by the index. -// The length of jobListKeys can only be 1 or 0. -// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. -// Otherwise, we use m.jobListKey directly. -func (m *Meta) GetDDLJobByIdx(index int64, jobListKeys ...JobListKeyType) (*model.Job, error) { - listKey := m.jobListKey - if len(jobListKeys) != 0 { - listKey = jobListKeys[0] - } - - startTime := time.Now() - job, err := m.getDDLJob(listKey, index) - metrics.MetaHistogram.WithLabelValues(metrics.GetDDLJobByIdx, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) - return job, errors.Trace(err) -} - -// updateDDLJob updates the DDL job with index and key. 
-// updateRawArgs is used to determine whether to update the raw args when encode the job. -func (m *Meta) updateDDLJob(index int64, job *model.Job, key []byte, updateRawArgs bool) error { - b, err := job.Encode(updateRawArgs) - if err == nil { - err = m.txn.LSet(key, index, b) - } - return errors.Trace(err) -} - -// UpdateDDLJob updates the DDL job with index. -// updateRawArgs is used to determine whether to update the raw args when encode the job. -// The length of jobListKeys can only be 1 or 0. -// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. -// Otherwise, we use m.jobListKey directly. -func (m *Meta) UpdateDDLJob(index int64, job *model.Job, updateRawArgs bool, jobListKeys ...JobListKeyType) error { - listKey := m.jobListKey - if len(jobListKeys) != 0 { - listKey = jobListKeys[0] - } - - startTime := time.Now() - err := m.updateDDLJob(index, job, listKey, updateRawArgs) - metrics.MetaHistogram.WithLabelValues(metrics.UpdateDDLJob, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) - return errors.Trace(err) -} - -// DDLJobQueueLen returns the DDL job queue length. -// The length of jobListKeys can only be 1 or 0. -// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. -// Otherwise, we use m.jobListKey directly. -func (m *Meta) DDLJobQueueLen(jobListKeys ...JobListKeyType) (int64, error) { - listKey := m.jobListKey - if len(jobListKeys) != 0 { - listKey = jobListKeys[0] - } - return m.txn.LLen(listKey) -} - // GetAllDDLJobsInQueue gets all DDL Jobs in the current queue. // The length of jobListKeys can only be 1 or 0. // If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. @@ -1152,45 +1041,6 @@ func (*Meta) jobIDKey(id int64) []byte { return b } -func (m *Meta) reorgJobCurrentElement(id int64) []byte { - b := make([]byte, 0, 12) - b = append(b, m.jobIDKey(id)...) - b = append(b, "_ele"...) - return b -} - -func (m *Meta) reorgJobStartHandle(id int64, element *Element) []byte { - b := make([]byte, 0, 16+len(element.TypeKey)) - b = append(b, m.jobIDKey(id)...) - b = append(b, element.TypeKey...) - eID := make([]byte, 8) - binary.BigEndian.PutUint64(eID, uint64(element.ID)) - b = append(b, eID...) - return b -} - -func (*Meta) reorgJobEndHandle(id int64, element *Element) []byte { - b := make([]byte, 8, 25) - binary.BigEndian.PutUint64(b, uint64(id)) - b = append(b, element.TypeKey...) - eID := make([]byte, 8) - binary.BigEndian.PutUint64(eID, uint64(element.ID)) - b = append(b, eID...) - b = append(b, "_end"...) - return b -} - -func (*Meta) reorgJobPhysicalTableID(id int64, element *Element) []byte { - b := make([]byte, 8, 25) - binary.BigEndian.PutUint64(b, uint64(id)) - b = append(b, element.TypeKey...) - eID := make([]byte, 8) - binary.BigEndian.PutUint64(eID, uint64(element.ID)) - b = append(b, eID...) - b = append(b, "_pid"...) - return b -} - func (m *Meta) addHistoryDDLJob(key []byte, job *model.Job, updateRawArgs bool) error { b, err := job.Encode(updateRawArgs) if err == nil { @@ -1352,160 +1202,6 @@ func DecodeElement(b []byte) (*Element, error) { return &Element{ID: int64(id), TypeKey: tp}, nil } -// UpdateDDLReorgStartHandle saves the job reorganization latest processed element and start handle for later resuming. 
-func (m *Meta) UpdateDDLReorgStartHandle(job *model.Job, element *Element, startKey kv.Key) error { - err := m.txn.HSet(mDDLJobReorgKey, m.reorgJobCurrentElement(job.ID), element.EncodeElement()) - if err != nil { - return errors.Trace(err) - } - if startKey != nil { - err = m.txn.HSet(mDDLJobReorgKey, m.reorgJobStartHandle(job.ID, element), startKey) - if err != nil { - return errors.Trace(err) - } - } - return nil -} - -// UpdateDDLReorgHandle saves the job reorganization latest processed information for later resuming. -func (m *Meta) UpdateDDLReorgHandle(jobID int64, startKey, endKey kv.Key, physicalTableID int64, element *Element) error { - err := m.txn.HSet(mDDLJobReorgKey, m.reorgJobCurrentElement(jobID), element.EncodeElement()) - if err != nil { - return errors.Trace(err) - } - if startKey != nil { - err = m.txn.HSet(mDDLJobReorgKey, m.reorgJobStartHandle(jobID, element), startKey) - if err != nil { - return errors.Trace(err) - } - } - if endKey != nil { - err = m.txn.HSet(mDDLJobReorgKey, m.reorgJobEndHandle(jobID, element), endKey) - if err != nil { - return errors.Trace(err) - } - } - err = m.txn.HSet(mDDLJobReorgKey, m.reorgJobPhysicalTableID(jobID, element), []byte(strconv.FormatInt(physicalTableID, 10))) - return errors.Trace(err) -} - -// ClearAllDDLReorgHandle clears all reorganization related handles. -func (m *Meta) ClearAllDDLReorgHandle() error { - return m.txn.HClear(mDDLJobReorgKey) -} - -// ClearALLDDLJob clears all DDL jobs. -func (m *Meta) ClearALLDDLJob() error { - if err := m.txn.LClear(mDDLJobAddIdxList); err != nil { - return errors.Trace(err) - } - if err := m.txn.LClear(mDDLJobListKey); err != nil { - return errors.Trace(err) - } - return nil -} - -// ClearAllHistoryJob clears all history jobs. **IT IS VERY DANGEROUS** -func (m *Meta) ClearAllHistoryJob() error { - if err := m.txn.HClear(mDDLJobHistoryKey); err != nil { - return errors.Trace(err) - } - return nil -} - -// RemoveReorgElement removes the element of the reorganization information. -func (m *Meta) RemoveReorgElement(job *model.Job) error { - err := m.txn.HDel(mDDLJobReorgKey, m.reorgJobCurrentElement(job.ID)) - if err != nil { - return errors.Trace(err) - } - return nil -} - -// RemoveDDLReorgHandle removes the job reorganization related handles. -func (m *Meta) RemoveDDLReorgHandle(job *model.Job, elements []*Element) error { - if len(elements) == 0 { - return nil - } - - err := m.txn.HDel(mDDLJobReorgKey, m.reorgJobCurrentElement(job.ID)) - if err != nil { - return errors.Trace(err) - } - - for _, element := range elements { - err = m.txn.HDel(mDDLJobReorgKey, m.reorgJobStartHandle(job.ID, element)) - if err != nil { - return errors.Trace(err) - } - if err = m.txn.HDel(mDDLJobReorgKey, m.reorgJobEndHandle(job.ID, element)); err != nil { - logutil.BgLogger().Warn("remove DDL reorg end handle", zap.Error(err)) - } - if err = m.txn.HDel(mDDLJobReorgKey, m.reorgJobPhysicalTableID(job.ID, element)); err != nil { - logutil.BgLogger().Warn("remove DDL reorg physical ID", zap.Error(err)) - } - } - return nil -} - -// GetDDLReorgHandle gets the latest processed DDL reorganize position. 
-func (m *Meta) GetDDLReorgHandle(job *model.Job) (element *Element, startKey, endKey kv.Key, physicalTableID int64, err error) { - elementBytes, err := m.txn.HGet(mDDLJobReorgKey, m.reorgJobCurrentElement(job.ID)) - if err != nil { - return nil, nil, nil, 0, errors.Trace(err) - } - if elementBytes == nil { - return nil, nil, nil, 0, ErrDDLReorgElementNotExist - } - element, err = DecodeElement(elementBytes) - if err != nil { - return nil, nil, nil, 0, errors.Trace(err) - } - - startKey, err = getReorgJobFieldHandle(m.txn, m.reorgJobStartHandle(job.ID, element)) - if err != nil { - return nil, nil, nil, 0, errors.Trace(err) - } - endKey, err = getReorgJobFieldHandle(m.txn, m.reorgJobEndHandle(job.ID, element)) - if err != nil { - return nil, nil, nil, 0, errors.Trace(err) - } - - physicalTableID, err = m.txn.HGetInt64(mDDLJobReorgKey, m.reorgJobPhysicalTableID(job.ID, element)) - if err != nil { - err = errors.Trace(err) - return - } - - // physicalTableID may be 0, because older version TiDB (without table partition) doesn't store them. - // update them to table's in this case. - if physicalTableID == 0 { - if job.ReorgMeta != nil { - endKey = kv.IntHandle(job.ReorgMeta.EndHandle).Encoded() - } else { - endKey = kv.IntHandle(math.MaxInt64).Encoded() - } - physicalTableID = job.TableID - logutil.BgLogger().Warn("new TiDB binary running on old TiDB DDL reorg data", - zap.Int64("partition ID", physicalTableID), - zap.Stringer("startHandle", startKey), - zap.Stringer("endHandle", endKey)) - } - return -} - -func getReorgJobFieldHandle(t *structure.TxStructure, reorgJobField []byte) (kv.Key, error) { - bs, err := t.HGet(mDDLJobReorgKey, reorgJobField) - if err != nil { - return nil, errors.Trace(err) - } - keyNotFound := bs == nil - if keyNotFound { - return nil, nil - } - return bs, nil -} - func (*Meta) schemaDiffKey(schemaVersion int64) []byte { return []byte(fmt.Sprintf("%s:%d", mSchemaDiffPrefix, schemaVersion)) } diff --git a/meta/meta_test.go b/meta/meta_test.go index 28603f76189ad..d1d932821bce3 100644 --- a/meta/meta_test.go +++ b/meta/meta_test.go @@ -17,18 +17,15 @@ package meta_test import ( "context" "fmt" - "math" "strconv" "testing" "time" "github.com/pingcap/errors" - "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/store/mockstore" - "github.com/pingcap/tidb/testkit/testutil" "github.com/pingcap/tidb/util" "github.com/stretchr/testify/require" ) @@ -448,202 +445,6 @@ func TestElement(t *testing.T) { require.EqualError(t, err, `invalid encoded element "_col_" length 5`) } -func TestDDL(t *testing.T) { - testCases := []struct { - desc string - startHandle kv.Handle - endHandle kv.Handle - }{ - { - "kv.IntHandle", - kv.IntHandle(1), - kv.IntHandle(2), - }, - { - "kv.CommonHandle", - testutil.MustNewCommonHandle(t, "abc", 1222, "string"), - testutil.MustNewCommonHandle(t, "dddd", 1222, "string"), - }, - } - - for _, tc := range testCases { - // copy iterator variable into a new variable, see issue #27779 - tc := tc - t.Run(tc.desc, func(t *testing.T) { - store, err := mockstore.NewMockStore() - require.NoError(t, err) - defer func() { - err := store.Close() - require.NoError(t, err) - }() - - txn, err := store.Begin() - require.NoError(t, err) - - m := meta.NewMeta(txn) - - job := &model.Job{ID: 1} - err = m.EnQueueDDLJob(job) - require.NoError(t, err) - n, err := m.DDLJobQueueLen() - require.NoError(t, err) - require.Equal(t, int64(1), n) - - v, err 
:= m.GetDDLJobByIdx(0) - require.NoError(t, err) - require.Equal(t, job, v) - v, err = m.GetDDLJobByIdx(1) - require.NoError(t, err) - require.Nil(t, v) - - job.ID = 2 - err = m.UpdateDDLJob(0, job, true) - require.NoError(t, err) - - element := &meta.Element{ID: 123, TypeKey: meta.IndexElementKey} - // There are 3 meta key relate to index reorganization: - // start_handle, end_handle and physical_table_id. - // Only start_handle is initialized. - err = m.UpdateDDLReorgStartHandle(job, element, kv.IntHandle(1).Encoded()) - require.NoError(t, err) - - // Since physical_table_id is uninitialized, we simulate older TiDB version that doesn't store them. - // In this case GetDDLReorgHandle always return maxInt64 as end_handle. - e, i, j, k, err := m.GetDDLReorgHandle(job) - require.NoError(t, err) - require.Equal(t, element, e) - require.Equal(t, kv.Key(kv.IntHandle(1).Encoded()), i) - require.Equal(t, kv.Key(kv.IntHandle(math.MaxInt64).Encoded()), j) - require.Equal(t, int64(0), k) - - element = &meta.Element{ID: 222, TypeKey: meta.ColumnElementKey} - err = m.UpdateDDLReorgHandle(job.ID, tc.startHandle.Encoded(), tc.endHandle.Encoded(), 3, element) - require.NoError(t, err) - element1 := &meta.Element{ID: 223, TypeKey: meta.IndexElementKey} - err = m.UpdateDDLReorgHandle(job.ID, tc.startHandle.Encoded(), tc.endHandle.Encoded(), 3, element1) - require.NoError(t, err) - - e, i, j, k, err = m.GetDDLReorgHandle(job) - require.NoError(t, err) - require.Equal(t, element1, e) - require.Equal(t, kv.Key(tc.startHandle.Encoded()), i) - require.Equal(t, kv.Key(tc.endHandle.Encoded()), j) - require.Equal(t, int64(3), k) - - err = m.RemoveDDLReorgHandle(job, []*meta.Element{element, element1}) - require.NoError(t, err) - e, i, j, k, err = m.GetDDLReorgHandle(job) - require.True(t, meta.ErrDDLReorgElementNotExist.Equal(err)) - require.Nil(t, e) - require.Nil(t, i) - require.Nil(t, j) - require.Equal(t, k, int64(0)) - - // new TiDB binary running on old TiDB DDL reorg data. - e, i, j, k, err = m.GetDDLReorgHandle(job) - require.True(t, meta.ErrDDLReorgElementNotExist.Equal(err)) - require.Nil(t, e) - require.Nil(t, i) - require.Nil(t, j) - require.Equal(t, k, int64(0)) - - // Test GetDDLReorgHandle failed. - _, _, _, _, err = m.GetDDLReorgHandle(job) - require.True(t, meta.ErrDDLReorgElementNotExist.Equal(err)) - - v, err = m.DeQueueDDLJob() - require.NoError(t, err) - require.Equal(t, job, v) - - err = m.AddHistoryDDLJob(job, true) - require.NoError(t, err) - v, err = m.GetHistoryDDLJob(2) - require.NoError(t, err) - require.Equal(t, job, v) - - // Add multiple history jobs. - arg := "test arg" - historyJob1 := &model.Job{ID: 1234} - historyJob1.Args = append(job.Args, arg) - err = m.AddHistoryDDLJob(historyJob1, true) - require.NoError(t, err) - historyJob2 := &model.Job{ID: 123} - historyJob2.Args = append(job.Args, arg) - err = m.AddHistoryDDLJob(historyJob2, false) - require.NoError(t, err) - all, err := ddl.GetAllHistoryDDLJobs(m) - require.NoError(t, err) - var lastID int64 - for _, job := range all { - require.Greater(t, job.ID, lastID) - lastID = job.ID - arg1 := "" - err := job.DecodeArgs(&arg1) - require.NoError(t, err) - if job.ID == historyJob1.ID { - require.Equal(t, historyJob1.Args[0], *(job.Args[0].(*string))) - } else { - require.Len(t, job.Args, 0) - } - } - - // Test for get last N history ddl jobs. 
- historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, 2) - require.NoError(t, err) - require.Len(t, historyJobs, 2) - require.Equal(t, int64(1234), historyJobs[0].ID) - require.Equal(t, int64(123), historyJobs[1].ID) - - // Test GetAllDDLJobsInQueue. - err = m.EnQueueDDLJob(job) - require.NoError(t, err) - job1 := &model.Job{ID: 2} - err = m.EnQueueDDLJob(job1) - require.NoError(t, err) - jobs, err := m.GetAllDDLJobsInQueue() - require.NoError(t, err) - expectJobs := []*model.Job{job, job1} - require.Equal(t, expectJobs, jobs) - - err = txn.Commit(context.Background()) - require.NoError(t, err) - }) - } -} - -func TestAddIndexJob(t *testing.T) { - store, err := mockstore.NewMockStore() - require.NoError(t, err) - defer func() { - err := store.Close() - require.NoError(t, err) - }() - - txn1, err := store.Begin() - require.NoError(t, err) - - m := meta.NewMeta(txn1, meta.AddIndexJobListKey) - job := &model.Job{ID: 1} - err = m.EnQueueDDLJob(job) - require.NoError(t, err) - job.ID = 123 - err = m.UpdateDDLJob(0, job, true, meta.AddIndexJobListKey) - require.NoError(t, err) - v, err := m.GetDDLJobByIdx(0, meta.AddIndexJobListKey) - require.NoError(t, err) - require.Equal(t, job, v) - l, err := m.DDLJobQueueLen(meta.AddIndexJobListKey) - require.NoError(t, err) - require.Equal(t, int64(1), l) - jobs, err := m.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) - require.NoError(t, err) - expectJobs := []*model.Job{job} - require.Equal(t, expectJobs, jobs) - - err = txn1.Commit(context.Background()) - require.NoError(t, err) -} - func BenchmarkGenGlobalIDs(b *testing.B) { store, err := mockstore.NewMockStore() require.NoError(b, err) @@ -773,45 +574,6 @@ func TestSequenceKey(b *testing.T) { require.Equal(b, tableID, id) } -func TestClearJob(t *testing.T) { - store, err := mockstore.NewMockStore() - require.NoError(t, err) - defer func() { - require.NoError(t, store.Close()) - }() - - txn, err := store.Begin() - require.NoError(t, err) - - job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn} - job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable} - job3 := &model.Job{ID: 3, TableID: 2, Type: model.ActionDropColumn} - - m := meta.NewMeta(txn) - - require.NoError(t, m.EnQueueDDLJob(job1)) - require.NoError(t, m.EnQueueDDLJob(job2)) - require.NoError(t, m.EnQueueDDLJob(job3)) - - require.NoError(t, m.AddHistoryDDLJob(job1, false)) - require.NoError(t, m.AddHistoryDDLJob(job2, false)) - - jobs, err := m.GetAllDDLJobsInQueue() - require.NoError(t, err) - require.Len(t, jobs, 3) - require.NoError(t, m.ClearALLDDLJob()) - jobs, err = m.GetAllDDLJobsInQueue() - require.NoError(t, err) - require.Len(t, jobs, 0) - - count, err := m.GetHistoryDDLCount() - require.NoError(t, err) - require.Equal(t, count, uint64(2)) - - err = txn.Rollback() - require.NoError(t, err) -} - func TestCreateMySQLDatabase(t *testing.T) { store, err := mockstore.NewMockStore() require.NoError(t, err) @@ -835,41 +597,3 @@ func TestCreateMySQLDatabase(t *testing.T) { err = txn.Rollback() require.NoError(t, err) } - -func TestDDLTable(t *testing.T) { - store, err := mockstore.NewMockStore() - require.NoError(t, err) - defer func() { - require.NoError(t, store.Close()) - }() - - txn, err := store.Begin() - require.NoError(t, err) - - m := meta.NewMeta(txn) - - exists, err := m.CheckDDLTableExists() - require.NoError(t, err) - require.False(t, exists) - - err = m.SetDDLTables() - require.NoError(t, err) - - exists, err = m.CheckDDLTableExists() - require.NoError(t, err) - require.True(t, exists) - - err = 
m.SetConcurrentDDL(true) - require.NoError(t, err) - b, err := m.IsConcurrentDDL() - require.NoError(t, err) - require.True(t, b) - err = m.SetConcurrentDDL(false) - require.NoError(t, err) - b, err = m.IsConcurrentDDL() - require.NoError(t, err) - require.False(t, b) - - err = txn.Rollback() - require.NoError(t, err) -} diff --git a/metrics/meta.go b/metrics/meta.go index 519ba6a0924a1..af967fe48a3bb 100644 --- a/metrics/meta.go +++ b/metrics/meta.go @@ -34,8 +34,6 @@ var ( GetSchemaDiff = "get_schema_diff" SetSchemaDiff = "set_schema_diff" - GetDDLJobByIdx = "get_ddl_job" - UpdateDDLJob = "update_ddl_job" GetHistoryDDLJob = "get_history_ddl_job" MetaHistogram = prometheus.NewHistogramVec( diff --git a/metrics/metrics.go b/metrics/metrics.go index f609caaa124cd..8f303ba58180e 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -124,7 +124,6 @@ func RegisterMetrics() { prometheus.MustRegister(SyncLoadHistogram) prometheus.MustRegister(ReadStatsHistogram) prometheus.MustRegister(JobsGauge) - prometheus.MustRegister(KeepAliveCounter) prometheus.MustRegister(LoadPrivilegeCounter) prometheus.MustRegister(InfoCacheCounters) prometheus.MustRegister(LoadSchemaCounter) diff --git a/metrics/server.go b/metrics/server.go index 6bd34c4a8b903..830e03b28e986 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -120,14 +120,6 @@ var ( Help: "Counter of system time jumps backward.", }) - KeepAliveCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "tidb", - Subsystem: "monitor", - Name: "keep_alive_total", - Help: "Counter of TiDB keep alive.", - }) - PlanCacheCounter = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "tidb", diff --git a/parser/ast/ddl.go b/parser/ast/ddl.go index f6d60e16863cd..fa9507ea0e7b4 100644 --- a/parser/ast/ddl.go +++ b/parser/ast/ddl.go @@ -1096,7 +1096,8 @@ func (n *CreateTableStmt) Restore(ctx *format.RestoreCtx) error { ctx.WritePlain(")") } - for i, option := range n.Options { + options := tableOptionsWithRestoreTTLFlag(ctx.Flags, n.Options) + for i, option := range options { ctx.WritePlain(" ") if err := option.Restore(ctx); err != nil { return errors.Annotatef(err, "An error occurred while splicing CreateTableStmt TableOption: [%v]", i) @@ -3573,11 +3574,21 @@ func (n *AlterTableStmt) Restore(ctx *format.RestoreCtx) error { if err := n.Table.Restore(ctx); err != nil { return errors.Annotate(err, "An error occurred while restore AlterTableStmt.Table") } - var specs []*AlterTableSpec + specs := make([]*AlterTableSpec, 0, len(n.Specs)) for _, spec := range n.Specs { - if !(spec.IsAllPlacementRule() && ctx.Flags.HasSkipPlacementRuleForRestoreFlag()) { - specs = append(specs, spec) + if spec.IsAllPlacementRule() && ctx.Flags.HasSkipPlacementRuleForRestoreFlag() { + continue + } + if spec.Tp == AlterTableOption { + newOptions := tableOptionsWithRestoreTTLFlag(ctx.Flags, spec.Options) + if len(newOptions) == 0 { + continue + } + newSpec := *spec + newSpec.Options = newOptions + spec = &newSpec } + specs = append(specs, spec) } for i, spec := range specs { if i == 0 || spec.Tp == AlterTablePartition || spec.Tp == AlterTableRemovePartitioning || spec.Tp == AlterTableImportTablespace || spec.Tp == AlterTableDiscardTablespace { @@ -4510,3 +4521,25 @@ func restorePlacementStmtInSpecialComment(ctx *format.RestoreCtx, n DDLNode) err return n.Restore(ctx) }) } + +func tableOptionsWithRestoreTTLFlag(flags format.RestoreFlags, options []*TableOption) []*TableOption { + if !flags.HasRestoreWithTTLEnableOff() { + return options + } + + 
newOptions := make([]*TableOption, 0, len(options)) + for _, opt := range options { + if opt.Tp == TableOptionTTLEnable { + continue + } + + newOptions = append(newOptions, opt) + if opt.Tp == TableOptionTTL { + newOptions = append(newOptions, &TableOption{ + Tp: TableOptionTTLEnable, + BoolValue: false, + }) + } + } + return newOptions +} diff --git a/parser/ast/ddl_test.go b/parser/ast/ddl_test.go index e6107f34513ec..156a66398426f 100644 --- a/parser/ast/ddl_test.go +++ b/parser/ast/ddl_test.go @@ -869,3 +869,37 @@ func TestTableOptionTTLRestore(t *testing.T) { runNodeRestoreTestWithFlags(t, testCases, "%s", extractNodeFunc, ca.flags) } } + +func TestTableOptionTTLRestoreWithTTLEnableOffFlag(t *testing.T) { + sourceSQL1 := "create table t (created_at datetime) ttl = created_at + INTERVAL 1 YEAR" + sourceSQL2 := "alter table t ttl_enable = 'ON'" + sourceSQL3 := "alter table t remove ttl" + sourceSQL4 := "create table t (created_at datetime) ttl = created_at + INTERVAL 1 YEAR ttl_enable = 'ON'" + sourceSQL5 := "alter table t ttl_enable = 'ON' placement policy p1" + cases := []struct { + sourceSQL string + flags format.RestoreFlags + expectSQL string + }{ + {sourceSQL1, format.DefaultRestoreFlags | format.RestoreWithTTLEnableOff, "CREATE TABLE `t` (`created_at` DATETIME) TTL = `created_at` + INTERVAL 1 YEAR TTL_ENABLE = 'OFF'"}, + {sourceSQL1, format.DefaultRestoreFlags | format.RestoreTiDBSpecialComment | format.RestoreWithTTLEnableOff, "CREATE TABLE `t` (`created_at` DATETIME) /*T![ttl] TTL = `created_at` + INTERVAL 1 YEAR */ /*T![ttl] TTL_ENABLE = 'OFF' */"}, + {sourceSQL2, format.DefaultRestoreFlags | format.RestoreWithTTLEnableOff, "ALTER TABLE `t`"}, + {sourceSQL2, format.DefaultRestoreFlags | format.RestoreTiDBSpecialComment | format.RestoreWithTTLEnableOff, "ALTER TABLE `t`"}, + {sourceSQL3, format.DefaultRestoreFlags | format.RestoreWithTTLEnableOff, "ALTER TABLE `t` REMOVE TTL"}, + {sourceSQL3, format.DefaultRestoreFlags | format.RestoreTiDBSpecialComment | format.RestoreWithTTLEnableOff, "ALTER TABLE `t` /*T![ttl] REMOVE TTL */"}, + {sourceSQL4, format.DefaultRestoreFlags | format.RestoreWithTTLEnableOff, "CREATE TABLE `t` (`created_at` DATETIME) TTL = `created_at` + INTERVAL 1 YEAR TTL_ENABLE = 'OFF'"}, + {sourceSQL4, format.DefaultRestoreFlags | format.RestoreTiDBSpecialComment | format.RestoreWithTTLEnableOff, "CREATE TABLE `t` (`created_at` DATETIME) /*T![ttl] TTL = `created_at` + INTERVAL 1 YEAR */ /*T![ttl] TTL_ENABLE = 'OFF' */"}, + {sourceSQL5, format.DefaultRestoreFlags | format.RestoreTiDBSpecialComment | format.RestoreWithTTLEnableOff, "ALTER TABLE `t` /*T![placement] PLACEMENT POLICY = `p1` */"}, + } + + extractNodeFunc := func(node Node) Node { + return node + } + + for _, ca := range cases { + testCases := []NodeRestoreTestCase{ + {ca.sourceSQL, ca.expectSQL}, + } + runNodeRestoreTestWithFlagsStmtChange(t, testCases, "%s", extractNodeFunc, ca.flags) + } +} diff --git a/parser/format/format.go b/parser/format/format.go index adada122e255e..284d4dff4e9df 100644 --- a/parser/format/format.go +++ b/parser/format/format.go @@ -235,6 +235,7 @@ const ( RestoreTiDBSpecialComment SkipPlacementRuleForRestore + RestoreWithTTLEnableOff ) const ( @@ -321,6 +322,11 @@ func (rf RestoreFlags) HasSkipPlacementRuleForRestoreFlag() bool { return rf.has(SkipPlacementRuleForRestore) } +// HasRestoreWithTTLEnableOff returns a boolean indicating whether to force set TTL_ENABLE='OFF' when restoring a TTL table +func (rf RestoreFlags) HasRestoreWithTTLEnableOff() bool { + return 
rf.has(RestoreWithTTLEnableOff) +} + // RestoreCtx is `Restore` context to hold flags and writer. type RestoreCtx struct { Flags RestoreFlags diff --git a/parser/model/ddl.go b/parser/model/ddl.go index c278ffb56167d..4c09f06c29152 100644 --- a/parser/model/ddl.go +++ b/parser/model/ddl.go @@ -941,6 +941,8 @@ type SchemaDiff struct { OldTableID int64 `json:"old_table_id"` // OldSchemaID is the schema ID before rename table, only used by rename table DDL. OldSchemaID int64 `json:"old_schema_id"` + // RegenerateSchemaMap means whether to rebuild the schema map when applying to the schema diff. + RegenerateSchemaMap bool `json:"regenerate_schema_map"` AffectedOpts []*AffectedOption `json:"affected_options"` } diff --git a/parser/parser_test.go b/parser/parser_test.go index 98f90a0104a5c..dae08a80afde3 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -3244,6 +3244,7 @@ func TestDDL(t *testing.T) { {"create table t (a bigint, b bigint as (a+1) not null);", true, "CREATE TABLE `t` (`a` BIGINT,`b` BIGINT GENERATED ALWAYS AS(`a`+1) VIRTUAL NOT NULL)"}, {"create table t (a bigint, b bigint as (a+1) not null);", true, "CREATE TABLE `t` (`a` BIGINT,`b` BIGINT GENERATED ALWAYS AS(`a`+1) VIRTUAL NOT NULL)"}, {"create table t (a bigint, b bigint as (a+1) not null comment 'ttt');", true, "CREATE TABLE `t` (`a` BIGINT,`b` BIGINT GENERATED ALWAYS AS(`a`+1) VIRTUAL NOT NULL COMMENT 'ttt')"}, + {"create table t(a int, index idx((cast(a as binary(1)))));", true, "CREATE TABLE `t` (`a` INT,INDEX `idx`((CAST(`a` AS BINARY(1)))))"}, {"alter table t add column (f timestamp as (a+1) default '2019-01-01 11:11:11');", false, ""}, {"alter table t modify column f int as (a+1) default 55;", false, ""}, diff --git a/parser/types/field_type.go b/parser/types/field_type.go index ff0ac9793cf17..991dc3d087d75 100644 --- a/parser/types/field_type.go +++ b/parser/types/field_type.go @@ -518,7 +518,7 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error { // RestoreAsCastType is used for write AST back to string. func (ft *FieldType) RestoreAsCastType(ctx *format.RestoreCtx, explicitCharset bool) { switch ft.tp { - case mysql.TypeVarString: + case mysql.TypeVarString, mysql.TypeString: skipWriteBinary := false if ft.charset == charset.CharsetBin && ft.collate == charset.CollationBin { ctx.WriteKeyWord("BINARY") diff --git a/planner/core/BUILD.bazel b/planner/core/BUILD.bazel index 0495f1f50cae0..b7f37923547be 100644 --- a/planner/core/BUILD.bazel +++ b/planner/core/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "handle_cols.go", "hashcode.go", "hints.go", + "indexmerge_path.go", "initialize.go", "logical_plan_builder.go", "logical_plans.go", @@ -170,6 +171,7 @@ go_test( "flat_plan_test.go", "fragment_test.go", "indexmerge_intersection_test.go", + "indexmerge_path_test.go", "indexmerge_test.go", "integration_partition_test.go", "integration_test.go", diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index ddb905dc5c06b..5a8f8a2df9f35 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -1562,13 +1562,12 @@ func (er *expressionRewriter) inToExpression(lLen int, not bool, tp *types.Field if c.GetType().EvalType() == types.ETString { // To keep the result be compatible with MySQL, refine `int non-constant str constant` // here and skip this refine operation in all other cases for safety. 
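// Illustrative sketch (not part of the patch): what the tableOptionsWithRestoreTTLFlag
// helper introduced above does when RestoreWithTTLEnableOff is set. Any
// user-written TTL_ENABLE option is dropped, and TTL_ENABLE = OFF is emitted
// right after the TTL option, so restored DDL cannot start TTL jobs on the
// target cluster. The types below are local stand-ins, not the parser's real
// TableOption.
package example

type optionKind int

const (
	optTTL optionKind = iota
	optTTLEnable
	optOther
)

type option struct {
	kind    optionKind
	enabled bool
}

func forceTTLEnableOff(opts []option) []option {
	out := make([]option, 0, len(opts))
	for _, o := range opts {
		if o.kind == optTTLEnable {
			continue // discard whatever the user specified
		}
		out = append(out, o)
		if o.kind == optTTL {
			// Pin TTL_ENABLE to OFF immediately after the TTL clause.
			out = append(out, option{kind: optTTLEnable, enabled: false})
		}
	}
	return out
}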
- er.sctx.GetSessionVars().StmtCtx.SkipPlanCache = true - er.sctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("skip plan-cache: '%v' may be converted to INT", c.String())) + er.sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: '%v' may be converted to INT", c.String())) expression.RemoveMutableConst(er.sctx, []expression.Expression{c}) } else { continue } - } else if er.sctx.GetSessionVars().StmtCtx.SkipPlanCache { + } else if !er.sctx.GetSessionVars().StmtCtx.UseCache { // We should remove the mutable constant for correctness, because its value may be changed. expression.RemoveMutableConst(er.sctx, []expression.Expression{c}) } diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 639bc15dbdc98..aff1c29997fbd 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -928,8 +928,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if len(path.Ranges) == 0 { // We should uncache the tableDual plan. if expression.MaybeOverOptimized4PlanCache(ds.ctx, path.AccessConds) { - ds.ctx.GetSessionVars().StmtCtx.SkipPlanCache = true - ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("skip plan-cache: get a TableDual plan")) + ds.ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: get a TableDual plan")) } dual := PhysicalTableDual{}.Init(ds.ctx, ds.stats, ds.blockOffset) dual.SetSchema(ds.schema) @@ -1419,6 +1418,12 @@ func (ds *DataSource) addSelection4PlanCache(task *rootTask, stats *property.Sta // convertToIndexScan converts the DataSource to index scan with idx. func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { + if candidate.path.Index.MVIndex { + // MVIndex is special since different index rows may return the same _row_id and this can break some assumptions of IndexReader. + // Currently only support using IndexMerge to access MVIndex instead of IndexReader. + // TODO: make IndexReader support accessing MVIndex directly. + return invalidTask, nil + } if !candidate.path.IsSingleScan { // If it's parent requires single read task, return max cost. if prop.TaskTp == property.CopSingleReadTaskType { @@ -1988,8 +1993,11 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid } } } - // In disaggregated tiflash mode, only MPP is allowed, Cop and BatchCop is deprecated. - if prop.TaskTp == property.MppTaskType || config.GetGlobalConfig().DisaggregatedTiFlash && ts.StoreType == kv.TiFlash { + // In disaggregated tiflash mode, only MPP is allowed, cop and batchCop is deprecated. + // So if prop.TaskTp is RootTaskType, have to use mppTask then convert to rootTask. 
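// Illustrative sketch (not part of the patch): the call-site pattern the hunks
// above replace. Instead of flipping a separate SkipPlanCache flag and
// appending a warning at every site, a single SetSkipPlanCache(reason) marks
// the statement as un-cacheable and records why; readers now test !UseCache.
// The struct is a simplified stand-in for the real StatementContext.
package example

import "errors"

type stmtCtx struct {
	UseCache bool
	warnings []error
}

// SetSkipPlanCache disables plan caching for this statement and keeps the
// reason as a warning.
func (sc *stmtCtx) SetSkipPlanCache(reason error) {
	sc.UseCache = false
	sc.warnings = append(sc.warnings, reason)
}

func usage() {
	sc := &stmtCtx{UseCache: true}
	sc.SetSkipPlanCache(errors.New("skip plan-cache: get a TableDual plan"))
	_ = sc.UseCache // false from here on; callers check !UseCache
}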
+ isDisaggregatedTiFlashPath := config.GetGlobalConfig().DisaggregatedTiFlash && ts.StoreType == kv.TiFlash + canMppConvertToRootForDisaggregatedTiFlash := isDisaggregatedTiFlashPath && prop.TaskTp == property.RootTaskType && ds.SCtx().GetSessionVars().IsMPPAllowed() + if prop.TaskTp == property.MppTaskType || canMppConvertToRootForDisaggregatedTiFlash { if ts.KeepOrder { return invalidTask, nil } @@ -2005,8 +2013,9 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid } } mppTask := &mppTask{ - p: ts, - partTp: property.AnyType, + p: ts, + partTp: property.AnyType, + tblColHists: ds.TblColHists, } ts.PartitionInfo = PartitionInfo{ PruningConds: pushDownNot(ds.ctx, ds.allConds), @@ -2015,7 +2024,26 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid ColumnNames: ds.names, } mppTask = ts.addPushedDownSelectionToMppTask(mppTask, ds.stats.ScaleByExpectCnt(prop.ExpectedCnt)) - return mppTask, nil + task = mppTask + if !mppTask.invalid() { + if prop.TaskTp == property.MppTaskType && len(mppTask.rootTaskConds) > 0 { + // If got filters cannot be pushed down to tiflash, we have to make sure it will be executed in TiDB, + // So have to return a rootTask, but prop requires mppTask, cannot meet this requirement. + task = invalidTask + } else if prop.TaskTp == property.RootTaskType { + // when got here, canMppConvertToRootForDisaggregatedTiFlash is true. + task = mppTask + task = task.convertToRootTask(ds.ctx) + if !task.invalid() { + ds.addSelection4PlanCache(task.(*rootTask), ds.stats.ScaleByExpectCnt(prop.ExpectedCnt), prop) + } + } + } + return task, nil + } + if isDisaggregatedTiFlashPath { + // prop.TaskTp is cop related, just return invalidTask. + return invalidTask, nil } copTask := &copTask{ tablePlan: ts, @@ -2225,10 +2253,8 @@ func (ts *PhysicalTableScan) addPushedDownSelectionToMppTask(mpp *mppTask, stats filterCondition, rootTaskConds := SplitSelCondsWithVirtualColumn(ts.filterCondition) var newRootConds []expression.Expression filterCondition, newRootConds = expression.PushDownExprs(ts.ctx.GetSessionVars().StmtCtx, filterCondition, ts.ctx.GetClient(), ts.StoreType) - rootTaskConds = append(rootTaskConds, newRootConds...) - if len(rootTaskConds) > 0 { - return &mppTask{} - } + mpp.rootTaskConds = append(rootTaskConds, newRootConds...) + ts.filterCondition = filterCondition // Add filter condition to table plan now. if len(ts.filterCondition) > 0 { diff --git a/planner/core/fragment.go b/planner/core/fragment.go index 917f4392d9f9e..7e86696ccc4d6 100644 --- a/planner/core/fragment.go +++ b/planner/core/fragment.go @@ -16,6 +16,7 @@ package core import ( "context" + "sync/atomic" "time" "unsafe" @@ -79,29 +80,45 @@ type tasksAndFrags struct { } type mppTaskGenerator struct { - ctx sessionctx.Context - startTS uint64 - is infoschema.InfoSchema - frags []*Fragment - cache map[int]tasksAndFrags + ctx sessionctx.Context + startTS uint64 + mppQueryID kv.MPPQueryID + is infoschema.InfoSchema + frags []*Fragment + cache map[int]tasksAndFrags } // GenerateRootMPPTasks generate all mpp tasks and return root ones. 
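// Illustrative sketch (not part of the patch): the behavioural change in
// addPushedDownSelectionToMppTask above. Filters that cannot be pushed to
// TiFlash no longer invalidate the MPP task; they are parked as root-task
// conditions and executed in TiDB once the MPP task is converted to a root
// task. canPushToTiFlash is a hypothetical predicate standing in for
// expression.PushDownExprs.
package example

type cond struct {
	name     string
	pushable bool
}

func canPushToTiFlash(c cond) bool { return c.pushable }

func splitSelection(conds []cond) (pushed, rootTaskConds []cond) {
	for _, c := range conds {
		if canPushToTiFlash(c) {
			pushed = append(pushed, c) // stays in the TiFlash selection
		} else {
			rootTaskConds = append(rootTaskConds, c) // evaluated in TiDB later
		}
	}
	return pushed, rootTaskConds
}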
-func GenerateRootMPPTasks(ctx sessionctx.Context, startTs uint64, sender *PhysicalExchangeSender, is infoschema.InfoSchema) ([]*Fragment, error) { +func GenerateRootMPPTasks(ctx sessionctx.Context, startTs uint64, mppQueryID kv.MPPQueryID, sender *PhysicalExchangeSender, is infoschema.InfoSchema) ([]*Fragment, error) { g := &mppTaskGenerator{ - ctx: ctx, - startTS: startTs, - is: is, - cache: make(map[int]tasksAndFrags), + ctx: ctx, + startTS: startTs, + mppQueryID: mppQueryID, + is: is, + cache: make(map[int]tasksAndFrags), } return g.generateMPPTasks(sender) } +// AllocMPPTaskID allocates task id for mpp tasks. It will reset the task id when the query finished. +func AllocMPPTaskID(ctx sessionctx.Context) int64 { + mppQueryInfo := &ctx.GetSessionVars().StmtCtx.MPPQueryInfo + return mppQueryInfo.AllocatedMPPTaskID.Add(1) +} + +var mppQueryID uint64 = 1 + +// AllocMPPQueryID allocates local query id for mpp queries. +func AllocMPPQueryID() uint64 { + return atomic.AddUint64(&mppQueryID, 1) +} + func (e *mppTaskGenerator) generateMPPTasks(s *PhysicalExchangeSender) ([]*Fragment, error) { logutil.BgLogger().Info("Mpp will generate tasks", zap.String("plan", ToString(s))) tidbTask := &kv.MPPTask{ - StartTs: e.startTS, - ID: -1, + StartTs: e.startTS, + MppQueryID: e.mppQueryID, + ID: -1, } _, frags, err := e.generateMPPTasksForExchangeSender(s) if err != nil { @@ -132,10 +149,11 @@ func (e *mppTaskGenerator) constructMPPTasksByChildrenTasks(tasks []*kv.MPPTask) _, ok := addressMap[addr] if !ok { mppTask := &kv.MPPTask{ - Meta: &mppAddr{addr: addr}, - ID: e.ctx.GetSessionVars().AllocMPPTaskID(e.startTS), - StartTs: e.startTS, - TableID: -1, + Meta: &mppAddr{addr: addr}, + ID: AllocMPPTaskID(e.ctx), + MppQueryID: e.mppQueryID, + StartTs: e.startTS, + TableID: -1, } newTasks = append(newTasks, mppTask) addressMap[addr] = struct{}{} @@ -385,7 +403,12 @@ func (e *mppTaskGenerator) constructMPPTasksImpl(ctx context.Context, ts *Physic tasks := make([]*kv.MPPTask, 0, len(metas)) for _, meta := range metas { - task := &kv.MPPTask{Meta: meta, ID: e.ctx.GetSessionVars().AllocMPPTaskID(e.startTS), StartTs: e.startTS, TableID: ts.Table.ID, PartitionTableIDs: allPartitionsIDs} + task := &kv.MPPTask{Meta: meta, + ID: AllocMPPTaskID(e.ctx), + StartTs: e.startTS, + MppQueryID: e.mppQueryID, + TableID: ts.Table.ID, + PartitionTableIDs: allPartitionsIDs} tasks = append(tasks, task) } return tasks, nil diff --git a/planner/core/indexmerge_path.go b/planner/core/indexmerge_path.go new file mode 100644 index 0000000000000..f0ecf02a00231 --- /dev/null +++ b/planner/core/indexmerge_path.go @@ -0,0 +1,633 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
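// Illustrative sketch (not part of the patch): the two counters behind the new
// MPP IDs. AllocMPPQueryID mirrors the process-wide atomic counter added in
// fragment.go; the per-query task-ID counter is shown on a simplified stand-in
// for StmtCtx.MPPQueryInfo. The AllocatedMPPTaskID.Add(1) call in the patch
// suggests an atomic.Int64, which is an assumption about its exact type.
package example

import "sync/atomic"

var mppQueryID uint64 // process-wide local MPP query ID counter

// AllocMPPQueryID hands out a unique local query ID per MPP query.
func AllocMPPQueryID() uint64 {
	return atomic.AddUint64(&mppQueryID, 1)
}

type mppQueryInfo struct {
	allocatedMPPTaskID atomic.Int64 // reset when the query finishes
}

// allocMPPTaskID hands out task IDs scoped to a single query.
func (q *mppQueryInfo) allocMPPTaskID() int64 {
	return q.allocatedMPPTaskID.Add(1)
}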
+ +package core + +import ( + "fmt" + "math" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/ranger" + "go.uber.org/zap" +) + +// generateIndexMergePath generates IndexMerge AccessPaths on this DataSource. +func (ds *DataSource) generateIndexMergePath() error { + // Consider the IndexMergePath. Now, we just generate `IndexMergePath` in DNF case. + // Use allConds instread of pushedDownConds, + // because we want to use IndexMerge even if some expr cannot be pushed to TiKV. + // We will create new Selection for exprs that cannot be pushed in convertToIndexMergeScan. + indexMergeConds := make([]expression.Expression, 0, len(ds.allConds)) + for _, expr := range ds.allConds { + indexMergeConds = append(indexMergeConds, expression.PushDownNot(ds.ctx, expr)) + } + + stmtCtx := ds.ctx.GetSessionVars().StmtCtx + isPossibleIdxMerge := len(indexMergeConds) > 0 && len(ds.possibleAccessPaths) > 1 + sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || len(ds.indexMergeHints) > 0) && !stmtCtx.NoIndexMergeHint + // We current do not consider `IndexMergePath`: + // 1. If there is an index path. + // 2. TODO: If there exists exprs that cannot be pushed down. This is to avoid wrongly estRow of Selection added by rule_predicate_push_down. + needConsiderIndexMerge := true + if len(ds.indexMergeHints) == 0 { + for i := 1; i < len(ds.possibleAccessPaths); i++ { + if len(ds.possibleAccessPaths[i].AccessConds) != 0 { + needConsiderIndexMerge = false + break + } + } + if needConsiderIndexMerge { + // PushDownExprs() will append extra warnings, which is annoying. So we reset warnings here. + warnings := stmtCtx.GetWarnings() + extraWarnings := stmtCtx.GetExtraWarnings() + _, remaining := expression.PushDownExprs(stmtCtx, indexMergeConds, ds.ctx.GetClient(), kv.UnSpecified) + stmtCtx.SetWarnings(warnings) + stmtCtx.SetExtraWarnings(extraWarnings) + + remainingExpr := 0 + for _, expr := range remaining { + // Handle these 3 functions specially since they can be used to access MVIndex. + if sf, ok := expr.(*expression.ScalarFunction); ok { + if sf.FuncName.L == ast.JSONMemberOf || sf.FuncName.L == ast.JSONOverlaps || + sf.FuncName.L == ast.JSONContains { + continue + } + } + remainingExpr++ + } + if remainingExpr > 0 { + needConsiderIndexMerge = false + } + } + } + + if isPossibleIdxMerge && sessionAndStmtPermission && needConsiderIndexMerge && ds.tableInfo.TempTableType != model.TempTableLocal { + err := ds.generateAndPruneIndexMergePath(indexMergeConds, ds.indexMergeHints != nil) + if err != nil { + return err + } + } else if len(ds.indexMergeHints) > 0 { + ds.indexMergeHints = nil + var msg string + if !isPossibleIdxMerge { + msg = "No available filter or available index." + } else if !sessionAndStmtPermission { + msg = "Got no_index_merge hint or tidb_enable_index_merge is off." + } else if ds.tableInfo.TempTableType == model.TempTableLocal { + msg = "Cannot use IndexMerge on temporary table." + } + msg = fmt.Sprintf("IndexMerge is inapplicable or disabled. %s", msg) + stmtCtx.AppendWarning(errors.Errorf(msg)) + logutil.BgLogger().Debug(msg) + } + + return nil +} + +// getIndexMergeOrPath generates all possible IndexMergeOrPaths. 
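// Illustrative sketch (not part of the patch): the relaxed check in
// generateIndexMergePath above. Expressions that cannot be pushed down would
// normally disqualify IndexMerge, but the JSON membership functions are
// exempted because an MVIndex may still serve them. The string names below are
// placeholders for the ast.JSONMemberOf / ast.JSONOverlaps / ast.JSONContains
// constants, and the type is a simplified stand-in for ScalarFunction.
package example

type scalarFunc struct{ name string }

func countBlockingExprs(remaining []scalarFunc) int {
	blocking := 0
	for _, sf := range remaining {
		switch sf.name {
		case "json_member_of", "json_overlaps", "json_contains":
			continue // may still be answered through an MVIndex
		}
		blocking++
	}
	return blocking // IndexMerge is only considered when this is zero
}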
+func (ds *DataSource) generateIndexMergeOrPaths(filters []expression.Expression) error { + usedIndexCount := len(ds.possibleAccessPaths) + for i, cond := range filters { + sf, ok := cond.(*expression.ScalarFunction) + if !ok || sf.FuncName.L != ast.LogicOr { + continue + } + var partialPaths = make([]*util.AccessPath, 0, usedIndexCount) + dnfItems := expression.FlattenDNFConditions(sf) + for _, item := range dnfItems { + cnfItems := expression.SplitCNFItems(item) + itemPaths := ds.accessPathsForConds(cnfItems, usedIndexCount) + if len(itemPaths) == 0 { + partialPaths = nil + break + } + partialPath, err := ds.buildIndexMergePartialPath(itemPaths) + if err != nil { + return err + } + if partialPath == nil { + partialPaths = nil + break + } + partialPaths = append(partialPaths, partialPath) + } + // If all of the partialPaths use the same index, we will not use the indexMerge. + singlePath := true + for i := len(partialPaths) - 1; i >= 1; i-- { + if partialPaths[i].Index != partialPaths[i-1].Index { + singlePath = false + break + } + } + if singlePath { + continue + } + if len(partialPaths) > 1 { + possiblePath := ds.buildIndexMergeOrPath(filters, partialPaths, i) + if possiblePath == nil { + return nil + } + + accessConds := make([]expression.Expression, 0, len(partialPaths)) + for _, p := range partialPaths { + indexCondsForP := p.AccessConds[:] + indexCondsForP = append(indexCondsForP, p.IndexFilters...) + if len(indexCondsForP) > 0 { + accessConds = append(accessConds, expression.ComposeCNFCondition(ds.ctx, indexCondsForP...)) + } + } + accessDNF := expression.ComposeDNFCondition(ds.ctx, accessConds...) + sel, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, []expression.Expression{accessDNF}, nil) + if err != nil { + logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err)) + sel = SelectionFactor + } + possiblePath.CountAfterAccess = sel * ds.tableStats.RowCount + ds.possibleAccessPaths = append(ds.possibleAccessPaths, possiblePath) + } + } + return nil +} + +// isInIndexMergeHints returns true if the input index name is not excluded by the IndexMerge hints, which means either +// (1) there's no IndexMerge hint, (2) there's IndexMerge hint but no specified index names, or (3) the input index +// name is specified in the IndexMerge hints. +func (ds *DataSource) isInIndexMergeHints(name string) bool { + if len(ds.indexMergeHints) == 0 { + return true + } + for _, hint := range ds.indexMergeHints { + if hint.indexHint == nil || len(hint.indexHint.IndexNames) == 0 { + return true + } + for _, hintName := range hint.indexHint.IndexNames { + if strings.EqualFold(strings.ToLower(name), strings.ToLower(hintName.String())) { + return true + } + } + } + return false +} + +// indexMergeHintsHasSpecifiedIdx returns true if there's IndexMerge hint, and it has specified index names. +func (ds *DataSource) indexMergeHintsHasSpecifiedIdx() bool { + for _, hint := range ds.indexMergeHints { + if hint.indexHint == nil || len(hint.indexHint.IndexNames) == 0 { + continue + } + if len(hint.indexHint.IndexNames) > 0 { + return true + } + } + return false +} + +// indexMergeHintsHasSpecifiedIdx return true if the input index name is specified in the IndexMerge hint. 
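// Illustrative sketch (not part of the patch): the control flow of
// generateIndexMergeOrPaths above. Every disjunct of a top-level OR filter must
// yield at least one usable index path, the cheapest path per disjunct becomes
// a partial path, and the candidate is dropped if all partial paths land on the
// same index. Types and callbacks are stand-ins for the planner's real ones.
package example

type accessPath struct{ indexID int64 }

func buildOrIndexMerge(
	disjuncts [][]string,
	pathsFor func(cnfItems []string) []accessPath,
	pickBest func(cands []accessPath) accessPath,
) ([]accessPath, bool) {
	partial := make([]accessPath, 0, len(disjuncts))
	for _, cnfItems := range disjuncts {
		candidates := pathsFor(cnfItems)
		if len(candidates) == 0 {
			return nil, false // one disjunct has no index path: no IndexMerge
		}
		partial = append(partial, pickBest(candidates))
	}
	sameIndex := true
	for i := 1; i < len(partial); i++ {
		if partial[i].indexID != partial[0].indexID {
			sameIndex = false
			break
		}
	}
	if sameIndex {
		return nil, false // a single index already covers the whole DNF
	}
	return partial, true
}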
+func (ds *DataSource) isSpecifiedInIndexMergeHints(name string) bool { + for _, hint := range ds.indexMergeHints { + if hint.indexHint == nil || len(hint.indexHint.IndexNames) == 0 { + continue + } + for _, hintName := range hint.indexHint.IndexNames { + if strings.EqualFold(strings.ToLower(name), strings.ToLower(hintName.String())) { + return true + } + } + } + return false +} + +// accessPathsForConds generates all possible index paths for conditions. +func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, usedIndexCount int) []*util.AccessPath { + var results = make([]*util.AccessPath, 0, usedIndexCount) + for i := 0; i < usedIndexCount; i++ { + path := &util.AccessPath{} + if ds.possibleAccessPaths[i].IsTablePath() { + if !ds.isInIndexMergeHints("primary") { + continue + } + if ds.tableInfo.IsCommonHandle { + path.IsCommonHandlePath = true + path.Index = ds.possibleAccessPaths[i].Index + } else { + path.IsIntHandlePath = true + } + err := ds.deriveTablePathStats(path, conditions, true) + if err != nil { + logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err)) + continue + } + var unsignedIntHandle bool + if path.IsIntHandlePath && ds.tableInfo.PKIsHandle { + if pkColInfo := ds.tableInfo.GetPkColInfo(); pkColInfo != nil { + unsignedIntHandle = mysql.HasUnsignedFlag(pkColInfo.GetFlag()) + } + } + // If the path contains a full range, ignore it. + if ranger.HasFullRange(path.Ranges, unsignedIntHandle) { + continue + } + // If we have point or empty range, just remove other possible paths. + if len(path.Ranges) == 0 || path.OnlyPointRange(ds.SCtx()) { + if len(results) == 0 { + results = append(results, path) + } else { + results[0] = path + results = results[:1] + } + break + } + } else { + path.Index = ds.possibleAccessPaths[i].Index + if !ds.isInIndexMergeHints(path.Index.Name.L) { + continue + } + err := ds.fillIndexPath(path, conditions) + if err != nil { + logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err)) + continue + } + ds.deriveIndexPathStats(path, conditions, true) + // If the path contains a full range, ignore it. + if ranger.HasFullRange(path.Ranges, false) { + continue + } + // If we have empty range, or point range on unique index, just remove other possible paths. + if len(path.Ranges) == 0 || (path.OnlyPointRange(ds.SCtx()) && path.Index.Unique) { + if len(results) == 0 { + results = append(results, path) + } else { + results[0] = path + results = results[:1] + } + break + } + } + results = append(results, path) + } + return results +} + +// buildIndexMergePartialPath chooses the best index path from all possible paths. +// Now we choose the index with minimal estimate row count. +func (ds *DataSource) buildIndexMergePartialPath(indexAccessPaths []*util.AccessPath) (*util.AccessPath, error) { + if len(indexAccessPaths) == 1 { + return indexAccessPaths[0], nil + } + + minEstRowIndex := 0 + minEstRow := math.MaxFloat64 + for i := 0; i < len(indexAccessPaths); i++ { + rc := indexAccessPaths[i].CountAfterAccess + if len(indexAccessPaths[i].IndexFilters) > 0 { + rc = indexAccessPaths[i].CountAfterIndex + } + if rc < minEstRow { + minEstRowIndex = i + minEstRow = rc + } + } + return indexAccessPaths[minEstRowIndex], nil +} + +// buildIndexMergeOrPath generates one possible IndexMergePath. 
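// Illustrative sketch (not part of the patch): the two hint predicates above
// side by side. isInIndexMergeHints is permissive (no hint, or a hint without
// index names, lets any index through), while isSpecifiedInIndexMergeHints
// requires the name to be listed explicitly. The hint type is a stand-in.
package example

import "strings"

type indexMergeHint struct{ indexNames []string }

func isInHints(hints []indexMergeHint, name string) bool {
	if len(hints) == 0 {
		return true // no use_index_merge hint at all
	}
	for _, h := range hints {
		if len(h.indexNames) == 0 {
			return true // hint without names allows every index
		}
		for _, hn := range h.indexNames {
			if strings.EqualFold(name, hn) {
				return true
			}
		}
	}
	return false
}

func isSpecifiedInHints(hints []indexMergeHint, name string) bool {
	for _, h := range hints {
		for _, hn := range h.indexNames {
			if strings.EqualFold(name, hn) {
				return true
			}
		}
	}
	return false
}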
+func (ds *DataSource) buildIndexMergeOrPath(filters []expression.Expression, partialPaths []*util.AccessPath, current int) *util.AccessPath { + indexMergePath := &util.AccessPath{PartialIndexPaths: partialPaths} + indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[:current]...) + indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[current+1:]...) + var addCurrentFilter bool + for _, path := range partialPaths { + // If any partial path contains table filters, we need to keep the whole DNF filter in the Selection. + if len(path.TableFilters) > 0 { + addCurrentFilter = true + } + // If any partial path's index filter cannot be pushed to TiKV, we should keep the whole DNF filter. + if len(path.IndexFilters) != 0 && !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, path.IndexFilters, ds.ctx.GetClient(), kv.TiKV) { + addCurrentFilter = true + // Clear IndexFilter, the whole filter will be put in indexMergePath.TableFilters. + path.IndexFilters = nil + } + if len(path.TableFilters) != 0 && !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, path.TableFilters, ds.ctx.GetClient(), kv.TiKV) { + addCurrentFilter = true + path.TableFilters = nil + } + } + if addCurrentFilter { + indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[current]) + } + return indexMergePath +} + +// generateIndexMergeAndPaths generates IndexMerge paths for `AND` (a.k.a. intersection type IndexMerge) +func (ds *DataSource) generateIndexMergeAndPaths(normalPathCnt int) *util.AccessPath { + // For now, we only consider intersection type IndexMerge when the index names are specified in the hints. + if !ds.indexMergeHintsHasSpecifiedIdx() { + return nil + } + + // 1. Collect partial paths from normal paths. + var partialPaths []*util.AccessPath + for i := 0; i < normalPathCnt; i++ { + originalPath := ds.possibleAccessPaths[i] + // No need to consider table path as a partial path. + if ds.possibleAccessPaths[i].IsTablePath() { + continue + } + if !ds.isSpecifiedInIndexMergeHints(originalPath.Index.Name.L) { + continue + } + // If the path contains a full range, ignore it. + if ranger.HasFullRange(originalPath.Ranges, false) { + continue + } + newPath := originalPath.Clone() + partialPaths = append(partialPaths, newPath) + } + if len(partialPaths) < 2 { + return nil + } + + // 2. Collect filters that can't be covered by the partial paths and deduplicate them. + finalFilters := make([]expression.Expression, 0) + partialFilters := make([]expression.Expression, 0, len(partialPaths)) + hashCodeSet := make(map[string]struct{}) + for _, path := range partialPaths { + // Classify filters into coveredConds and notCoveredConds. + coveredConds := make([]expression.Expression, 0, len(path.AccessConds)+len(path.IndexFilters)) + notCoveredConds := make([]expression.Expression, 0, len(path.IndexFilters)+len(path.TableFilters)) + // AccessConds can be covered by partial path. + coveredConds = append(coveredConds, path.AccessConds...) + for i, cond := range path.IndexFilters { + // IndexFilters can be covered by partial path if it can be pushed down to TiKV. + if !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, []expression.Expression{cond}, ds.ctx.GetClient(), kv.TiKV) { + path.IndexFilters = append(path.IndexFilters[:i], path.IndexFilters[i+1:]...) + notCoveredConds = append(notCoveredConds, cond) + } else { + coveredConds = append(coveredConds, cond) + } + } + // TableFilters can't be covered by partial path. 
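// Illustrative sketch (not part of the patch): the cost rule used by
// buildIndexMergePartialPath above. A candidate is ranked by CountAfterIndex
// when it has index filters (the filters shrink the row set) and by
// CountAfterAccess otherwise; the smallest estimate wins. Fields are stand-ins
// for the corresponding AccessPath fields.
package example

import "math"

type candidate struct {
	countAfterAccess float64
	countAfterIndex  float64
	hasIndexFilters  bool
}

func pickCheapest(cands []candidate) int {
	best, bestRows := 0, math.MaxFloat64
	for i, c := range cands {
		rows := c.countAfterAccess
		if c.hasIndexFilters {
			rows = c.countAfterIndex
		}
		if rows < bestRows {
			best, bestRows = i, rows
		}
	}
	return best
}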
+ notCoveredConds = append(notCoveredConds, path.TableFilters...) + + // Record covered filters in hashCodeSet. + // Note that we only record filters that not appear in the notCoveredConds. It's possible that a filter appear + // in both coveredConds and notCoveredConds (e.g. because of prefix index). So we need this extra check to + // avoid wrong deduplication. + notCoveredHashCodeSet := make(map[string]struct{}) + for _, cond := range notCoveredConds { + hashCode := string(cond.HashCode(ds.ctx.GetSessionVars().StmtCtx)) + notCoveredHashCodeSet[hashCode] = struct{}{} + } + for _, cond := range coveredConds { + hashCode := string(cond.HashCode(ds.ctx.GetSessionVars().StmtCtx)) + if _, ok := notCoveredHashCodeSet[hashCode]; !ok { + hashCodeSet[hashCode] = struct{}{} + } + } + + finalFilters = append(finalFilters, notCoveredConds...) + partialFilters = append(partialFilters, coveredConds...) + } + + // Remove covered filters from finalFilters and deduplicate finalFilters. + dedupedFinalFilters := make([]expression.Expression, 0, len(finalFilters)) + for _, cond := range finalFilters { + hashCode := string(cond.HashCode(ds.ctx.GetSessionVars().StmtCtx)) + if _, ok := hashCodeSet[hashCode]; !ok { + dedupedFinalFilters = append(dedupedFinalFilters, cond) + hashCodeSet[hashCode] = struct{}{} + } + } + + // 3. Estimate the row count after partial paths. + sel, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, partialFilters, nil) + if err != nil { + logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err)) + sel = SelectionFactor + } + + indexMergePath := &util.AccessPath{ + PartialIndexPaths: partialPaths, + IndexMergeIsIntersection: true, + TableFilters: dedupedFinalFilters, + CountAfterAccess: sel * ds.tableStats.RowCount, + } + return indexMergePath +} + +func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expression.Expression, needPrune bool) error { + regularPathCount := len(ds.possibleAccessPaths) + // 1. Generate possible IndexMerge paths for `OR`. + err := ds.generateIndexMergeOrPaths(indexMergeConds) + if err != nil { + return err + } + // 2. Generate possible IndexMerge paths for `AND`. + indexMergeAndPath := ds.generateIndexMergeAndPaths(regularPathCount) + if indexMergeAndPath != nil { + ds.possibleAccessPaths = append(ds.possibleAccessPaths, indexMergeAndPath) + } + // 3. Generate possible IndexMerge paths for MVIndex. + mvIndexMergePath, err := ds.generateIndexMergeJSONMVIndexPath(regularPathCount, indexMergeConds) + if err != nil { + return err + } + if mvIndexMergePath != nil { + ds.possibleAccessPaths = append(ds.possibleAccessPaths, mvIndexMergePath...) + } + + // 4. If needed, append a warning if no IndexMerge is generated. + + // If without hints, it means that `enableIndexMerge` is true + if len(ds.indexMergeHints) == 0 { + return nil + } + // With hints and without generated IndexMerge paths + if regularPathCount == len(ds.possibleAccessPaths) { + ds.indexMergeHints = nil + ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable")) + return nil + } + + // 4. If needPrune is true, prune non-IndexMerge paths. + + // Do not need to consider the regular paths in find_best_task(). + // So we can use index merge's row count as DataSource's row count. 
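// Illustrative sketch (not part of the patch): the filter bookkeeping in
// generateIndexMergeAndPaths above. A filter counts as covered only if some
// partial path covers it and that same path does not also list it as
// not-covered (a prefix index can put one filter in both sets); the remaining
// filters are deduplicated by hash code. The string slices here stand in for
// expression hash codes, with coveredByPath[i] / notCoveredByPath[i] belonging
// to the i-th partial path.
package example

func dedupFinalFilters(coveredByPath, notCoveredByPath [][]string) []string {
	covered := make(map[string]struct{})
	for i := range coveredByPath {
		notCovered := make(map[string]struct{}, len(notCoveredByPath[i]))
		for _, h := range notCoveredByPath[i] {
			notCovered[h] = struct{}{}
		}
		for _, h := range coveredByPath[i] {
			if _, alsoNotCovered := notCovered[h]; !alsoNotCovered {
				covered[h] = struct{}{}
			}
		}
	}
	seen := make(map[string]struct{})
	var final []string
	for _, conds := range notCoveredByPath {
		for _, h := range conds {
			if _, ok := covered[h]; ok {
				continue // some partial path already evaluates it
			}
			if _, dup := seen[h]; dup {
				continue // keep each remaining filter once
			}
			seen[h] = struct{}{}
			final = append(final, h)
		}
	}
	return final
}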
+ if needPrune { + ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] + minRowCount := ds.possibleAccessPaths[0].CountAfterAccess + for _, path := range ds.possibleAccessPaths { + if minRowCount < path.CountAfterAccess { + minRowCount = path.CountAfterAccess + } + } + if ds.stats.RowCount > minRowCount { + ds.stats = ds.tableStats.ScaleByExpectCnt(minRowCount) + } + } + return nil +} + +// generateIndexMergeJSONMVIndexPath generates paths for (json_member_of / json_overlaps / json_contains) on multi-valued index. +/* + 1. select * from t where 1 member of (a) + IndexMerge(AND) + IndexRangeScan(a, [1,1]) + TableRowIdScan(t) + 2. select * from t where json_contains(a, '[1, 2, 3]') + IndexMerge(AND) + IndexRangeScan(a, [1,1]) + IndexRangeScan(a, [2,2]) + IndexRangeScan(a, [3,3]) + TableRowIdScan(t) + 3. select * from t where json_overlap(a, '[1, 2, 3]') + IndexMerge(OR) + IndexRangeScan(a, [1,1]) + IndexRangeScan(a, [2,2]) + IndexRangeScan(a, [3,3]) + TableRowIdScan(t) +*/ +func (ds *DataSource) generateIndexMergeJSONMVIndexPath(normalPathCnt int, filters []expression.Expression) (mvIndexPaths []*util.AccessPath, err error) { + for idx := 0; idx < normalPathCnt; idx++ { + if ds.possibleAccessPaths[idx].IsTablePath() || ds.possibleAccessPaths[idx].Index == nil || !ds.possibleAccessPaths[idx].Index.MVIndex { + continue // not a MVIndex path + } + if !ds.isSpecifiedInIndexMergeHints(ds.possibleAccessPaths[idx].Index.Name.L) { + continue // for safety, only consider using MVIndex when there is a `use_index_merge` hint now. + // TODO: remove this limitation + } + + // Step 1. Extract the underlying JSON column from MVIndex Info. + mvIndex := ds.possibleAccessPaths[idx].Index + if len(mvIndex.Columns) != 1 { + // only support single-column MVIndex now: idx((cast(a->'$.zip' as signed array))) + // TODO: support composite MVIndex idx((x, cast(a->'$.zip' as int array), z)) + continue + } + mvVirColOffset := mvIndex.Columns[0].Offset + mvVirColMeta := ds.table.Meta().Cols()[mvVirColOffset] + + var virCol *expression.Column + for _, ce := range ds.TblCols { + if ce.ID == mvVirColMeta.ID { + virCol = ce.Clone().(*expression.Column) + virCol.RetType = ce.GetType().ArrayType() // use the underlying type directly: JSON-ARRAY(INT) --> INT + break + } + } + // unwrap the outside cast: cast(json_extract(test.t.a, $.zip), JSON) --> json_extract(test.t.a, $.zip) + targetJSONPath, ok := unwrapJSONCast(virCol.VirtualExpr) + if !ok { + continue + } + + // Step 2. Iterate all filters and generate corresponding IndexMerge paths. + for filterIdx, filter := range filters { + // Step 2.1. Extract jsonPath and vals from json_member / json_overlaps / json_contains functions. + sf, ok := filter.(*expression.ScalarFunction) + if !ok { + continue + } + + var jsonPath expression.Expression + var vals []expression.Expression + switch sf.FuncName.L { + case ast.JSONMemberOf: // (1 member of a->'$.zip') + jsonPath = sf.GetArgs()[1] + v, ok := unwrapJSONCast(sf.GetArgs()[0]) // cast(1 as json) --> 1 + if !ok { + continue + } + vals = append(vals, v) + case ast.JSONOverlaps: // (json_overlaps(a->'$.zip', '[1, 2, 3]') + continue // TODO: support json_overlaps + case ast.JSONContains: // (json_contains(a->'$.zip', '[1, 2, 3]') + continue // TODO: support json_contains + default: + continue + } + + // Step 2.2. Check some limitations. 
+ if jsonPath == nil || len(vals) == 0 { + continue + } + if !jsonPath.Equal(ds.ctx, targetJSONPath) { + continue // not on the same JSON col + } + // only support INT now + // TODO: support more types + if jsonPath.GetType().EvalType() == types.ETInt { + continue + } + allInt := true + // TODO: support using IndexLookUp to handle single-value cases. + for _, v := range vals { + if v.GetType().EvalType() != types.ETInt { + allInt = false + } + } + if !allInt { + continue + } + + // Step 2.3. Generate a IndexMerge Path of this filter on the current MVIndex. + var partialPaths []*util.AccessPath + for _, v := range vals { + partialPath := &util.AccessPath{Index: mvIndex} + partialPath.Ranges = ranger.FullRange() + // TODO: get the actual column length of this virtual column + partialPath.IdxCols, partialPath.IdxColLens = []*expression.Column{virCol}, []int{types.UnspecifiedLength} + partialPath.FullIdxCols, partialPath.FullIdxColLens = []*expression.Column{virCol}, []int{types.UnspecifiedLength} + + // calculate the path range with the condition `a->'$.zip' = 1`. + eq, err := expression.NewFunction(ds.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), virCol, v) + if err != nil { + return nil, err + } + if err = ds.detachCondAndBuildRangeForPath(partialPath, []expression.Expression{eq}); err != nil { + return nil, err + } + + partialPaths = append(partialPaths, partialPath) + } + indexMergePath := ds.buildIndexMergeOrPath(filters, partialPaths, filterIdx) + mvIndexPaths = append(mvIndexPaths, indexMergePath) + } + } + return +} + +func unwrapJSONCast(expr expression.Expression) (expression.Expression, bool) { + if expr == nil { + return nil, false + } + sf, ok := expr.(*expression.ScalarFunction) + if !ok { + return nil, false + } + if sf == nil || sf.FuncName.L != ast.Cast || sf.GetType().EvalType() != types.ETJson { + return nil, false + } + return sf.GetArgs()[0], true +} diff --git a/planner/core/indexmerge_path_test.go b/planner/core/indexmerge_path_test.go new file mode 100644 index 0000000000000..fcb0d27903c64 --- /dev/null +++ b/planner/core/indexmerge_path_test.go @@ -0,0 +1,53 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core_test + +import ( + "testing" + + "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testdata" +) + +func TestIndexMergeJSONMemberOf(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table t( +a int, j0 json, j1 json, +index j0_0((cast(j0->'$.path0' as signed array))), +index j0_1((cast(j0->'$.path1' as signed array))), +index j1((cast(j1 as signed array))))`) + + var input []string + var output []struct { + SQL string + Plan []string + } + planSuiteData := core.GetIndexMergeSuiteData() + planSuiteData.LoadTestCases(t, &input, &output) + + for i, query := range input { + testdata.OnRecord(func() { + output[i].SQL = query + }) + result := tk.MustQuery("explain format = 'brief' " + query) + testdata.OnRecord(func() { + output[i].Plan = testdata.ConvertRowsToStrings(result.Rows()) + }) + result.Check(testkit.Rows(output[i].Plan...)) + } +} diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 743c6b87dc6d0..307971e2d77e1 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -7520,6 +7520,39 @@ func TestCastTimeAsDurationToTiFlash(t *testing.T) { tk.MustQuery("explain select cast(a as time), cast(b as time) from t;").CheckAt([]int{0, 2, 4}, rows) } +func TestUnhexPushDownToTiFlash(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b varchar(20));") + tk.MustExec("insert into t values(6162, '7469666C617368');") + tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1;") + tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") + + tbl, err := dom.InfoSchema().TableByName(model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + require.NoError(t, err) + // Set the hacked TiFlash replica for explain tests. + tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} + + rows := [][]interface{}{ + {"TableReader_9", "root", "data:ExchangeSender_8"}, + {"└─ExchangeSender_8", "mpp[tiflash]", "ExchangeType: PassThrough"}, + {" └─Projection_4", "mpp[tiflash]", "unhex(cast(test.t.a, var_string(20)))->Column#4"}, + {" └─TableFullScan_7", "mpp[tiflash]", "keep order:false, stats:pseudo"}, + } + tk.MustQuery("explain select unhex(a) from t;").CheckAt([]int{0, 2, 4}, rows) + + rows = [][]interface{}{ + {"TableReader_9", "root", "data:ExchangeSender_8"}, + {"└─ExchangeSender_8", "mpp[tiflash]", "ExchangeType: PassThrough"}, + {" └─Projection_4", "mpp[tiflash]", "unhex(test.t.b)->Column#4"}, + {" └─TableFullScan_7", "mpp[tiflash]", "keep order:false, stats:pseudo"}, + } + tk.MustQuery("explain select unhex(b) from t;").CheckAt([]int{0, 2, 4}, rows) +} + func TestPartitionTableFallBackStatic(t *testing.T) { store, _ := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) @@ -7792,8 +7825,6 @@ func TestPlanCacheForTableRangeFallback(t *testing.T) { tk.MustExec("set @a=10, @b=20, @c=30, @d=40, @e=50") tk.MustExec("execute stmt using @a, @b, @c, @d, @e") tk.MustQuery("show warnings").Sort().Check(testkit.Rows("Warning 1105 Memory capacity of 10 bytes for 'tidb_opt_range_max_size' exceeded when building ranges. 
Less accurate ranges such as full range are chosen", - "Warning 1105 skip plan-cache: in-list is too long", - "Warning 1105 skip plan-cache: in-list is too long", "Warning 1105 skip plan-cache: in-list is too long")) tk.MustExec("execute stmt using @a, @b, @c, @d, @e") // The plan with range fallback is not cached. @@ -7842,7 +7873,6 @@ func TestPlanCacheForIndexRangeFallback(t *testing.T) { tk.MustExec("set @a='aa', @b='bb', @c='cc', @d='dd', @e='ee', @f='ff', @g='gg', @h='hh', @i='ii', @j='jj'") tk.MustExec("execute stmt2 using @a, @b, @c, @d, @e, @f, @g, @h, @i, @j") tk.MustQuery("show warnings").Sort().Check(testkit.Rows("Warning 1105 Memory capacity of 1330 bytes for 'tidb_opt_range_max_size' exceeded when building ranges. Less accurate ranges such as full range are chosen", - "Warning 1105 skip plan-cache: in-list is too long", "Warning 1105 skip plan-cache: in-list is too long")) tk.MustExec("execute stmt2 using @a, @b, @c, @d, @e, @f, @g, @h, @i, @j") tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) @@ -8000,10 +8030,6 @@ func TestPlanCacheForIndexJoinRangeFallback(t *testing.T) { tk.MustExec("set @a='a', @b='b', @c='c', @d='d', @e='e'") tk.MustExec("execute stmt2 using @a, @b, @c, @d, @e") tk.MustQuery("show warnings").Sort().Check(testkit.Rows("Warning 1105 Memory capacity of 1275 bytes for 'tidb_opt_range_max_size' exceeded when building ranges. Less accurate ranges such as full range are chosen", - "Warning 1105 skip plan-cache: in-list is too long", - "Warning 1105 skip plan-cache: in-list is too long", - "Warning 1105 skip plan-cache: in-list is too long", - "Warning 1105 skip plan-cache: in-list is too long", "Warning 1105 skip plan-cache: in-list is too long")) tk.MustExec("execute stmt2 using @a, @b, @c, @d, @e") tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 6bc8a677bc6de..592bb55f79619 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -4673,7 +4673,10 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as if i < len(columns) { if columns[i].IsGenerated() && !columns[i].GeneratedStored { var err error + originVal := b.allowBuildCastArray + b.allowBuildCastArray = true expr, _, err = b.rewrite(ctx, columns[i].GeneratedExpr, ds, nil, true) + b.allowBuildCastArray = originVal if err != nil { return nil, err } @@ -5736,7 +5739,10 @@ func (b *PlanBuilder) buildUpdateLists(ctx context.Context, tableList []*ast.Tab } } + o := b.allowBuildCastArray + b.allowBuildCastArray = true newExpr, np, err = b.rewriteWithPreprocess(ctx, assign.Expr, p, nil, nil, false, rewritePreprocess(assign)) + b.allowBuildCastArray = o if err != nil { return nil, nil, false, err } diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index 1064ea529b2d0..1a3e1ea5ab821 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -782,8 +782,7 @@ func setupFineGrainedShuffleInternal(plan PhysicalPlan, helper *fineGrainedShuff // Todo: make more careful check here. 
func checkPlanCacheable(sctx sessionctx.Context, plan PhysicalPlan) { if sctx.GetSessionVars().StmtCtx.UseCache && useTiFlash(plan) { - sctx.GetSessionVars().StmtCtx.SkipPlanCache = true - sctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("skip plan-cache: TiFlash plan is un-cacheable")) + sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: TiFlash plan is un-cacheable")) } } diff --git a/planner/core/plan_cache.go b/planner/core/plan_cache.go index ab4eb4e4912ab..8036f4067ce65 100644 --- a/planner/core/plan_cache.go +++ b/planner/core/plan_cache.go @@ -278,9 +278,9 @@ func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isNonPrepared // We only cache the tableDual plan when the number of parameters are zero. if containTableDual(p) && paramNum > 0 { - stmtCtx.SkipPlanCache = true + stmtCtx.SetSkipPlanCache(errors.New("skip plan-cache: get a TableDual plan")) } - if stmtAst.UseCache && !stmtCtx.SkipPlanCache && !ignorePlanCache { + if stmtCtx.UseCache && !ignorePlanCache { // rebuild key to exclude kv.TiFlash when stmt is not read only if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmtAst.Stmt, sessVars) { delete(sessVars.IsolationReadEngines, kv.TiFlash) @@ -640,7 +640,7 @@ func CheckPreparedPriv(sctx sessionctx.Context, stmt *PlanCacheStmt, is infosche // short paths for these executions, currently "point select" and "point update" func tryCachePointPlan(_ context.Context, sctx sessionctx.Context, stmt *PlanCacheStmt, _ infoschema.InfoSchema, p Plan) error { - if !sctx.GetSessionVars().StmtCtx.UseCache || sctx.GetSessionVars().StmtCtx.SkipPlanCache { + if !sctx.GetSessionVars().StmtCtx.UseCache { return nil } var ( diff --git a/planner/core/plan_cache_test.go b/planner/core/plan_cache_test.go index a480c583d0434..e25565a110e08 100644 --- a/planner/core/plan_cache_test.go +++ b/planner/core/plan_cache_test.go @@ -221,6 +221,88 @@ func TestIssue38533(t *testing.T) { tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) } +func TestInvalidRange(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int, key(a))") + tk.MustExec("prepare st from 'select * from t where a>? and a 0 && len(ds.possibleAccessPaths) > 1 - sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || len(ds.indexMergeHints) > 0) && !stmtCtx.NoIndexMergeHint - // We current do not consider `IndexMergePath`: - // 1. If there is an index path. - // 2. TODO: If there exists exprs that cannot be pushed down. This is to avoid wrongly estRow of Selection added by rule_predicate_push_down. - needConsiderIndexMerge := true - if len(ds.indexMergeHints) == 0 { - for i := 1; i < len(ds.possibleAccessPaths); i++ { - if len(ds.possibleAccessPaths[i].AccessConds) != 0 { - needConsiderIndexMerge = false - break - } - } - if needConsiderIndexMerge { - // PushDownExprs() will append extra warnings, which is annoying. So we reset warnings here. 
- warnings := stmtCtx.GetWarnings() - extraWarnings := stmtCtx.GetExtraWarnings() - _, remaining := expression.PushDownExprs(stmtCtx, indexMergeConds, ds.ctx.GetClient(), kv.UnSpecified) - stmtCtx.SetWarnings(warnings) - stmtCtx.SetExtraWarnings(extraWarnings) - if len(remaining) != 0 { - needConsiderIndexMerge = false - } - } + if err := ds.generateIndexMergePath(); err != nil { + return nil, err } - if isPossibleIdxMerge && sessionAndStmtPermission && needConsiderIndexMerge && ds.tableInfo.TempTableType != model.TempTableLocal { - err := ds.generateAndPruneIndexMergePath(indexMergeConds, ds.indexMergeHints != nil) - if err != nil { - return nil, err - } - } else if len(ds.indexMergeHints) > 0 { - ds.indexMergeHints = nil - var msg string - if !isPossibleIdxMerge { - msg = "No available filter or available index." - } else if !sessionAndStmtPermission { - msg = "Got no_index_merge hint or tidb_enable_index_merge is off." - } else if ds.tableInfo.TempTableType == model.TempTableLocal { - msg = "Cannot use IndexMerge on temporary table." - } - msg = fmt.Sprintf("IndexMerge is inapplicable or disabled. %s", msg) - stmtCtx.AppendWarning(errors.Errorf(msg)) - logutil.BgLogger().Debug(msg) - } return ds.stats, nil } -func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expression.Expression, needPrune bool) error { - regularPathCount := len(ds.possibleAccessPaths) - // 1. Generate possible IndexMerge paths for `OR`. - err := ds.generateIndexMergeOrPaths(indexMergeConds) - if err != nil { - return err - } - // 2. Generate possible IndexMerge paths for `AND`. - indexMergeAndPath := ds.generateIndexMergeAndPaths(regularPathCount) - if indexMergeAndPath != nil { - ds.possibleAccessPaths = append(ds.possibleAccessPaths, indexMergeAndPath) - } - - // 3. If needed, append a warning if no IndexMerge is generated. - - // If without hints, it means that `enableIndexMerge` is true - if len(ds.indexMergeHints) == 0 { - return nil - } - // With hints and without generated IndexMerge paths - if regularPathCount == len(ds.possibleAccessPaths) { - ds.indexMergeHints = nil - ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable")) - return nil - } - - // 4. If needPrune is true, prune non-IndexMerge paths. - - // Do not need to consider the regular paths in find_best_task(). - // So we can use index merge's row count as DataSource's row count. - if needPrune { - ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] - minRowCount := ds.possibleAccessPaths[0].CountAfterAccess - for _, path := range ds.possibleAccessPaths { - if minRowCount < path.CountAfterAccess { - minRowCount = path.CountAfterAccess - } - } - if ds.stats.RowCount > minRowCount { - ds.stats = ds.tableStats.ScaleByExpectCnt(minRowCount) - } - } - return nil -} - // DeriveStats implements LogicalPlan DeriveStats interface. func (ts *LogicalTableScan) DeriveStats(_ []*property.StatsInfo, _ *expression.Schema, _ []*expression.Schema, _ [][]*expression.Column) (_ *property.StatsInfo, err error) { ts.Source.initStats(nil) @@ -566,337 +467,6 @@ func (is *LogicalIndexScan) DeriveStats(_ []*property.StatsInfo, selfSchema *exp return is.stats, nil } -// getIndexMergeOrPath generates all possible IndexMergeOrPaths. 
-func (ds *DataSource) generateIndexMergeOrPaths(filters []expression.Expression) error { - usedIndexCount := len(ds.possibleAccessPaths) - for i, cond := range filters { - sf, ok := cond.(*expression.ScalarFunction) - if !ok || sf.FuncName.L != ast.LogicOr { - continue - } - var partialPaths = make([]*util.AccessPath, 0, usedIndexCount) - dnfItems := expression.FlattenDNFConditions(sf) - for _, item := range dnfItems { - cnfItems := expression.SplitCNFItems(item) - itemPaths := ds.accessPathsForConds(cnfItems, usedIndexCount) - if len(itemPaths) == 0 { - partialPaths = nil - break - } - partialPath, err := ds.buildIndexMergePartialPath(itemPaths) - if err != nil { - return err - } - if partialPath == nil { - partialPaths = nil - break - } - partialPaths = append(partialPaths, partialPath) - } - // If all of the partialPaths use the same index, we will not use the indexMerge. - singlePath := true - for i := len(partialPaths) - 1; i >= 1; i-- { - if partialPaths[i].Index != partialPaths[i-1].Index { - singlePath = false - break - } - } - if singlePath { - continue - } - if len(partialPaths) > 1 { - possiblePath := ds.buildIndexMergeOrPath(filters, partialPaths, i) - if possiblePath == nil { - return nil - } - - accessConds := make([]expression.Expression, 0, len(partialPaths)) - for _, p := range partialPaths { - indexCondsForP := p.AccessConds[:] - indexCondsForP = append(indexCondsForP, p.IndexFilters...) - if len(indexCondsForP) > 0 { - accessConds = append(accessConds, expression.ComposeCNFCondition(ds.ctx, indexCondsForP...)) - } - } - accessDNF := expression.ComposeDNFCondition(ds.ctx, accessConds...) - sel, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, []expression.Expression{accessDNF}, nil) - if err != nil { - logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err)) - sel = SelectionFactor - } - possiblePath.CountAfterAccess = sel * ds.tableStats.RowCount - ds.possibleAccessPaths = append(ds.possibleAccessPaths, possiblePath) - } - } - return nil -} - -// isInIndexMergeHints returns true if the input index name is not excluded by the IndexMerge hints, which means either -// (1) there's no IndexMerge hint, (2) there's IndexMerge hint but no specified index names, or (3) the input index -// name is specified in the IndexMerge hints. -func (ds *DataSource) isInIndexMergeHints(name string) bool { - if len(ds.indexMergeHints) == 0 { - return true - } - for _, hint := range ds.indexMergeHints { - if hint.indexHint == nil || len(hint.indexHint.IndexNames) == 0 { - return true - } - for _, hintName := range hint.indexHint.IndexNames { - if strings.EqualFold(strings.ToLower(name), strings.ToLower(hintName.String())) { - return true - } - } - } - return false -} - -// indexMergeHintsHasSpecifiedIdx returns true if there's IndexMerge hint, and it has specified index names. -func (ds *DataSource) indexMergeHintsHasSpecifiedIdx() bool { - for _, hint := range ds.indexMergeHints { - if hint.indexHint == nil || len(hint.indexHint.IndexNames) == 0 { - continue - } - if len(hint.indexHint.IndexNames) > 0 { - return true - } - } - return false -} - -// indexMergeHintsHasSpecifiedIdx return true if the input index name is specified in the IndexMerge hint. 
-func (ds *DataSource) isSpecifiedInIndexMergeHints(name string) bool { - for _, hint := range ds.indexMergeHints { - if hint.indexHint == nil || len(hint.indexHint.IndexNames) == 0 { - continue - } - for _, hintName := range hint.indexHint.IndexNames { - if strings.EqualFold(strings.ToLower(name), strings.ToLower(hintName.String())) { - return true - } - } - } - return false -} - -// accessPathsForConds generates all possible index paths for conditions. -func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, usedIndexCount int) []*util.AccessPath { - var results = make([]*util.AccessPath, 0, usedIndexCount) - for i := 0; i < usedIndexCount; i++ { - path := &util.AccessPath{} - if ds.possibleAccessPaths[i].IsTablePath() { - if !ds.isInIndexMergeHints("primary") { - continue - } - if ds.tableInfo.IsCommonHandle { - path.IsCommonHandlePath = true - path.Index = ds.possibleAccessPaths[i].Index - } else { - path.IsIntHandlePath = true - } - err := ds.deriveTablePathStats(path, conditions, true) - if err != nil { - logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err)) - continue - } - var unsignedIntHandle bool - if path.IsIntHandlePath && ds.tableInfo.PKIsHandle { - if pkColInfo := ds.tableInfo.GetPkColInfo(); pkColInfo != nil { - unsignedIntHandle = mysql.HasUnsignedFlag(pkColInfo.GetFlag()) - } - } - // If the path contains a full range, ignore it. - if ranger.HasFullRange(path.Ranges, unsignedIntHandle) { - continue - } - // If we have point or empty range, just remove other possible paths. - if len(path.Ranges) == 0 || path.OnlyPointRange(ds.SCtx()) { - if len(results) == 0 { - results = append(results, path) - } else { - results[0] = path - results = results[:1] - } - break - } - } else { - path.Index = ds.possibleAccessPaths[i].Index - if !ds.isInIndexMergeHints(path.Index.Name.L) { - continue - } - err := ds.fillIndexPath(path, conditions) - if err != nil { - logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err)) - continue - } - ds.deriveIndexPathStats(path, conditions, true) - // If the path contains a full range, ignore it. - if ranger.HasFullRange(path.Ranges, false) { - continue - } - // If we have empty range, or point range on unique index, just remove other possible paths. - if len(path.Ranges) == 0 || (path.OnlyPointRange(ds.SCtx()) && path.Index.Unique) { - if len(results) == 0 { - results = append(results, path) - } else { - results[0] = path - results = results[:1] - } - break - } - } - results = append(results, path) - } - return results -} - -// buildIndexMergePartialPath chooses the best index path from all possible paths. -// Now we choose the index with minimal estimate row count. -func (ds *DataSource) buildIndexMergePartialPath(indexAccessPaths []*util.AccessPath) (*util.AccessPath, error) { - if len(indexAccessPaths) == 1 { - return indexAccessPaths[0], nil - } - - minEstRowIndex := 0 - minEstRow := math.MaxFloat64 - for i := 0; i < len(indexAccessPaths); i++ { - rc := indexAccessPaths[i].CountAfterAccess - if len(indexAccessPaths[i].IndexFilters) > 0 { - rc = indexAccessPaths[i].CountAfterIndex - } - if rc < minEstRow { - minEstRowIndex = i - minEstRow = rc - } - } - return indexAccessPaths[minEstRowIndex], nil -} - -// buildIndexMergeOrPath generates one possible IndexMergePath. 
-func (ds *DataSource) buildIndexMergeOrPath(filters []expression.Expression, partialPaths []*util.AccessPath, current int) *util.AccessPath { - indexMergePath := &util.AccessPath{PartialIndexPaths: partialPaths} - indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[:current]...) - indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[current+1:]...) - var addCurrentFilter bool - for _, path := range partialPaths { - // If any partial path contains table filters, we need to keep the whole DNF filter in the Selection. - if len(path.TableFilters) > 0 { - addCurrentFilter = true - } - // If any partial path's index filter cannot be pushed to TiKV, we should keep the whole DNF filter. - if len(path.IndexFilters) != 0 && !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, path.IndexFilters, ds.ctx.GetClient(), kv.TiKV) { - addCurrentFilter = true - // Clear IndexFilter, the whole filter will be put in indexMergePath.TableFilters. - path.IndexFilters = nil - } - if len(path.TableFilters) != 0 && !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, path.TableFilters, ds.ctx.GetClient(), kv.TiKV) { - addCurrentFilter = true - path.TableFilters = nil - } - } - if addCurrentFilter { - indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[current]) - } - return indexMergePath -} - -// generateIndexMergeAndPaths generates IndexMerge paths for `AND` (a.k.a. intersection type IndexMerge) -func (ds *DataSource) generateIndexMergeAndPaths(normalPathCnt int) *util.AccessPath { - // For now, we only consider intersection type IndexMerge when the index names are specified in the hints. - if !ds.indexMergeHintsHasSpecifiedIdx() { - return nil - } - - // 1. Collect partial paths from normal paths. - var partialPaths []*util.AccessPath - for i := 0; i < normalPathCnt; i++ { - originalPath := ds.possibleAccessPaths[i] - // No need to consider table path as a partial path. - if ds.possibleAccessPaths[i].IsTablePath() { - continue - } - if !ds.isSpecifiedInIndexMergeHints(originalPath.Index.Name.L) { - continue - } - // If the path contains a full range, ignore it. - if ranger.HasFullRange(originalPath.Ranges, false) { - continue - } - newPath := originalPath.Clone() - partialPaths = append(partialPaths, newPath) - } - if len(partialPaths) < 2 { - return nil - } - - // 2. Collect filters that can't be covered by the partial paths and deduplicate them. - finalFilters := make([]expression.Expression, 0) - partialFilters := make([]expression.Expression, 0, len(partialPaths)) - hashCodeSet := make(map[string]struct{}) - for _, path := range partialPaths { - // Classify filters into coveredConds and notCoveredConds. - coveredConds := make([]expression.Expression, 0, len(path.AccessConds)+len(path.IndexFilters)) - notCoveredConds := make([]expression.Expression, 0, len(path.IndexFilters)+len(path.TableFilters)) - // AccessConds can be covered by partial path. - coveredConds = append(coveredConds, path.AccessConds...) - for i, cond := range path.IndexFilters { - // IndexFilters can be covered by partial path if it can be pushed down to TiKV. - if !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, []expression.Expression{cond}, ds.ctx.GetClient(), kv.TiKV) { - path.IndexFilters = append(path.IndexFilters[:i], path.IndexFilters[i+1:]...) - notCoveredConds = append(notCoveredConds, cond) - } else { - coveredConds = append(coveredConds, cond) - } - } - // TableFilters can't be covered by partial path. 
- notCoveredConds = append(notCoveredConds, path.TableFilters...) - - // Record covered filters in hashCodeSet. - // Note that we only record filters that not appear in the notCoveredConds. It's possible that a filter appear - // in both coveredConds and notCoveredConds (e.g. because of prefix index). So we need this extra check to - // avoid wrong deduplication. - notCoveredHashCodeSet := make(map[string]struct{}) - for _, cond := range notCoveredConds { - hashCode := string(cond.HashCode(ds.ctx.GetSessionVars().StmtCtx)) - notCoveredHashCodeSet[hashCode] = struct{}{} - } - for _, cond := range coveredConds { - hashCode := string(cond.HashCode(ds.ctx.GetSessionVars().StmtCtx)) - if _, ok := notCoveredHashCodeSet[hashCode]; !ok { - hashCodeSet[hashCode] = struct{}{} - } - } - - finalFilters = append(finalFilters, notCoveredConds...) - partialFilters = append(partialFilters, coveredConds...) - } - - // Remove covered filters from finalFilters and deduplicate finalFilters. - dedupedFinalFilters := make([]expression.Expression, 0, len(finalFilters)) - for _, cond := range finalFilters { - hashCode := string(cond.HashCode(ds.ctx.GetSessionVars().StmtCtx)) - if _, ok := hashCodeSet[hashCode]; !ok { - dedupedFinalFilters = append(dedupedFinalFilters, cond) - hashCodeSet[hashCode] = struct{}{} - } - } - - // 3. Estimate the row count after partial paths. - sel, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, partialFilters, nil) - if err != nil { - logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err)) - sel = SelectionFactor - } - - indexMergePath := &util.AccessPath{ - PartialIndexPaths: partialPaths, - IndexMergeIsIntersection: true, - TableFilters: dedupedFinalFilters, - CountAfterAccess: sel * ds.tableStats.RowCount, - } - return indexMergePath -} - // DeriveStats implement LogicalPlan DeriveStats interface. func (p *LogicalSelection) DeriveStats(childStats []*property.StatsInfo, _ *expression.Schema, _ []*expression.Schema, _ [][]*expression.Column) (*property.StatsInfo, error) { if p.stats != nil { diff --git a/planner/core/task.go b/planner/core/task.go index 99952038688fe..c595880c9a37b 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1982,10 +1982,6 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { } attachPlan2Task(proj, newMpp) return newMpp - case NoMpp: - t = mpp.convertToRootTask(p.ctx) - attachPlan2Task(p, t) - return t default: return invalidTask } @@ -2072,6 +2068,19 @@ type mppTask struct { partTp property.MPPPartitionType hashCols []*property.MPPPartitionColumn + + // rootTaskConds record filters of TableScan that cannot be pushed down to TiFlash. + + // For logical plan like: HashAgg -> Selection -> TableScan, if filters in Selection cannot be pushed to TiFlash. + // Planner will generate physical plan like: PhysicalHashAgg -> PhysicalSelection -> TableReader -> PhysicalTableScan(cop tiflash) + // Because planner will make mppTask invalid directly then use copTask directly. + + // But in DisaggregatedTiFlash mode, cop and batchCop protocol is disabled, so we have to consider this situation for mppTask. + // When generating PhysicalTableScan, if prop.TaskTp is RootTaskType, mppTask will be converted to rootTask, + // and filters in rootTaskConds will be added in a Selection which will be executed in TiDB. 
+ // So physical plan be like: PhysicalHashAgg -> PhysicalSelection -> TableReader -> ExchangeSender -> PhysicalTableScan(mpp tiflash) + rootTaskConds []expression.Expression + tblColHists *statistics.HistColl } func (t *mppTask) count() float64 { @@ -2151,6 +2160,32 @@ func (t *mppTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { rt := &rootTask{ p: p, } + + if len(t.rootTaskConds) > 0 { + // Some Filter cannot be pushed down to TiFlash, need to add Selection in rootTask, + // so this Selection will be executed in TiDB. + _, isTableScan := t.p.(*PhysicalTableScan) + _, isSelection := t.p.(*PhysicalSelection) + if isSelection { + _, isTableScan = t.p.Children()[0].(*PhysicalTableScan) + } + if !isTableScan { + // Need to make sure oriTaskPlan is TableScan, because rootTaskConds is part of TableScan.FilterCondition. + // It's only for TableScan. This is ensured by converting mppTask to rootTask just after TableScan is built, + // so no other operators are added into this mppTask. + logutil.BgLogger().Error("expect Selection or TableScan for mppTask.p", zap.String("mppTask.p", t.p.TP())) + return invalidTask + } + selectivity, _, err := t.tblColHists.Selectivity(ctx, t.rootTaskConds, nil) + if err != nil { + logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) + selectivity = SelectionFactor + } + sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, rt.p.statsInfo().Scale(selectivity), rt.p.SelectBlockOffset()) + sel.fromDataSource = true + sel.SetChildren(rt.p) + rt.p = sel + } return rt } diff --git a/planner/core/testdata/index_merge_suite_in.json b/planner/core/testdata/index_merge_suite_in.json index db7ebacdb29c7..2841de33bae0c 100644 --- a/planner/core/testdata/index_merge_suite_in.json +++ b/planner/core/testdata/index_merge_suite_in.json @@ -1,4 +1,14 @@ [ + { + "name": "TestIndexMergeJSONMemberOf", + "cases": [ + "select /*+ use_index_merge(t, j0_0) */ * from t where (1 member of (j0->'$.path0'))", + "select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.path1')) and a<10", + "select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.XXX')) and a<10", + "select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.path1')) and (2 member of (j1)) and a<10", + "select /*+ use_index_merge(t, j1) */ * from t where (1 member of (j0->'$.path1')) and (2 member of (j1)) and a<10" + ] + }, { "name": "TestIndexMergePathGeneration", "cases": [ diff --git a/planner/core/testdata/index_merge_suite_out.json b/planner/core/testdata/index_merge_suite_out.json index 3d67e5e372251..31427fbf4c7e0 100644 --- a/planner/core/testdata/index_merge_suite_out.json +++ b/planner/core/testdata/index_merge_suite_out.json @@ -1,4 +1,57 @@ [ + { + "Name": "TestIndexMergeJSONMemberOf", + "Cases": [ + { + "SQL": "select /*+ use_index_merge(t, j0_0) */ * from t where (1 member of (j0->'$.path0'))", + "Plan": [ + "Selection 0.00 root json_memberof(cast(1, json BINARY), json_extract(test.t.j0, \"$.path0\"))", + "└─IndexMerge 0.00 root type: union", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:j0_0(cast(json_extract(`j0`, _utf8mb4'$.path0') as signed array)) range:[1,1], keep order:false, stats:pseudo", + " └─TableRowIDScan(Probe) 0.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.path1')) and a<10", + "Plan": [ + "Selection 0.00 root json_memberof(cast(1, json BINARY), 
json_extract(test.t.j0, \"$.path1\"))", + "└─IndexMerge 0.00 root type: union", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:j0_1(cast(json_extract(`j0`, _utf8mb4'$.path1') as signed array)) range:[1,1], keep order:false, stats:pseudo", + " └─Selection(Probe) 0.00 cop[tikv] lt(test.t.a, 10)", + " └─TableRowIDScan 0.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.XXX')) and a<10", + "Plan": [ + "Selection 2658.67 root json_memberof(cast(1, json BINARY), json_extract(test.t.j0, \"$.XXX\"))", + "└─TableReader 3323.33 root data:Selection", + " └─Selection 3323.33 cop[tikv] lt(test.t.a, 10)", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select /*+ use_index_merge(t, j0_1) */ * from t where (1 member of (j0->'$.path1')) and (2 member of (j1)) and a<10", + "Plan": [ + "Selection 0.00 root json_memberof(cast(1, json BINARY), json_extract(test.t.j0, \"$.path1\")), json_memberof(cast(2, json BINARY), test.t.j1)", + "└─IndexMerge 0.00 root type: union", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:j0_1(cast(json_extract(`j0`, _utf8mb4'$.path1') as signed array)) range:[1,1], keep order:false, stats:pseudo", + " └─Selection(Probe) 0.00 cop[tikv] lt(test.t.a, 10)", + " └─TableRowIDScan 0.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select /*+ use_index_merge(t, j1) */ * from t where (1 member of (j0->'$.path1')) and (2 member of (j1)) and a<10", + "Plan": [ + "Selection 0.00 root json_memberof(cast(1, json BINARY), json_extract(test.t.j0, \"$.path1\")), json_memberof(cast(2, json BINARY), test.t.j1)", + "└─IndexMerge 0.00 root type: union", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:j1(cast(`j1` as signed array)) range:[2,2], keep order:false, stats:pseudo", + " └─Selection(Probe) 0.00 cop[tikv] lt(test.t.a, 10)", + " └─TableRowIDScan 0.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + } + ] + }, { "Name": "TestIndexMergePathGeneration", "Cases": [ diff --git a/roadmap.md b/roadmap.md index e311b85b932a0..a8b57db5440df 100644 --- a/roadmap.md +++ b/roadmap.md @@ -21,7 +21,7 @@ This roadmap brings you what's coming in the 1-year future, so you can see the n Scalability & Stability -
  • ✅ Optimize resource isolation in heavy read scenarios.
  • Optimize resource isolation in heavy (batch) write scenarios.
  • Provide resource management capability for background processes.
  • Support resource management framework.
+
  • ✅ Optimize resource isolation in heavy read scenarios.
  • ✅ Optimize resource isolation in heavy (batch) write scenarios.
  • Provide resource management capability for background processes.
  • Support resource management framework.
  • Provide a basic resource management and control framework that effectively limits the resources background tasks can take away from front-end tasks (user operations), and improves cluster stability.
  • Refine resource management in the multi-service aggregation scenario.
@@ -34,7 +34,7 @@ This roadmap brings you what's coming in the 1-year future, so you can see the n SQL - Support the JSON function.
  • ✅ Expression index
  • Multi-value index
  • TiFlash supports JSON function pushdown
+ Support the JSON function.
  • ✅ Expression index
  • Multi-value index
  • ✅ TiFlash supports JSON function pushdown
In business scenarios that require flexible schema definitions, the application can use JSON to store information for ODS, transaction indicators, commodities, game characters, and props. @@ -42,7 +42,7 @@ This roadmap brings you what's coming in the 1-year future, so you can see the n In game rollback scenarios, the flashback can be used to achieve a fast rollback of the current cluster. This solves the common problems in the gaming industry such as version errors and bugs. - Support time to live (TTL). + ✅ Support time to live (TTL). This feature enables automatic data cleanup in limited data archiving scenarios. @@ -50,7 +50,7 @@ This roadmap brings you what's coming in the 1-year future, so you can see the n Supports foreign key constraints compatible with MySQL syntax, and provides DB-level referential integrity check capabilities. - Support non-transactional DML for insert and update operations. + ✅ Support non-transactional DML for insert and update operations. @@ -59,7 +59,7 @@ This roadmap brings you what's coming in the 1-year future, so you can see the n Hybrid Transactional and Analytical Processing (HTAP) - Support TiFlash result write-back. + ✅ Support TiFlash result write-back.

Support INSERT INTO SELECT.

  • Easily write analysis results in TiFlash back to TiDB.
  • Provide complete ACID transactions, more convenient and reliable than general ETL solutions.
  • Set a hard limit on the threshold of intermediate result size, and report an error if the threshold is exceeded.
  • Support fully distributed transactions, and remove or relax the limit on the intermediate result size.

These features combined enable a way to materialize intermediate results. The analysis results can be easily reused, which reduces unnecessary ad-hoc queries, improves the performance of BI and other applications (by pulling results directly) and reduces system load (by avoiding duplicated computation), thereby improving the overall data pipeline efficiency and reducing costs. It will make TiFlash an online service.

@@ -124,12 +124,12 @@ This roadmap brings you what's coming in the 1-year future, so you can see the n Increase TiCDC's scalability by spanning data changes for single table to multiple TiCDC nodes and reduce replication latency by removing sorting stage. - Support replicating data to object storage such as S3. + ✅ Support replicating data to object storage such as S3. TiCDC supports replicating data changes to common object storage services. Data migration - Continuous data verification during data migration. + ✅ Continuous data verification during data migration. DM supports online data verification during migration from MySQL compatible database to TiDB. @@ -148,22 +148,22 @@ This roadmap brings you what's coming in the 1-year future, so you can see the n Password complexity check - A strong password is required. + ✅ A strong password is required. To improve security, empty passwords and weak passwords are not allowed.
The required password length is not less than 8. The password must contain an uppercase letter, a lowercase letter, a number, and a character. Password expiration - TiDB provides password expiration management and requires users to change passwords regularly. + ✅ TiDB provides password expiration management and requires users to change passwords regularly. Reduce the security risk of password cracking or leakage caused by using the same password for a long time. Password reuse policy - TiDB provides a password reuse policy. + ✅ TiDB provides a password reuse policy. Restrict password reuse and improve password security. Password anti-brute force cracking - Accounts will be locked in case of consecutive incorrect passwords. + ✅ Accounts will be locked in case of consecutive incorrect passwords. Lock the account under continuous wrong passwords to prevent the password from being cracked by brute force. diff --git a/session/BUILD.bazel b/session/BUILD.bazel index be3c8699ee6c8..dc3106abdfe63 100644 --- a/session/BUILD.bazel +++ b/session/BUILD.bazel @@ -65,6 +65,7 @@ go_library( "//table/temptable", "//tablecodec", "//telemetry", + "//ttl/ttlworker", "//types", "//types/parser_driver", "//util", diff --git a/session/bootstrap.go b/session/bootstrap.go index 759855cc5c880..236a31c6b3248 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -23,6 +23,7 @@ import ( "encoding/hex" "flag" "fmt" + "io/ioutil" osuser "os/user" "runtime/debug" "strconv" @@ -526,6 +527,7 @@ func bootstrap(s Session) { if dom.DDL().OwnerManager().IsOwner() { doDDLWorks(s) doDMLWorks(s) + runBootstrapSQLFile = true logutil.BgLogger().Info("bootstrap successful", zap.Duration("take time", time.Since(startTime))) return @@ -735,6 +737,8 @@ const ( version108 = 108 // version109 add column source to mysql.stats_meta_history version109 = 109 + // version110 sets tidb_enable_gc_aware_memory_track to off when a cluster upgrades from some version lower than v6.5.0. + version110 = 110 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. @@ -744,6 +748,9 @@ var currentBootstrapVersion int64 = version109 // DDL owner key's expired time is ManagerSessionTTL seconds, we should wait the time and give more time to have a chance to finish it. var internalSQLTimeout = owner.ManagerSessionTTL + 15 +// whether to run the sql file in bootstrap. +var runBootstrapSQLFile = false + var ( bootstrapVersion = []func(Session, int64){ upgradeToVer2, @@ -853,6 +860,7 @@ var ( upgradeToVer107, upgradeToVer108, upgradeToVer109, + upgradeToVer110, } ) @@ -953,11 +961,6 @@ func upgrade(s Session) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) _, err = s.ExecuteInternal(ctx, "COMMIT") - if err == nil && ver <= version92 { - logutil.BgLogger().Info("start migrate DDLs") - err = domain.GetDomain(s).DDL().MoveJobFromQueue2Table(true) - } - if err != nil { sleepTime := 1 * time.Second logutil.BgLogger().Info("update bootstrap ver failed", @@ -2201,6 +2204,15 @@ func upgradeToVer109(s Session, ver int64) { doReentrantDDL(s, "ALTER TABLE mysql.stats_meta_history ADD COLUMN IF NOT EXISTS `source` varchar(40) NOT NULL after `version`;") } +// For users that upgrade TiDB from a 6.2-6.4 version, we want to disable tidb gc_aware_memory_track by default. 
+func upgradeToVer110(s Session, ver int64) { + if ver >= version110 { + return + } + mustExecute(s, "REPLACE HIGH_PRIORITY INTO %n.%n VALUES (%?, %?);", + mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBEnableGCAwareMemoryTrack, 0) +} + func writeOOMAction(s Session) { comment := "oom-action is `log` by default in v3.0.x, `cancel` by default in v4.0.11+" mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, %?) ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`, @@ -2309,6 +2321,38 @@ func doDDLWorks(s Session) { mustExecute(s, CreateTTLTableStatus) } +// doBootstrapSQLFile executes SQL commands in a file as the last stage of bootstrap. +// It is useful for setting the initial value of GLOBAL variables. +func doBootstrapSQLFile(s Session) { + sqlFile := config.GetGlobalConfig().InitializeSQLFile + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + if sqlFile == "" { + return + } + logutil.BgLogger().Info("executing -initialize-sql-file", zap.String("file", sqlFile)) + b, err := ioutil.ReadFile(sqlFile) //nolint:gosec + if err != nil { + logutil.BgLogger().Fatal("unable to read InitializeSQLFile", zap.Error(err)) + } + stmts, err := s.Parse(ctx, string(b)) + if err != nil { + logutil.BgLogger().Fatal("unable to parse InitializeSQLFile", zap.Error(err)) + } + for _, stmt := range stmts { + rs, err := s.ExecuteStmt(ctx, stmt) + if err != nil { + logutil.BgLogger().Warn("InitializeSQLFile error", zap.Error(err)) + } + if rs != nil { + // I don't believe we need to drain the result-set in bootstrap mode + // but if required we can do this here in future. + if err := rs.Close(); err != nil { + logutil.BgLogger().Fatal("unable to close result", zap.Error(err)) + } + } + } +} + // inTestSuite checks if we are bootstrapping in the context of tests. // There are some historical differences in behavior between tests and non-tests. func inTestSuite() bool { diff --git a/session/bootstrap_test.go b/session/bootstrap_test.go index 4ebb7001461a6..a010daf32b14c 100644 --- a/session/bootstrap_test.go +++ b/session/bootstrap_test.go @@ -17,12 +17,14 @@ package session import ( "context" "fmt" + "os" "strconv" "strings" "testing" "time" "github.com/pingcap/tidb/bindinfo" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/auth" @@ -1043,6 +1045,60 @@ func TestUpgradeToVer85(t *testing.T) { mustExec(t, se, "delete from mysql.bind_info where default_db = 'test'") } +func TestInitializeSQLFile(t *testing.T) { + // We create an initialize-sql-file and then bootstrap the server with it. + // The observed behavior should be that tidb_enable_noop_variables is now + // disabled, and the feature works as expected. 
+ initializeSQLFile, err := os.CreateTemp("", "init.sql") + require.NoError(t, err) + defer func() { + path := initializeSQLFile.Name() + err = initializeSQLFile.Close() + require.NoError(t, err) + err = os.Remove(path) + require.NoError(t, err) + }() + // Implicitly test multi-line init files + _, err = initializeSQLFile.WriteString( + "CREATE DATABASE initsqlfiletest;\n" + + "SET GLOBAL tidb_enable_noop_variables = OFF;\n") + require.NoError(t, err) + + // Create a mock store + // Set the config parameter for initialize sql file + store, err := mockstore.NewMockStore() + require.NoError(t, err) + config.GetGlobalConfig().InitializeSQLFile = initializeSQLFile.Name() + defer func() { + require.NoError(t, store.Close()) + config.GetGlobalConfig().InitializeSQLFile = "" + }() + + // Bootstrap with the InitializeSQLFile config option + dom, err := BootstrapSession(store) + require.NoError(t, err) + defer dom.Close() + se := createSessionAndSetID(t, store) + ctx := context.Background() + r, err := exec(se, `SHOW VARIABLES LIKE 'query_cache_type'`) + require.NoError(t, err) + req := r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 0, req.NumRows()) // not shown in noopvariables mode + require.NoError(t, r.Close()) + + r, err = exec(se, `SHOW VARIABLES LIKE 'tidb_enable_noop_variables'`) + require.NoError(t, err) + req = r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, req.NumRows()) + row := req.GetRow(0) + require.Equal(t, []byte("OFF"), row.GetBytes(1)) + require.NoError(t, r.Close()) +} + func TestTiDBEnablePagingVariable(t *testing.T) { store, dom := createStoreAndBootstrap(t) se := createSessionAndSetID(t, store) @@ -1273,3 +1329,56 @@ func TestTiDBCostModelUpgradeFrom610To650(t *testing.T) { }() } } + +func TestTiDBGCAwareUpgradeFrom630To650(t *testing.T) { + ctx := context.Background() + store, _ := createStoreAndBootstrap(t) + defer func() { require.NoError(t, store.Close()) }() + + // upgrade from 6.3 to 6.5+. + ver63 := version93 + seV63 := createSessionAndSetID(t, store) + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + err = m.FinishBootstrap(int64(ver63)) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + mustExec(t, seV63, fmt.Sprintf("update mysql.tidb set variable_value=%d where variable_name='tidb_server_version'", ver63)) + mustExec(t, seV63, fmt.Sprintf("update mysql.GLOBAL_VARIABLES set variable_value='%s' where variable_name='%s'", "1", variable.TiDBEnableGCAwareMemoryTrack)) + mustExec(t, seV63, "commit") + unsetStoreBootstrapped(store.UUID()) + ver, err := getBootstrapVersion(seV63) + require.NoError(t, err) + require.Equal(t, int64(ver63), ver) + + // We are now in 6.3, tidb_enable_gc_aware_memory_track is ON. + res := mustExecToRecodeSet(t, seV63, fmt.Sprintf("select * from mysql.GLOBAL_VARIABLES where variable_name='%s'", variable.TiDBEnableGCAwareMemoryTrack)) + chk := res.NewChunk(nil) + err = res.Next(ctx, chk) + require.NoError(t, err) + require.Equal(t, 1, chk.NumRows()) + row := chk.GetRow(0) + require.Equal(t, 2, row.Len()) + require.Equal(t, "1", row.GetString(1)) + + // Upgrade to 6.5. + domCurVer, err := BootstrapSession(store) + require.NoError(t, err) + defer domCurVer.Close() + seCurVer := createSessionAndSetID(t, store) + ver, err = getBootstrapVersion(seCurVer) + require.NoError(t, err) + require.Equal(t, currentBootstrapVersion, ver) + + // We are now in 6.5. 
+ res = mustExecToRecodeSet(t, seCurVer, fmt.Sprintf("select * from mysql.GLOBAL_VARIABLES where variable_name='%s'", variable.TiDBEnableGCAwareMemoryTrack)) + chk = res.NewChunk(nil) + err = res.Next(ctx, chk) + require.NoError(t, err) + require.Equal(t, 1, chk.NumRows()) + row = chk.GetRow(0) + require.Equal(t, 2, row.Len()) + require.Equal(t, "0", row.GetString(1)) +} diff --git a/session/session.go b/session/session.go index 8c449dc14faf2..2d95bf3bc73d6 100644 --- a/session/session.go +++ b/session/session.go @@ -86,6 +86,7 @@ import ( "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/telemetry" + "github.com/pingcap/tidb/ttl/ttlworker" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/collate" @@ -2180,10 +2181,6 @@ func (s *session) ExecuteStmt(ctx context.Context, stmtNode ast.StmtNode) (sqlex // Transform abstract syntax tree to a physical plan(stored in executor.ExecStmt). compiler := executor.Compiler{Ctx: s} stmt, err := compiler.Compile(ctx, stmtNode) - if err == nil { - err = sessiontxn.OptimizeWithPlanAndThenWarmUp(s, stmt.Plan) - } - if err != nil { s.rollbackOnError(ctx) @@ -3203,15 +3200,14 @@ func InitMDLTable(store kv.Storage) error { // InitMDLVariableForBootstrap initializes the metadata lock variable. func InitMDLVariableForBootstrap(store kv.Storage) error { - initValue := variable.DefTiDBEnableConcurrentDDL err := kv.RunInNewTxn(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), store, true, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) - return t.SetMetadataLock(initValue) + return t.SetMetadataLock(true) }) if err != nil { return err } - variable.EnableMDL.Store(initValue) + variable.EnableMDL.Store(true) return nil } @@ -3297,7 +3293,7 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { analyzeConcurrencyQuota := int(config.GetGlobalConfig().Performance.AnalyzePartitionConcurrencyQuota) concurrency := int(config.GetGlobalConfig().Performance.StatsLoadConcurrency) - ses, err := createSessions(store, 9) + ses, err := createSessions(store, 10) if err != nil { return nil, err } @@ -3373,10 +3369,14 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { if dom.GetEtcdClient() != nil { // We only want telemetry data in production-like clusters. When TiDB is deployed over other engines, // for example, unistore engine (used for local tests), we just skip it. Its etcd client is nil. - go func() { - dom.TelemetryReportLoop(ses[5]) - dom.TelemetryRotateSubWindowLoop(ses[5]) - }() + if config.GetGlobalConfig().EnableTelemetry { + // There is no way to turn telemetry on with global variable `tidb_enable_telemetry` + // when it is disabled in config. See IsTelemetryEnabled function in telemetry/telemetry.go + go func() { + dom.TelemetryReportLoop(ses[5]) + dom.TelemetryRotateSubWindowLoop(ses[5]) + }() + } } planReplayerWorkerCnt := config.GetGlobalConfig().Performance.PlanReplayerDumpWorkerConcurrency @@ -3397,7 +3397,13 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { // setup historical stats worker dom.SetupHistoricalStatsWorker(ses[8]) dom.StartHistoricalStatsWorker() - + if runBootstrapSQLFile { + pm := &privileges.UserPrivileges{ + Handle: dom.PrivilegeHandle(), + } + privilege.BindPrivilegeManager(ses[9], pm) + doBootstrapSQLFile(ses[9]) + } // A sub context for update table stats, and other contexts for concurrent stats loading. 
cnt := 1 + concurrency syncStatsCtxs, err := createSessions(store, cnt) @@ -3412,6 +3418,22 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { return nil, err } + // start TTL job manager after setup stats collector + // because TTL could modify a lot of columns, and need to trigger auto analyze + ttlworker.AttachStatsCollector = func(s sqlexec.SQLExecutor) sqlexec.SQLExecutor { + if s, ok := s.(*session); ok { + return attachStatsCollector(s, dom) + } + return s + } + ttlworker.DetachStatsCollector = func(s sqlexec.SQLExecutor) sqlexec.SQLExecutor { + if s, ok := s.(*session); ok { + return detachStatsCollector(s) + } + return s + } + dom.StartTTLJobManager() + analyzeCtxs, err := createSessions(store, analyzeConcurrencyQuota) if err != nil { return nil, err @@ -3517,6 +3539,26 @@ func createSessionWithOpt(store kv.Storage, opt *Opt) (*session, error) { return s, nil } +// attachStatsCollector attaches the stats collector in the dom for the session +func attachStatsCollector(s *session, dom *domain.Domain) *session { + if dom.StatsHandle() != nil && dom.StatsUpdating() { + s.statsCollector = dom.StatsHandle().NewSessionStatsCollector() + if GetIndexUsageSyncLease() > 0 { + s.idxUsageCollector = dom.StatsHandle().NewSessionIndexUsageCollector() + } + } + + return s +} + +// detachStatsCollector removes the stats collector in the session +func detachStatsCollector(s *session) *session { + s.statsCollector = nil + s.idxUsageCollector = nil + + return s +} + // CreateSessionWithDomain creates a new Session and binds it with a Domain. // We need this because when we start DDL in Domain, the DDL need a session // to change some system tables. But at that time, we have been already in @@ -4155,23 +4197,26 @@ func (s *session) setRequestSource(ctx context.Context, stmtLabel string, stmtNo } else { s.sessionVars.RequestSourceType = stmtLabel } - } else { - if source := ctx.Value(kv.RequestSourceKey); source != nil { - s.sessionVars.RequestSourceType = source.(kv.RequestSource).RequestSourceType - } else { - // panic in test mode in case there are requests without source in the future. - // log warnings in production mode. - if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { - panic("unexpected no source type context, if you see this error, " + - "the `RequestSourceTypeKey` is missing in your context") - } else { - logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, "+ - "the `RequestSourceTypeKey` is missing in the context", - zap.Bool("internal", s.isInternal()), - zap.String("sql", stmtNode.Text())) - } + return + } + if source := ctx.Value(kv.RequestSourceKey); source != nil { + requestSource := source.(kv.RequestSource) + if requestSource.RequestSourceType != "" { + s.sessionVars.RequestSourceType = requestSource.RequestSourceType + return } } + // panic in test mode in case there are requests without source in the future. + // log warnings in production mode. + if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { + panic("unexpected no source type context, if you see this error, " + + "the `RequestSourceTypeKey` is missing in your context") + } else { + logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, "+ + "the `RequestSourceTypeKey` is missing in the context", + zap.Bool("internal", s.isInternal()), + zap.String("sql", stmtNode.Text())) + } } // RemoveLockDDLJobs removes the DDL jobs which doesn't get the metadata lock from job2ver. 
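The setRequestSource rewrite in session.go above now returns as soon as it finds a usable source: either the statement label for internal SQL, or a non-empty RequestSourceType carried on the context under kv.RequestSourceKey; only when both are missing does it fall through to the panic/warning path. Below is a minimal sketch of how an internal caller can tag its context so that lookup succeeds, assuming kv.WithInternalSourceType stores a kv.RequestSource under kv.RequestSourceKey (its use elsewhere in this patch suggests exactly that); the helper name and the placeholder SQL are illustrative only, not part of the patch.

package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/util/sqlexec"
)

// runInternalStmt tags the context with an internal request-source type before
// executing, so setRequestSource can pick the type up from kv.RequestSourceKey
// instead of logging "unexpected no source type context".
// kv.InternalTxnBootstrap and kv.InternalTxnDDL both appear in this patch; any
// of the kv.InternalTxn* source types would serve the same purpose here.
func runInternalStmt(exec sqlexec.SQLExecutor) error {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap)
	_, err := exec.ExecuteInternal(ctx, "SELECT 1") // placeholder statement
	return err
}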
diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index 9a49c24851c08..01ead10e580fc 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -172,7 +172,6 @@ type StatementContext struct { InNullRejectCheck bool AllowInvalidDate bool IgnoreNoPartition bool - SkipPlanCache bool IgnoreExplainIDSuffix bool SkipUTF8Check bool SkipASCIICheck bool @@ -380,6 +379,13 @@ type StatementContext struct { HasFKCascades bool } + // MPPQueryInfo stores some id and timestamp of current MPP query statement. + MPPQueryInfo struct { + QueryID atomic2.Uint64 + QueryTS atomic2.Uint64 + AllocatedMPPTaskID atomic2.Int64 + } + // TableStats stores the visited runtime table stats by table id during query TableStats map[int64]interface{} // useChunkAlloc indicates whether statement use chunk alloc @@ -602,6 +608,15 @@ func (sc *StatementContext) SetPlanHint(hint string) { sc.planHint = hint } +// SetSkipPlanCache sets to skip the plan cache and records the reason. +func (sc *StatementContext) SetSkipPlanCache(reason error) { + if !sc.UseCache { + return // avoid unnecessary warnings + } + sc.UseCache = false + sc.AppendWarning(reason) +} + // TableEntry presents table in db. type TableEntry struct { DB string @@ -1147,9 +1162,8 @@ func (sc *StatementContext) GetLockWaitStartTime() time.Time { func (sc *StatementContext) RecordRangeFallback(rangeMaxSize int64) { // If range fallback happens, it means ether the query is unreasonable(for example, several long IN lists) or tidb_opt_range_max_size is too small // and the generated plan is probably suboptimal. In that case we don't put it into plan cache. - sc.SkipPlanCache = true if sc.UseCache { - sc.AppendWarning(errors.Errorf("skip plan-cache: in-list is too long")) + sc.SetSkipPlanCache(errors.Errorf("skip plan-cache: in-list is too long")) } if !sc.RangeFallback { sc.AppendWarning(errors.Errorf("Memory capacity of %v bytes for 'tidb_opt_range_max_size' exceeded when building ranges. Less accurate ranges such as full range are chosen", rangeMaxSize)) diff --git a/sessionctx/variable/BUILD.bazel b/sessionctx/variable/BUILD.bazel index e964aaa1fcdcb..b178ccf0a95da 100644 --- a/sessionctx/variable/BUILD.bazel +++ b/sessionctx/variable/BUILD.bazel @@ -32,7 +32,6 @@ go_library( "//parser/types", "//sessionctx/sessionstates", "//sessionctx/stmtctx", - "//sessionctx/variable/featuretag/concurrencyddl", "//sessionctx/variable/featuretag/distributereorg", "//tidb-binlog/pump_client", "//types", diff --git a/sessionctx/variable/featuretag/concurrencyddl/BUILD.bazel b/sessionctx/variable/featuretag/concurrencyddl/BUILD.bazel deleted file mode 100644 index 44c1cede3c2b7..0000000000000 --- a/sessionctx/variable/featuretag/concurrencyddl/BUILD.bazel +++ /dev/null @@ -1,11 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "concurrencyddl", - srcs = [ - "default.go", - "non_default.go", - ], - importpath = "github.com/pingcap/tidb/sessionctx/variable/featuretag/concurrencyddl", - visibility = ["//visibility:public"], -) diff --git a/sessionctx/variable/featuretag/concurrencyddl/default.go b/sessionctx/variable/featuretag/concurrencyddl/default.go deleted file mode 100644 index 8aca4924268f0..0000000000000 --- a/sessionctx/variable/featuretag/concurrencyddl/default.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !featuretag - -package concurrencyddl - -// TiDBEnableConcurrentDDL is a feature tag -const TiDBEnableConcurrentDDL bool = true diff --git a/sessionctx/variable/featuretag/concurrencyddl/non_default.go b/sessionctx/variable/featuretag/concurrencyddl/non_default.go deleted file mode 100644 index 72218abe958a3..0000000000000 --- a/sessionctx/variable/featuretag/concurrencyddl/non_default.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build featuretag - -package concurrencyddl - -// TiDBEnableConcurrentDDL is a feature tag -const TiDBEnableConcurrentDDL bool = false diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index d3d30d9c7bcc5..977b34b1578a9 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -671,13 +671,6 @@ type SessionVars struct { value string } - // mppTaskIDAllocator is used to allocate mpp task id for a session. - mppTaskIDAllocator struct { - mu sync.Mutex - lastTS uint64 - taskID int64 - } - // Status stands for the session status. e.g. in transaction or not, auto commit is on or off, and so on. Status uint16 @@ -1451,20 +1444,6 @@ func (s *SessionVars) InitStatementContext() *stmtctx.StatementContext { return sc } -// AllocMPPTaskID allocates task id for mpp tasks. It will reset the task id if the query's -// startTs is different. -func (s *SessionVars) AllocMPPTaskID(startTS uint64) int64 { - s.mppTaskIDAllocator.mu.Lock() - defer s.mppTaskIDAllocator.mu.Unlock() - if s.mppTaskIDAllocator.lastTS == startTS { - s.mppTaskIDAllocator.taskID++ - return s.mppTaskIDAllocator.taskID - } - s.mppTaskIDAllocator.lastTS = startTS - s.mppTaskIDAllocator.taskID = 1 - return 1 -} - // IsMPPAllowed returns whether mpp execution is allowed. 
func (s *SessionVars) IsMPPAllowed() bool { return s.allowMPPExecution diff --git a/sessionctx/variable/session_test.go b/sessionctx/variable/session_test.go index b624005b7b512..5df5e187088d0 100644 --- a/sessionctx/variable/session_test.go +++ b/sessionctx/variable/session_test.go @@ -130,16 +130,9 @@ func TestSession(t *testing.T) { func TestAllocMPPID(t *testing.T) { ctx := mock.NewContext() - - seVar := ctx.GetSessionVars() - require.NotNil(t, seVar) - - require.Equal(t, int64(1), seVar.AllocMPPTaskID(1)) - require.Equal(t, int64(2), seVar.AllocMPPTaskID(1)) - require.Equal(t, int64(3), seVar.AllocMPPTaskID(1)) - require.Equal(t, int64(1), seVar.AllocMPPTaskID(2)) - require.Equal(t, int64(2), seVar.AllocMPPTaskID(2)) - require.Equal(t, int64(3), seVar.AllocMPPTaskID(2)) + require.Equal(t, int64(1), plannercore.AllocMPPTaskID(ctx)) + require.Equal(t, int64(2), plannercore.AllocMPPTaskID(ctx)) + require.Equal(t, int64(3), plannercore.AllocMPPTaskID(ctx)) } func TestSlowLogFormat(t *testing.T) { diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 777b6996b49be..d56bcab9fb3b2 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1101,18 +1101,6 @@ var defaultSysVars = []*SysVar{ return err }, }, - {Scope: ScopeGlobal, Name: TiDBEnableConcurrentDDL, Value: BoolToOnOff(DefTiDBEnableConcurrentDDL), Type: TypeBool, SetGlobal: func(_ context.Context, s *SessionVars, val string) error { - if EnableConcurrentDDL.Load() != TiDBOptOn(val) { - err := SwitchConcurrentDDL(TiDBOptOn(val)) - if err != nil { - return err - } - EnableConcurrentDDL.Store(TiDBOptOn(val)) - } - return nil - }, GetGlobal: func(_ context.Context, s *SessionVars) (string, error) { - return BoolToOnOff(EnableConcurrentDDL.Load()), nil - }}, {Scope: ScopeGlobal, Name: TiDBEnableMDL, Value: BoolToOnOff(DefTiDBEnableMDL), Type: TypeBool, SetGlobal: func(_ context.Context, vars *SessionVars, val string) error { if EnableMDL.Load() != TiDBOptOn(val) { err := SwitchMDL(TiDBOptOn(val)) diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index c8da3ed5c10e6..5ae1adfac403a 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -22,7 +22,6 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/sessionctx/variable/featuretag/concurrencyddl" "github.com/pingcap/tidb/sessionctx/variable/featuretag/distributereorg" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/paging" @@ -845,8 +844,6 @@ const ( // TiDBMaxAutoAnalyzeTime is the max time that auto analyze can run. If auto analyze runs longer than the value, it // will be killed. 0 indicates that there is no time limit. TiDBMaxAutoAnalyzeTime = "tidb_max_auto_analyze_time" - // TiDBEnableConcurrentDDL indicates whether to enable the new DDL framework. - TiDBEnableConcurrentDDL = "tidb_enable_concurrent_ddl" // TiDBDDLEnableDistributeReorg indicates whether to enable the new Reorg framework. TiDBDDLEnableDistributeReorg = "tidb_ddl_distribute_reorg" // TiDBGenerateBinaryPlan indicates whether binary plan should be generated in slow log and statements summary. 
@@ -1097,7 +1094,6 @@ const ( DefTiDBPrepPlanCacheSize = 100 DefTiDBEnablePrepPlanCacheMemoryMonitor = true DefTiDBPrepPlanCacheMemoryGuardRatio = 0.1 - DefTiDBEnableConcurrentDDL = concurrencyddl.TiDBEnableConcurrentDDL DefTiDBDDLEnableDistributeReorg = distributereorg.TiDBEnableDistributeReorg DefTiDBSimplifiedMetrics = false DefTiDBEnablePaging = true @@ -1202,7 +1198,6 @@ var ( MaxAutoAnalyzeTime = atomic.NewInt64(DefTiDBMaxAutoAnalyzeTime) // variables for plan cache PreparedPlanCacheMemoryGuardRatio = atomic.NewFloat64(DefTiDBPrepPlanCacheMemoryGuardRatio) - EnableConcurrentDDL = atomic.NewBool(DefTiDBEnableConcurrentDDL) DDLEnableDistributeReorg = atomic.NewBool(DefTiDBDDLEnableDistributeReorg) DDLForce2Queue = atomic.NewBool(false) EnableNoopVariables = atomic.NewBool(DefTiDBEnableNoopVariables) @@ -1249,8 +1244,6 @@ var ( SetStatsCacheCapacity atomic.Value // SetPDClientDynamicOption is the func registered by domain SetPDClientDynamicOption atomic.Pointer[func(string, string)] - // SwitchConcurrentDDL is the func registered by DDL to switch concurrent DDL. - SwitchConcurrentDDL func(bool) error = nil // SwitchMDL is the func registered by DDL to switch MDL. SwitchMDL func(bool2 bool) error = nil // EnableDDL is the func registered by ddl to enable running ddl in this instance. diff --git a/sessiontxn/interface.go b/sessiontxn/interface.go index 85d9217a90c0f..d91f1c291b588 100644 --- a/sessiontxn/interface.go +++ b/sessiontxn/interface.go @@ -160,8 +160,10 @@ type TxnManager interface { // GetReadReplicaScope returns the read replica scope GetReadReplicaScope() string // GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) + // Calling this method will activate the txn implicitly if current read is not stale/historical read GetStmtReadTS() (uint64, error) // GetStmtForUpdateTS returns the read timestamp used by update/insert/delete or select ... for update + // Calling this method will activate the txn implicitly if current read is not stale/historical read GetStmtForUpdateTS() (uint64, error) // GetContextProvider returns the current TxnContextProvider GetContextProvider() TxnContextProvider diff --git a/sessiontxn/isolation/optimistic.go b/sessiontxn/isolation/optimistic.go index 3c60eba09331b..9a1f8d58aabbd 100644 --- a/sessiontxn/isolation/optimistic.go +++ b/sessiontxn/isolation/optimistic.go @@ -114,6 +114,12 @@ func (p *OptimisticTxnContextProvider) AdviseOptimizeWithPlan(plan interface{}) return nil } + if p.txn != nil { + // `p.txn != nil` means the txn has already been activated, we should not optimize the startTS because the startTS + // has already been used. 
+ return nil + } + realPlan, ok := plan.(plannercore.Plan) if !ok { return nil @@ -141,7 +147,7 @@ func (p *OptimisticTxnContextProvider) AdviseOptimizeWithPlan(plan interface{}) zap.Uint64("conn", sessVars.ConnectionID), zap.String("text", sessVars.StmtCtx.OriginalSQL), ) - return nil + return err } p.optimizeWithMaxTS = true diff --git a/statistics/handle/gc.go b/statistics/handle/gc.go index 46a2a9fc691e3..f16e2c9719088 100644 --- a/statistics/handle/gc.go +++ b/statistics/handle/gc.go @@ -22,7 +22,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/sqlexec" @@ -153,15 +155,32 @@ func (h *Handle) ClearOutdatedHistoryStats() error { h.mu.Lock() defer h.mu.Unlock() exec := h.mu.ctx.(sqlexec.SQLExecutor) - sql := "delete from mysql.stats_meta_history where NOW() - create_time >= %?" - _, err := exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds()) + sql := "select count(*) from mysql.stats_meta_history where NOW() - create_time >= %?" + rs, err := exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds()) if err != nil { return err } - sql = "delete from mysql.stats_history where NOW() - create_time >= %? " - _, err = exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds()) - logutil.BgLogger().Info("clear outdated historical stats") - return err + if rs == nil { + return nil + } + var rows []chunk.Row + defer terror.Call(rs.Close) + if rows, err = sqlexec.DrainRecordSet(ctx, rs, 8); err != nil { + return errors.Trace(err) + } + count := rows[0].GetInt64(0) + if count > 0 { + sql = "delete from mysql.stats_meta_history where NOW() - create_time >= %?" + _, err = exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds()) + if err != nil { + return err + } + sql = "delete from mysql.stats_history where NOW() - create_time >= %? " + _, err = exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds()) + logutil.BgLogger().Info("clear outdated historical stats") + return err + } + return nil } func (h *Handle) gcHistoryStatsFromKV(physicalID int64) error { diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index c85656ff99d4b..f53f075301acb 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -1906,7 +1906,9 @@ func (h *Handle) histogramFromStorage(reader *statsReader, tableID int64, colID lowerBound = rows[i].GetDatum(2, &fields[2].Column.FieldType) upperBound = rows[i].GetDatum(3, &fields[3].Column.FieldType) } else { - sc := &stmtctx.StatementContext{TimeZone: time.UTC} + // Invalid date values may be inserted into table under some relaxed sql mode. Those values may exist in statistics. + // Hence, when reading statistics, we should skip invalid date check. See #39336. + sc := &stmtctx.StatementContext{TimeZone: time.UTC, AllowInvalidDate: true, IgnoreZeroInDate: true} d := rows[i].GetDatum(2, &fields[2].Column.FieldType) // For new collation data, when storing the bounds of the histogram, we store the collate key instead of the // original value. 
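The statistics/handle/handle.go hunk above replaces the strict StatementContext used when decoding histogram bounds with one that tolerates invalid and zero dates, because such values can legitimately end up in statistics when rows were inserted under a relaxed sql_mode (see TestIssue39336 in the handle_test.go diff below). A minimal sketch of that construction, pulled out into a helper purely for illustration (the helper name is not part of the patch):

package example

import (
	"time"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
)

// statsDecodeSC builds the relaxed StatementContext used for reading histogram
// bounds back from storage: invalid or zero dates stored under a permissive
// sql_mode must not fail the date checks during conversion.
func statsDecodeSC() *stmtctx.StatementContext {
	return &stmtctx.StatementContext{
		TimeZone:         time.UTC,
		AllowInvalidDate: true,
		IgnoreZeroInDate: true,
	}
}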
diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go index 5e41b1c1279a2..9bb80498bc90f 100644 --- a/statistics/handle/handle_test.go +++ b/statistics/handle/handle_test.go @@ -3543,3 +3543,35 @@ func TestStatsLockAndUnlockTables(t *testing.T) { tbl2Stats2 := handle.GetTableStats(tbl2.Meta()) require.Equal(t, int64(2), tbl2Stats2.Count) } + +func TestIssue39336(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(` +create table t1 ( + a datetime(3) default null, + b int +) partition by range (b) ( + partition p0 values less than (1000), + partition p1 values less than (maxvalue) +)`) + tk.MustExec("set @@sql_mode=''") + tk.MustExec("set @@tidb_analyze_version=2") + tk.MustExec("set @@tidb_partition_prune_mode='dynamic'") + tk.MustExec(` +insert into t1 values +('1000-00-09 00:00:00.000', 1), +('1000-00-06 00:00:00.000', 1), +('1000-00-06 00:00:00.000', 1), +('2022-11-23 14:24:30.000', 1), +('2022-11-23 14:24:32.000', 1), +('2022-11-23 14:24:33.000', 1), +('2022-11-23 14:24:35.000', 1), +('2022-11-23 14:25:08.000', 1001), +('2022-11-23 14:25:09.000', 1001)`) + tk.MustExec("analyze table t1 with 0 topn") + rows := tk.MustQuery("show analyze status where job_info like 'merge global stats%'").Rows() + require.Len(t, rows, 1) + require.Equal(t, "finished", rows[0][7]) +} diff --git a/store/copr/coprocessor.go b/store/copr/coprocessor.go index 390c5ffe8e63a..94a2f478290a8 100644 --- a/store/copr/coprocessor.go +++ b/store/copr/coprocessor.go @@ -213,7 +213,8 @@ func (c *CopClient) BuildCopIterator(ctx context.Context, req *kv.Request, vars // higher concurrency, the data is just cached and not consumed for a while, this increase the memory usage. // Set concurrency to 2 can reduce the memory usage and I've tested that it does not necessarily // decrease the performance. - if it.concurrency > 2 { + // For ReqTypeAnalyze, we keep its concurrency to avoid slow analyze(see https://github.com/pingcap/tidb/issues/40162 for details). + if it.concurrency > 2 && it.req.Tp != kv.ReqTypeAnalyze { oldConcurrency := it.concurrency it.concurrency = 2 diff --git a/store/copr/mpp.go b/store/copr/mpp.go index 02b66478958d4..c3225c40d1455 100644 --- a/store/copr/mpp.go +++ b/store/copr/mpp.go @@ -143,7 +143,8 @@ type mppIterator struct { tasks []*kv.MPPDispatchRequest finishCh chan struct{} - startTs uint64 + startTs uint64 + mppQueryID kv.MPPQueryID respChan chan *mppResponse @@ -220,7 +221,8 @@ func (m *mppIterator) handleDispatchReq(ctx context.Context, bo *Backoffer, req } // meta for current task. 
- taskMeta := &mpp.TaskMeta{StartTs: req.StartTs, TaskId: req.ID, Address: req.Meta.GetAddress()} + taskMeta := &mpp.TaskMeta{StartTs: req.StartTs, QueryTs: req.MppQueryID.QueryTs, LocalQueryId: req.MppQueryID.LocalQueryID, TaskId: req.ID, ServerId: req.MppQueryID.ServerID, + Address: req.Meta.GetAddress()} mppReq := &mpp.DispatchTaskRequest{ Meta: taskMeta, @@ -334,7 +336,7 @@ func (m *mppIterator) cancelMppTasks() { m.mu.Lock() defer m.mu.Unlock() killReq := &mpp.CancelTaskRequest{ - Meta: &mpp.TaskMeta{StartTs: m.startTs}, + Meta: &mpp.TaskMeta{StartTs: m.startTs, QueryTs: m.mppQueryID.QueryTs, LocalQueryId: m.mppQueryID.LocalQueryID, ServerId: m.mppQueryID.ServerID}, } disaggregatedTiFlash := config.GetGlobalConfig().DisaggregatedTiFlash @@ -374,8 +376,11 @@ func (m *mppIterator) establishMPPConns(bo *Backoffer, req *kv.MPPDispatchReques connReq := &mpp.EstablishMPPConnectionRequest{ SenderMeta: taskMeta, ReceiverMeta: &mpp.TaskMeta{ - StartTs: req.StartTs, - TaskId: -1, + StartTs: req.StartTs, + QueryTs: m.mppQueryID.QueryTs, + LocalQueryId: m.mppQueryID.LocalQueryID, + ServerId: m.mppQueryID.ServerID, + TaskId: -1, }, } @@ -528,7 +533,7 @@ func (m *mppIterator) Next(ctx context.Context) (kv.ResultSubset, error) { } // DispatchMPPTasks dispatches all the mpp task and waits for the responses. -func (c *MPPClient) DispatchMPPTasks(ctx context.Context, variables interface{}, dispatchReqs []*kv.MPPDispatchRequest, needTriggerFallback bool, startTs uint64) kv.Response { +func (c *MPPClient) DispatchMPPTasks(ctx context.Context, variables interface{}, dispatchReqs []*kv.MPPDispatchRequest, needTriggerFallback bool, startTs uint64, mppQueryID kv.MPPQueryID) kv.Response { vars := variables.(*tikv.Variables) ctxChild, cancelFunc := context.WithCancel(ctx) iter := &mppIterator{ @@ -539,6 +544,7 @@ func (c *MPPClient) DispatchMPPTasks(ctx context.Context, variables interface{}, cancelFunc: cancelFunc, respChan: make(chan *mppResponse), startTs: startTs, + mppQueryID: mppQueryID, vars: vars, needTriggerFallback: needTriggerFallback, enableCollectExecutionInfo: config.GetGlobalConfig().Instance.EnableCollectExecutionInfo.Load(), diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index 054ec83ee7e8a..87a990f7f096c 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -912,6 +912,10 @@ func needsGCOperationForStore(store *metapb.Store) (bool, error) { // skip physical resolve locks for it. return false, nil + case placement.EngineLabelTiFlashCompute: + logutil.BgLogger().Debug("[gc worker] will ignore gc tiflash_compute node") + return false, nil + case placement.EngineLabelTiKV, "": // If no engine label is set, it should be a TiKV node. return true, nil diff --git a/table/tables/index.go b/table/tables/index.go index 446a2e7288595..607afb9640aad 100644 --- a/table/tables/index.go +++ b/table/tables/index.go @@ -103,10 +103,61 @@ func (c *index) GenIndexValue(sc *stmtctx.StatementContext, distinct bool, index return tablecodec.GenIndexValuePortal(sc, c.tblInfo, c.idxInfo, c.needRestoredData, distinct, false, indexedValues, h, c.phyTblID, restoredData) } +// getIndexedValue will produce the result like: +// 1. If not multi-valued index, return directly. +// 2. (i1, [m1,m2], i2, ...) ==> [(i1, m1, i2, ...), (i1, m2, i2, ...)] +// 3. (i1, null, i2, ...) ==> [(i1, null, i2, ...)] +// 4. (i1, [], i2, ...) ==> nothing. 
+func (c *index) getIndexedValue(indexedValues []types.Datum) [][]types.Datum { + if !c.idxInfo.MVIndex { + return [][]types.Datum{indexedValues} + } + + vals := make([][]types.Datum, 0, 16) + jsonIdx := 0 + jsonIsNull := false + existsVals := make(map[string]struct{}) + var buf []byte + for !jsonIsNull { + val := make([]types.Datum, 0, len(indexedValues)) + for i, v := range indexedValues { + if !c.tblInfo.Columns[c.idxInfo.Columns[i].Offset].FieldType.IsArray() { + val = append(val, v) + } else { + if v.IsNull() { + val = append(val, v) + jsonIsNull = true + continue + } + elemCount := v.GetMysqlJSON().GetElemCount() + for { + // JSON cannot be indexed, if the value is JSON type, it must be multi-valued index. + if jsonIdx >= elemCount { + goto out + } + binaryJSON := v.GetMysqlJSON().ArrayGetElem(jsonIdx) + jsonIdx++ + buf = buf[:0] + key := string(binaryJSON.HashValue(buf)) + if _, exists := existsVals[key]; exists { + continue + } + existsVals[key] = struct{}{} + val = append(val, types.NewDatum(binaryJSON.GetValue())) + break + } + } + } + vals = append(vals, val) + } +out: + return vals +} + // Create creates a new entry in the kvIndex data. // If the index is unique and there is an existing entry with the same key, // Create will return the existing entry's handle as the first return value, ErrKeyExists as the second return value. -func (c *index) Create(sctx sessionctx.Context, txn kv.Transaction, indexedValues []types.Datum, h kv.Handle, handleRestoreData []types.Datum, opts ...table.CreateIdxOptFunc) (kv.Handle, error) { +func (c *index) Create(sctx sessionctx.Context, txn kv.Transaction, indexedValue []types.Datum, h kv.Handle, handleRestoreData []types.Datum, opts ...table.CreateIdxOptFunc) (kv.Handle, error) { if c.Meta().Unique { txn.CacheTableInfo(c.phyTblID, c.tblInfo) } @@ -114,225 +165,241 @@ func (c *index) Create(sctx sessionctx.Context, txn kv.Transaction, indexedValue for _, fn := range opts { fn(&opt) } + + indexedValues := c.getIndexedValue(indexedValue) + ctx := opt.Ctx + if ctx != nil { + if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { + span1 := span.Tracer().StartSpan("index.Create", opentracing.ChildOf(span.Context())) + defer span1.Finish() + ctx = opentracing.ContextWithSpan(ctx, span1) + } + } else { + ctx = context.TODO() + } vars := sctx.GetSessionVars() writeBufs := vars.GetWriteStmtBufs() skipCheck := vars.StmtCtx.BatchCheck - key, distinct, err := c.GenIndexKey(vars.StmtCtx, indexedValues, h, writeBufs.IndexKeyBuf) - if err != nil { - return nil, err - } - - var ( - tempKey []byte - keyVer byte - keyIsTempIdxKey bool - ) - if !opt.FromBackFill { - key, tempKey, keyVer = GenTempIdxKeyByState(c.idxInfo, key) - if keyVer == TempIndexKeyTypeBackfill || keyVer == TempIndexKeyTypeDelete { - key, tempKey = tempKey, nil - keyIsTempIdxKey = true + for _, value := range indexedValues { + key, distinct, err := c.GenIndexKey(vars.StmtCtx, value, h, writeBufs.IndexKeyBuf) + if err != nil { + return nil, err } - } - ctx := opt.Ctx - if opt.Untouched { - txn, err1 := sctx.Txn(true) - if err1 != nil { - return nil, err1 + var ( + tempKey []byte + keyVer byte + keyIsTempIdxKey bool + ) + if !opt.FromBackFill { + key, tempKey, keyVer = GenTempIdxKeyByState(c.idxInfo, key) + if keyVer == TempIndexKeyTypeBackfill || keyVer == TempIndexKeyTypeDelete { + key, tempKey = tempKey, nil + keyIsTempIdxKey = true + } } - // If the index kv was untouched(unchanged), and the key/value already exists in mem-buffer, - // should not overwrite the key 
with un-commit flag. - // So if the key exists, just do nothing and return. - v, err := txn.GetMemBuffer().Get(ctx, key) - if err == nil { - if len(v) != 0 { - return nil, nil + + if opt.Untouched { + txn, err1 := sctx.Txn(true) + if err1 != nil { + return nil, err1 } - // The key is marked as deleted in the memory buffer, as the existence check is done lazily - // for optimistic transactions by default. The "untouched" key could still exist in the store, - // it's needed to commit this key to do the existence check so unset the untouched flag. - if !txn.IsPessimistic() { - keyFlags, err := txn.GetMemBuffer().GetFlags(key) - if err != nil { - return nil, err + // If the index kv was untouched(unchanged), and the key/value already exists in mem-buffer, + // should not overwrite the key with un-commit flag. + // So if the key exists, just do nothing and return. + v, err := txn.GetMemBuffer().Get(ctx, key) + if err == nil { + if len(v) != 0 { + continue } - if keyFlags.HasPresumeKeyNotExists() { - opt.Untouched = false + // The key is marked as deleted in the memory buffer, as the existence check is done lazily + // for optimistic transactions by default. The "untouched" key could still exist in the store, + // it's needed to commit this key to do the existence check so unset the untouched flag. + if !txn.IsPessimistic() { + keyFlags, err := txn.GetMemBuffer().GetFlags(key) + if err != nil { + return nil, err + } + if keyFlags.HasPresumeKeyNotExists() { + opt.Untouched = false + } } } } - } - - // save the key buffer to reuse. - writeBufs.IndexKeyBuf = key - c.initNeedRestoreData.Do(func() { - c.needRestoredData = NeedRestoredData(c.idxInfo.Columns, c.tblInfo.Columns) - }) - idxVal, err := tablecodec.GenIndexValuePortal(sctx.GetSessionVars().StmtCtx, c.tblInfo, c.idxInfo, c.needRestoredData, distinct, opt.Untouched, indexedValues, h, c.phyTblID, handleRestoreData) - if err != nil { - return nil, err - } - - opt.IgnoreAssertion = opt.IgnoreAssertion || c.idxInfo.State != model.StatePublic - if !distinct || skipCheck || opt.Untouched { - if keyIsTempIdxKey && !opt.Untouched { // Untouched key-values never occur in the storage. - idxVal = tablecodec.EncodeTempIndexValue(idxVal, keyVer) - } - err = txn.GetMemBuffer().Set(key, idxVal) + // save the key buffer to reuse. + writeBufs.IndexKeyBuf = key + c.initNeedRestoreData.Do(func() { + c.needRestoredData = NeedRestoredData(c.idxInfo.Columns, c.tblInfo.Columns) + }) + idxVal, err := tablecodec.GenIndexValuePortal(sctx.GetSessionVars().StmtCtx, c.tblInfo, c.idxInfo, c.needRestoredData, distinct, opt.Untouched, value, h, c.phyTblID, handleRestoreData) if err != nil { return nil, err } - if len(tempKey) > 0 { - if !opt.Untouched { // Untouched key-values never occur in the storage. + + opt.IgnoreAssertion = opt.IgnoreAssertion || c.idxInfo.State != model.StatePublic + + if !distinct || skipCheck || opt.Untouched { + if keyIsTempIdxKey && !opt.Untouched { // Untouched key-values never occur in the storage. idxVal = tablecodec.EncodeTempIndexValue(idxVal, keyVer) } - err = txn.GetMemBuffer().Set(tempKey, idxVal) + err = txn.GetMemBuffer().Set(key, idxVal) if err != nil { return nil, err } - } - if !opt.IgnoreAssertion && (!opt.Untouched) { - if sctx.GetSessionVars().LazyCheckKeyNotExists() && !txn.IsPessimistic() { - err = txn.SetAssertion(key, kv.SetAssertUnknown) - } else { - err = txn.SetAssertion(key, kv.SetAssertNotExist) + if len(tempKey) > 0 { + if !opt.Untouched { // Untouched key-values never occur in the storage. 
+ idxVal = tablecodec.EncodeTempIndexValue(idxVal, keyVer) + } + err = txn.GetMemBuffer().Set(tempKey, idxVal) + if err != nil { + return nil, err + } } + if !opt.IgnoreAssertion && (!opt.Untouched) { + if sctx.GetSessionVars().LazyCheckKeyNotExists() && !txn.IsPessimistic() { + err = txn.SetAssertion(key, kv.SetAssertUnknown) + } else { + err = txn.SetAssertion(key, kv.SetAssertNotExist) + } + } + if err != nil { + return nil, err + } + continue } - return nil, err - } - if ctx != nil { - if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { - span1 := span.Tracer().StartSpan("index.Create", opentracing.ChildOf(span.Context())) - defer span1.Finish() - ctx = opentracing.ContextWithSpan(ctx, span1) + var value []byte + if c.tblInfo.TempTableType != model.TempTableNone { + // Always check key for temporary table because it does not write to TiKV + value, err = txn.Get(ctx, key) + } else if sctx.GetSessionVars().LazyCheckKeyNotExists() { + value, err = txn.GetMemBuffer().Get(ctx, key) + } else { + value, err = txn.Get(ctx, key) } - } else { - ctx = context.TODO() - } - - var value []byte - if c.tblInfo.TempTableType != model.TempTableNone { - // Always check key for temporary table because it does not write to TiKV - value, err = txn.Get(ctx, key) - } else if sctx.GetSessionVars().LazyCheckKeyNotExists() { - value, err = txn.GetMemBuffer().Get(ctx, key) - } else { - value, err = txn.Get(ctx, key) - } - if err != nil && !kv.IsErrNotFound(err) { - return nil, err - } - if err != nil || len(value) == 0 || (keyIsTempIdxKey && tablecodec.CheckTempIndexValueIsDelete(value)) { - lazyCheck := sctx.GetSessionVars().LazyCheckKeyNotExists() && err != nil - var needPresumeKey TempIndexKeyState - if keyIsTempIdxKey { - idxVal = tablecodec.EncodeTempIndexValue(idxVal, keyVer) - needPresumeKey, _, err = KeyExistInTempIndex(ctx, txn, key, distinct, h, c.tblInfo.IsCommonHandle) + if err != nil && !kv.IsErrNotFound(err) { + return nil, err + } + if err != nil || len(value) == 0 || (keyIsTempIdxKey && tablecodec.CheckTempIndexValueIsDelete(value)) { + lazyCheck := sctx.GetSessionVars().LazyCheckKeyNotExists() && err != nil + var needPresumeKey TempIndexKeyState + if keyIsTempIdxKey { + idxVal = tablecodec.EncodeTempIndexValue(idxVal, keyVer) + needPresumeKey, _, err = KeyExistInTempIndex(ctx, txn, key, distinct, h, c.tblInfo.IsCommonHandle) + if err != nil { + return nil, err + } + } else { + if len(tempKey) > 0 { + needPresumeKey, _, err = KeyExistInTempIndex(ctx, txn, tempKey, distinct, h, c.tblInfo.IsCommonHandle) + if err != nil { + return nil, err + } + } + } + if lazyCheck { + var flags []kv.FlagsOp + if needPresumeKey != KeyInTempIndexIsDeleted { + flags = []kv.FlagsOp{kv.SetPresumeKeyNotExists} + } + if !vars.ConstraintCheckInPlacePessimistic && vars.TxnCtx.IsPessimistic && vars.InTxn() && + !vars.InRestrictedSQL && vars.ConnectionID > 0 { + flags = append(flags, kv.SetNeedConstraintCheckInPrewrite) + } + err = txn.GetMemBuffer().SetWithFlags(key, idxVal, flags...) 
+ } else { + err = txn.GetMemBuffer().Set(key, idxVal) + } if err != nil { return nil, err } - } else { if len(tempKey) > 0 { - needPresumeKey, _, err = KeyExistInTempIndex(ctx, txn, tempKey, distinct, h, c.tblInfo.IsCommonHandle) + idxVal = tablecodec.EncodeTempIndexValue(idxVal, keyVer) + if lazyCheck && needPresumeKey != KeyInTempIndexIsDeleted { + err = txn.GetMemBuffer().SetWithFlags(tempKey, idxVal, kv.SetPresumeKeyNotExists) + } else { + err = txn.GetMemBuffer().Set(tempKey, idxVal) + } if err != nil { return nil, err } } - } - if lazyCheck { - var flags []kv.FlagsOp - if needPresumeKey != KeyInTempIndexIsDeleted { - flags = []kv.FlagsOp{kv.SetPresumeKeyNotExists} + if opt.IgnoreAssertion { + continue } - if !vars.ConstraintCheckInPlacePessimistic && vars.TxnCtx.IsPessimistic && vars.InTxn() && - !vars.InRestrictedSQL && vars.ConnectionID > 0 { - flags = append(flags, kv.SetNeedConstraintCheckInPrewrite) - } - err = txn.GetMemBuffer().SetWithFlags(key, idxVal, flags...) - } else { - err = txn.GetMemBuffer().Set(key, idxVal) - } - if err != nil { - return nil, err - } - if len(tempKey) > 0 { - idxVal = tablecodec.EncodeTempIndexValue(idxVal, keyVer) - if lazyCheck && needPresumeKey != KeyInTempIndexIsDeleted { - err = txn.GetMemBuffer().SetWithFlags(tempKey, idxVal, kv.SetPresumeKeyNotExists) + if lazyCheck && !txn.IsPessimistic() { + err = txn.SetAssertion(key, kv.SetAssertUnknown) } else { - err = txn.GetMemBuffer().Set(tempKey, idxVal) + err = txn.SetAssertion(key, kv.SetAssertNotExist) } if err != nil { return nil, err } + continue } - if opt.IgnoreAssertion { - return nil, nil + + if keyIsTempIdxKey { + value = tablecodec.DecodeTempIndexOriginValue(value) } - if lazyCheck && !txn.IsPessimistic() { - err = txn.SetAssertion(key, kv.SetAssertUnknown) - } else { - err = txn.SetAssertion(key, kv.SetAssertNotExist) + handle, err := tablecodec.DecodeHandleInUniqueIndexValue(value, c.tblInfo.IsCommonHandle) + if err != nil { + return nil, err } - return nil, err - } - - if keyIsTempIdxKey { - value = tablecodec.DecodeTempIndexOriginValue(value) - } - handle, err := tablecodec.DecodeHandleInUniqueIndexValue(value, c.tblInfo.IsCommonHandle) - if err != nil { - return nil, err + return handle, kv.ErrKeyExists } - return handle, kv.ErrKeyExists + return nil, nil } // Delete removes the entry for handle h and indexedValues from KV index. 
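+// For a multi-valued index, indexedValue is first expanded by getIndexedValue and the
+// index entry of every distinct array element is removed in turn.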
-func (c *index) Delete(sc *stmtctx.StatementContext, txn kv.Transaction, indexedValues []types.Datum, h kv.Handle) error { - key, distinct, err := c.GenIndexKey(sc, indexedValues, h, nil) - if err != nil { - return err - } +func (c *index) Delete(sc *stmtctx.StatementContext, txn kv.Transaction, indexedValue []types.Datum, h kv.Handle) error { + indexedValues := c.getIndexedValue(indexedValue) + for _, value := range indexedValues { + key, distinct, err := c.GenIndexKey(sc, value, h, nil) + if err != nil { + return err + } - key, tempKey, tempKeyVer := GenTempIdxKeyByState(c.idxInfo, key) + key, tempKey, tempKeyVer := GenTempIdxKeyByState(c.idxInfo, key) - if distinct { - if len(key) > 0 { - err = txn.GetMemBuffer().DeleteWithFlags(key, kv.SetNeedLocked) - if err != nil { - return err + if distinct { + if len(key) > 0 { + err = txn.GetMemBuffer().DeleteWithFlags(key, kv.SetNeedLocked) + if err != nil { + return err + } } - } - if len(tempKey) > 0 { - tempVal := tablecodec.EncodeTempIndexValueDeletedUnique(h, tempKeyVer) - err = txn.GetMemBuffer().Set(tempKey, tempVal) - if err != nil { - return err + if len(tempKey) > 0 { + tempVal := tablecodec.EncodeTempIndexValueDeletedUnique(h, tempKeyVer) + err = txn.GetMemBuffer().Set(tempKey, tempVal) + if err != nil { + return err + } } - } - } else { - if len(key) > 0 { - err = txn.GetMemBuffer().Delete(key) - if err != nil { - return err + } else { + if len(key) > 0 { + err = txn.GetMemBuffer().Delete(key) + if err != nil { + return err + } } - } - if len(tempKey) > 0 { - tempVal := tablecodec.EncodeTempIndexValueDeleted(tempKeyVer) - err = txn.GetMemBuffer().Set(tempKey, tempVal) - if err != nil { - return err + if len(tempKey) > 0 { + tempVal := tablecodec.EncodeTempIndexValueDeleted(tempKeyVer) + err = txn.GetMemBuffer().Set(tempKey, tempVal) + if err != nil { + return err + } } } + if c.idxInfo.State == model.StatePublic { + // If the index is in public state, delete this index means it must exists. + err = txn.SetAssertion(key, kv.SetAssertExist) + } + if err != nil { + return err + } } - if c.idxInfo.State == model.StatePublic { - // If the index is in public state, delete this index means it must exists. - err = txn.SetAssertion(key, kv.SetAssertExist) - } - return err + return nil } const ( diff --git a/table/tables/mutation_checker.go b/table/tables/mutation_checker.go index e4513b8cae409..8445a266e3d2d 100644 --- a/table/tables/mutation_checker.go +++ b/table/tables/mutation_checker.go @@ -236,7 +236,7 @@ func checkIndexKeys( } for i, v := range decodedIndexValues { - fieldType := &t.Columns[indexInfo.Columns[i].Offset].FieldType + fieldType := t.Columns[indexInfo.Columns[i].Offset].FieldType.ArrayType() datum, err := tablecodec.DecodeColumnValue(v, fieldType, sessVars.Location()) if err != nil { return errors.Trace(err) @@ -347,9 +347,27 @@ func compareIndexData( cols[indexInfo.Columns[i].Offset].ColumnInfo, ) - comparison, err := decodedMutationDatum.Compare(sc, &expectedDatum, collate.GetCollator(decodedMutationDatum.Collation())) - if err != nil { - return errors.Trace(err) + var comparison int + var err error + // If it is multi-valued index, we should check the JSON contains the indexed value. 
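+			// The decoded mutation datum holds a single array element, so instead of a direct
+			// Compare we scan the expected JSON array and treat any element that compares
+			// equal under the binary collator as a match.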
+ if cols[indexInfo.Columns[i].Offset].ColumnInfo.FieldType.IsArray() && expectedDatum.Kind() == types.KindMysqlJSON { + bj := expectedDatum.GetMysqlJSON() + count := bj.GetElemCount() + for elemIdx := 0; elemIdx < count; elemIdx++ { + jsonDatum := types.NewJSONDatum(bj.ArrayGetElem(elemIdx)) + comparison, err = jsonDatum.Compare(sc, &decodedMutationDatum, collate.GetBinaryCollator()) + if err != nil { + return errors.Trace(err) + } + if comparison == 0 { + break + } + } + } else { + comparison, err = decodedMutationDatum.Compare(sc, &expectedDatum, collate.GetCollator(decodedMutationDatum.Collation())) + if err != nil { + return errors.Trace(err) + } } if comparison != 0 { diff --git a/table/tables/tables.go b/table/tables/tables.go index 6b5ef87546503..e32328ce8fae9 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -1953,7 +1953,7 @@ func BuildTableScanFromInfos(tableInfo *model.TableInfo, columnInfos []*model.Co pkColIds := TryGetCommonPkColumnIds(tableInfo) tsExec := &tipb.TableScan{ TableId: tableInfo.ID, - Columns: util.ColumnsToProto(columnInfos, tableInfo.PKIsHandle), + Columns: util.ColumnsToProto(columnInfos, tableInfo.PKIsHandle, false), PrimaryColumnIds: pkColIds, } if tableInfo.IsCommonHandle { @@ -1967,7 +1967,7 @@ func BuildPartitionTableScanFromInfos(tableInfo *model.TableInfo, columnInfos [] pkColIds := TryGetCommonPkColumnIds(tableInfo) tsExec := &tipb.PartitionTableScan{ TableId: tableInfo.ID, - Columns: util.ColumnsToProto(columnInfos, tableInfo.PKIsHandle), + Columns: util.ColumnsToProto(columnInfos, tableInfo.PKIsHandle, false), PrimaryColumnIds: pkColIds, IsFastScan: &fastScan, } diff --git a/telemetry/data_feature_usage_test.go b/telemetry/data_feature_usage_test.go index 369073009c0a4..cb3272d110b29 100644 --- a/telemetry/data_feature_usage_test.go +++ b/telemetry/data_feature_usage_test.go @@ -499,10 +499,6 @@ func TestFlashbackCluster(t *testing.T) { } func TestAddIndexAccelerationAndMDL(t *testing.T) { - if !variable.EnableConcurrentDDL.Load() { - t.Skipf("test requires concurrent ddl") - } - store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) usage, err := telemetry.GetFeatureUsage(tk.Session()) diff --git a/tests/realtikvtest/addindextest/add_index_test.go b/tests/realtikvtest/addindextest/add_index_test.go index 7dd4919570594..1c1403f66a922 100644 --- a/tests/realtikvtest/addindextest/add_index_test.go +++ b/tests/realtikvtest/addindextest/add_index_test.go @@ -100,3 +100,29 @@ func TestCreateMultiColsIndex(t *testing.T) { ctx := initTest(t) testTwoColsFrame(ctx, coliIDs, coljIDs, addIndexMultiCols) } + +func TestAddForeignKeyWithAutoCreateIndex(t *testing.T) { + store := realtikvtest.CreateMockStoreAndSetup(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("drop database if exists fk_index;") + tk.MustExec("create database fk_index;") + tk.MustExec("use fk_index;") + tk.MustExec(`set global tidb_ddl_enable_fast_reorg=1;`) + tk.MustExec("create table employee (id bigint auto_increment key, pid bigint)") + tk.MustExec("insert into employee (id) values (1),(2),(3),(4),(5),(6),(7),(8)") + for i := 0; i < 14; i++ { + tk.MustExec("insert into employee (pid) select pid from employee") + } + tk.MustExec("update employee set pid=id-1 where id>1") + tk.MustQuery("select count(*) from employee").Check(testkit.Rows("131072")) + tk.MustExec("alter table employee add foreign key fk_1(pid) references employee(id)") + tk.MustExec("alter table employee drop foreign key fk_1") + tk.MustExec("alter table employee drop index fk_1") + 
tk.MustExec("update employee set pid=0 where id=1") + tk.MustGetErrMsg("alter table employee add foreign key fk_1(pid) references employee(id)", + "[ddl:1452]Cannot add or update a child row: a foreign key constraint fails (`fk_index`.`employee`, CONSTRAINT `fk_1` FOREIGN KEY (`pid`) REFERENCES `employee` (`id`))") + tk.MustExec("update employee set pid=null where id=1") + tk.MustExec("insert into employee (pid) select pid from employee") + tk.MustExec("update employee set pid=id-1 where id>1 and pid is null") + tk.MustExec("alter table employee add foreign key fk_1(pid) references employee(id)") +} diff --git a/tests/realtikvtest/brietest/BUILD.bazel b/tests/realtikvtest/brietest/BUILD.bazel index 62de71ea3b77d..c3118c4d7a88a 100644 --- a/tests/realtikvtest/brietest/BUILD.bazel +++ b/tests/realtikvtest/brietest/BUILD.bazel @@ -6,26 +6,20 @@ go_test( srcs = [ "backup_restore_test.go", "binlog_test.go", - "flashback_test.go", "main_test.go", ], flaky = True, race = "on", deps = [ "//config", - "//ddl/util", - "//parser/model", "//parser/mysql", "//sessionctx/binloginfo", "//store/mockstore/mockcopr", "//testkit", "//testkit/testsetup", "//tests/realtikvtest", - "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_tipb//go-binlog", "@com_github_stretchr_testify//require", - "@com_github_tikv_client_go_v2//oracle", - "@com_github_tikv_client_go_v2//util", "@org_golang_google_grpc//:grpc", "@org_uber_go_goleak//:goleak", ], diff --git a/tests/realtikvtest/brietest/flashback_test.go b/tests/realtikvtest/brietest/flashback_test.go deleted file mode 100644 index 470a62fb90d93..0000000000000 --- a/tests/realtikvtest/brietest/flashback_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package brietest - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/pingcap/failpoint" - ddlutil "github.com/pingcap/tidb/ddl/util" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/tests/realtikvtest" - "github.com/stretchr/testify/require" - "github.com/tikv/client-go/v2/oracle" - tikvutil "github.com/tikv/client-go/v2/util" -) - -// MockGC is used to make GC work in the test environment. -func MockGC(tk *testkit.TestKit) (string, string, string, func()) { - originGC := ddlutil.IsEmulatorGCEnable() - resetGC := func() { - if originGC { - ddlutil.EmulatorGCEnable() - } else { - ddlutil.EmulatorGCDisable() - } - } - - // disable emulator GC. - // Otherwise emulator GC will delete table record as soon as possible after execute drop table ddl. - ddlutil.EmulatorGCDisable() - timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(tikvutil.GCTimeFormat) - timeAfterDrop := time.Now().Add(48 * 60 * 60 * time.Second).Format(tikvutil.GCTimeFormat) - safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '') - ON DUPLICATE KEY - UPDATE variable_value = '%[1]s'` - // clear GC variables first. 
- tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )") - return timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC -} - -func TestFlashback(t *testing.T) { - if *realtikvtest.WithRealTiKV { - store := realtikvtest.CreateMockStoreAndSetup(t) - - tk := testkit.NewTestKit(t, store) - - timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk) - defer resetGC() - - tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, index i(a))") - tk.MustExec("insert t values (1), (2), (3)") - - time.Sleep(1 * time.Second) - - ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) - require.NoError(t, err) - - injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second)) - require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", - fmt.Sprintf("return(%v)", injectSafeTS))) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", - fmt.Sprintf("return(%v)", injectSafeTS))) - - tk.MustExec("insert t values (4), (5), (6)") - tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts))) - - tk.MustExec("admin check table t") - require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3") - require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3") - - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) - require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) - } -} - -func TestPrepareFlashbackFailed(t *testing.T) { - if *realtikvtest.WithRealTiKV { - store := realtikvtest.CreateMockStoreAndSetup(t) - - tk := testkit.NewTestKit(t, store) - - timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk) - defer resetGC() - - tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, index i(a))") - tk.MustExec("insert t values (1), (2), (3)") - - time.Sleep(1 * time.Second) - - ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) - require.NoError(t, err) - - injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second)) - require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", - fmt.Sprintf("return(%v)", injectSafeTS))) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", - fmt.Sprintf("return(%v)", injectSafeTS))) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockPrepareMeetsEpochNotMatch", `return(true)`)) - - tk.MustExec("insert t values (4), (5), (6)") - tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts))) - - tk.MustExec("admin check table t") - require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3") - require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3") - - jobMeta := tk.MustQuery("select job_meta from mysql.tidb_ddl_history order by job_id desc limit 1").Rows()[0][0].(string) - job := model.Job{} - require.NoError(t, job.Decode([]byte(jobMeta))) - require.Equal(t, job.ErrorCount, int64(0)) - - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) - require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) - require.NoError(t, 
failpoint.Disable("github.com/pingcap/tidb/ddl/mockPrepareMeetsEpochNotMatch")) - } -} diff --git a/tests/realtikvtest/flashbacktest/BUILD.bazel b/tests/realtikvtest/flashbacktest/BUILD.bazel new file mode 100644 index 0000000000000..6e2410abfc1ea --- /dev/null +++ b/tests/realtikvtest/flashbacktest/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "flashbacktest_test", + srcs = [ + "flashback_test.go", + "main_test.go", + ], + deps = [ + "//ddl/util", + "//errno", + "//parser/model", + "//testkit", + "//testkit/testsetup", + "//tests/realtikvtest", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//require", + "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//util", + "@org_uber_go_goleak//:goleak", + ], +) diff --git a/tests/realtikvtest/flashbacktest/flashback_test.go b/tests/realtikvtest/flashbacktest/flashback_test.go new file mode 100644 index 0000000000000..50cf274490a5a --- /dev/null +++ b/tests/realtikvtest/flashbacktest/flashback_test.go @@ -0,0 +1,288 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flashbacktest + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/pingcap/failpoint" + ddlutil "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/errno" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/tests/realtikvtest" + "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" + tikvutil "github.com/tikv/client-go/v2/util" +) + +// MockGC is used to make GC work in the test environment. +func MockGC(tk *testkit.TestKit) (string, string, string, func()) { + originGC := ddlutil.IsEmulatorGCEnable() + resetGC := func() { + if originGC { + ddlutil.EmulatorGCEnable() + } else { + ddlutil.EmulatorGCDisable() + } + } + + // disable emulator GC. + // Otherwise emulator GC will delete table record as soon as possible after execute drop table ddl. + ddlutil.EmulatorGCDisable() + timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(tikvutil.GCTimeFormat) + timeAfterDrop := time.Now().Add(48 * 60 * 60 * time.Second).Format(tikvutil.GCTimeFormat) + safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '') + ON DUPLICATE KEY + UPDATE variable_value = '%[1]s'` + // clear GC variables first. 
+ tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )") + return timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC +} + +func TestFlashback(t *testing.T) { + if *realtikvtest.WithRealTiKV { + store := realtikvtest.CreateMockStoreAndSetup(t) + + tk := testkit.NewTestKit(t, store) + + timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk) + defer resetGC() + + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, index i(a))") + tk.MustExec("insert t values (1), (2), (3)") + + time.Sleep(1 * time.Second) + + ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) + require.NoError(t, err) + + injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second)) + require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + + tk.MustExec("insert t values (4), (5), (6)") + tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts))) + + tk.MustExec("admin check table t") + require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3") + require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3") + + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) + require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) + } +} + +func TestPrepareFlashbackFailed(t *testing.T) { + if *realtikvtest.WithRealTiKV { + store := realtikvtest.CreateMockStoreAndSetup(t) + + tk := testkit.NewTestKit(t, store) + + timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk) + defer resetGC() + + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, index i(a))") + tk.MustExec("insert t values (1), (2), (3)") + + time.Sleep(1 * time.Second) + + ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) + require.NoError(t, err) + + injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second)) + require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockPrepareMeetsEpochNotMatch", `return(true)`)) + + tk.MustExec("insert t values (4), (5), (6)") + tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts))) + + tk.MustExec("admin check table t") + require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3") + require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3") + + jobMeta := tk.MustQuery("select job_meta from mysql.tidb_ddl_history order by job_id desc limit 1").Rows()[0][0].(string) + job := model.Job{} + require.NoError(t, job.Decode([]byte(jobMeta))) + require.Equal(t, job.ErrorCount, int64(0)) + + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) + require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) + require.NoError(t, 
failpoint.Disable("github.com/pingcap/tidb/ddl/mockPrepareMeetsEpochNotMatch")) + } +} + +func TestFlashbackAddDropIndex(t *testing.T) { + if *realtikvtest.WithRealTiKV { + store := realtikvtest.CreateMockStoreAndSetup(t) + + tk := testkit.NewTestKit(t, store) + + timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk) + defer resetGC() + + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, index i(a))") + tk.MustExec("insert t values (1), (2), (3)") + prevGCCount := tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0] + + time.Sleep(1 * time.Second) + + ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) + require.NoError(t, err) + + tk.MustExec("alter table t add index k(a)") + require.Equal(t, tk.MustQuery("select max(a) from t use index(k)").Rows()[0][0], "3") + tk.MustExec("alter table t drop index i") + tk.MustGetErrCode("select max(a) from t use index(i)", errno.ErrKeyDoesNotExist) + require.Greater(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount) + + injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second)) + require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + + tk.MustExec("insert t values (4), (5), (6)") + tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts))) + + tk.MustExec("admin check table t") + require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3") + tk.MustGetErrCode("select max(a) from t use index(k)", errno.ErrKeyDoesNotExist) + require.Equal(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount) + + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) + require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) + } +} + +func TestFlashbackAddDropModifyColumn(t *testing.T) { + if *realtikvtest.WithRealTiKV { + store := realtikvtest.CreateMockStoreAndSetup(t) + + tk := testkit.NewTestKit(t, store) + + timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk) + defer resetGC() + + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, index i(a))") + tk.MustExec("insert t values (1, 1), (2, 2), (3, 3)") + + time.Sleep(1 * time.Second) + + ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) + require.NoError(t, err) + + tk.MustExec("alter table t add column c int") + tk.MustExec("alter table t drop column b") + tk.MustExec("alter table t modify column a tinyint") + require.Equal(t, tk.MustQuery("show create table t").Rows()[0][1], "CREATE TABLE `t` (\n"+ + " `a` tinyint(4) DEFAULT NULL,\n"+ + " `c` int(11) DEFAULT NULL,\n"+ + " KEY `i` (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin") + + injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second)) + require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + + 
tk.MustExec("insert t values (4, 4), (5, 5), (6, 6)") + tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts))) + + tk.MustExec("admin check table t") + require.Equal(t, tk.MustQuery("show create table t").Rows()[0][1], "CREATE TABLE `t` (\n"+ + " `a` int(11) DEFAULT NULL,\n"+ + " `b` int(11) DEFAULT NULL,\n"+ + " KEY `i` (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin") + require.Equal(t, tk.MustQuery("select max(b) from t").Rows()[0][0], "3") + + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) + require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) + } +} + +func TestFlashbackRenameDropCreateTable(t *testing.T) { + if *realtikvtest.WithRealTiKV { + store := realtikvtest.CreateMockStoreAndSetup(t) + + tk := testkit.NewTestKit(t, store) + + timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk) + defer resetGC() + + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + tk.MustExec("use test") + tk.MustExec("drop table if exists t, t1, t2, t3") + tk.MustExec("create table t(a int, index i(a))") + tk.MustExec("insert t values (1), (2), (3)") + tk.MustExec("create table t1(a int, index i(a))") + tk.MustExec("insert t1 values (4), (5), (6)") + prevGCCount := tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0] + + time.Sleep(1 * time.Second) + + ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{}) + require.NoError(t, err) + + tk.MustExec("rename table t to t3") + tk.MustExec("drop table t1") + tk.MustExec("create table t2(a int, index i(a))") + tk.MustExec("insert t2 values (7), (8), (9)") + + require.Equal(t, tk.MustQuery("select max(a) from t3").Rows()[0][0], "3") + require.Equal(t, tk.MustQuery("select max(a) from t2").Rows()[0][0], "9") + + require.Greater(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount) + + injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second)) + + require.NoError(t, failpoint.Enable("tikvclient/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/expression/injectSafeTS", + fmt.Sprintf("return(%v)", injectSafeTS))) + tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts))) + + tk.MustExec("admin check table t") + require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3") + tk.MustExec("admin check table t1") + require.Equal(t, tk.MustQuery("select max(a) from t1").Rows()[0][0], "6") + require.Equal(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount) + + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/expression/injectSafeTS")) + require.NoError(t, failpoint.Disable("tikvclient/injectSafeTS")) + } +} diff --git a/ddl/concurrentddltest/main_test.go b/tests/realtikvtest/flashbacktest/main_test.go similarity index 67% rename from ddl/concurrentddltest/main_test.go rename to tests/realtikvtest/flashbacktest/main_test.go index 4ab7e96eab2ae..d24310861a836 100644 --- a/ddl/concurrentddltest/main_test.go +++ b/tests/realtikvtest/flashbacktest/main_test.go @@ -12,34 +12,28 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package concurrentddltest +package flashbacktest import ( "testing" - "time" - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/testkit/testsetup" + "github.com/pingcap/tidb/tests/realtikvtest" "go.uber.org/goleak" ) func TestMain(m *testing.M) { - testsetup.SetupForCommonTest() - - config.UpdateGlobal(func(conf *config.Config) { - conf.TiKVClient.AsyncCommit.SafeWindow = 0 - conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 - }) - - ddl.SetWaitTimeWhenErrorOccurred(time.Microsecond) - opts := []goleak.Option{ + goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), - goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + goleak.IgnoreTopFunction("google.golang.org/grpc.(*ccBalancerWrapper).watcher"), + goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*http2Client).keepalive"), + goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*controlBuffer).get"), + goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"), + goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), } - + testsetup.SetupForCommonTest() goleak.VerifyTestMain(m, opts...) + realtikvtest.RunTestMain(m) } diff --git a/tests/realtikvtest/pessimistictest/BUILD.bazel b/tests/realtikvtest/pessimistictest/BUILD.bazel index 97890c8b8b70b..67a01e83cf386 100644 --- a/tests/realtikvtest/pessimistictest/BUILD.bazel +++ b/tests/realtikvtest/pessimistictest/BUILD.bazel @@ -18,6 +18,7 @@ go_test( "//parser/model", "//parser/mysql", "//parser/terror", + "//planner/core", "//session", "//sessionctx/variable", "//sessiontxn", diff --git a/tests/realtikvtest/pessimistictest/pessimistic_test.go b/tests/realtikvtest/pessimistictest/pessimistic_test.go index ae7545e0e91f6..a70b31f0a87b8 100644 --- a/tests/realtikvtest/pessimistictest/pessimistic_test.go +++ b/tests/realtikvtest/pessimistictest/pessimistic_test.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" + plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/sessiontxn" @@ -2816,6 +2817,66 @@ func TestAsyncCommitCalTSFail(t *testing.T) { tk2.MustExec("commit") } +func TestAsyncCommitAndForeignKey(t *testing.T) { + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.TiKVClient.AsyncCommit.SafeWindow = time.Second + conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 + }) + store := realtikvtest.CreateMockStoreAndSetup(t) + tk := createAsyncCommitTestKit(t, store) + tk.MustExec("drop table if exists t_parent, t_child") + tk.MustExec("create table t_parent (id int primary key)") + tk.MustExec("create table t_child (id int primary key, pid int, foreign key (pid) references t_parent(id) on delete cascade on update cascade)") + tk.MustExec("insert into t_parent values (1),(2),(3),(4)") + tk.MustExec("insert into t_child values (1,1),(2,2),(3,3)") + tk.MustExec("set tidb_enable_1pc = true") + tk.MustExec("begin pessimistic") + tk.MustExec("delete from t_parent where id in (1,4)") + tk.MustExec("update t_parent set id=22 where id=2") + tk.MustExec("commit") + 
tk.MustQuery("select * from t_parent order by id").Check(testkit.Rows("3", "22")) + tk.MustQuery("select * from t_child order by id").Check(testkit.Rows("2 22", "3 3")) +} + +func TestTransactionIsolationAndForeignKey(t *testing.T) { + if !*realtikvtest.WithRealTiKV { + t.Skip("The test only support test with tikv.") + } + store := realtikvtest.CreateMockStoreAndSetup(t) + tk := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk2.MustExec("use test") + tk.MustExec("drop table if exists t1,t2") + tk.MustExec("create table t1 (id int primary key)") + tk.MustExec("create table t2 (id int primary key, pid int, foreign key (pid) references t1(id) on delete cascade on update cascade)") + tk.MustExec("insert into t1 values (1)") + tk.MustExec("set tx_isolation = 'READ-COMMITTED'") + tk.MustExec("begin pessimistic") + tk.MustExec("insert into t2 values (1,1)") + tk.MustGetDBError("insert into t2 values (2,2)", plannercore.ErrNoReferencedRow2) + tk2.MustExec("insert into t1 values (2)") + tk.MustQuery("select * from t1").Check(testkit.Rows("1", "2")) + tk.MustExec("insert into t2 values (2,2)") + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + tk2.MustExec("delete from t1 where id=2") + }() + time.Sleep(time.Millisecond * 10) + tk.MustExec("commit") + wg.Wait() + tk.MustQuery("select * from t1").Check(testkit.Rows("1")) + tk.MustQuery("select * from t2").Check(testkit.Rows("1 1")) + tk2.MustExec("delete from t1 where id=1") + tk.MustQuery("select * from t1").Check(testkit.Rows()) + tk.MustQuery("select * from t2").Check(testkit.Rows()) + tk.MustExec("admin check table t1") + tk.MustExec("admin check table t2") +} + func TestChangeLockToPut(t *testing.T) { store := realtikvtest.CreateMockStoreAndSetup(t) diff --git a/tests/realtikvtest/testkit.go b/tests/realtikvtest/testkit.go index b3ae5f3c6a2ac..4b8a749e65c9d 100644 --- a/tests/realtikvtest/testkit.go +++ b/tests/realtikvtest/testkit.go @@ -19,6 +19,7 @@ package realtikvtest import ( "flag" "fmt" + "strings" "sync/atomic" "testing" "time" @@ -110,8 +111,12 @@ func CreateMockStoreAndDomainAndSetup(t *testing.T, opts ...mockstore.MockTiKVSt tk.MustExec(fmt.Sprintf("set global innodb_lock_wait_timeout = %d", variable.DefInnodbLockWaitTimeout)) tk.MustExec("use test") rs := tk.MustQuery("show tables") + tables := []string{} for _, row := range rs.Rows() { - tk.MustExec(fmt.Sprintf("drop table %s", row[0])) + tables = append(tables, fmt.Sprintf("`%v`", row[0])) + } + if len(tables) > 0 { + tk.MustExec(fmt.Sprintf("drop table %s", strings.Join(tables, ","))) } } else { store, err = mockstore.NewMockStore(opts...) diff --git a/tidb-server/main.go b/tidb-server/main.go index 8605c7d0c3aaa..97ad1917105ca 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -122,6 +122,7 @@ const ( nmInitializeSecure = "initialize-secure" nmInitializeInsecure = "initialize-insecure" + nmInitializeSQLFile = "initialize-sql-file" nmDisconnectOnExpiredPassword = "disconnect-on-expired-password" ) @@ -166,9 +167,10 @@ var ( proxyProtocolNetworks = flag.String(nmProxyProtocolNetworks, "", "proxy protocol networks allowed IP or *, empty mean disable proxy protocol support") proxyProtocolHeaderTimeout = flag.Uint(nmProxyProtocolHeaderTimeout, 5, "proxy protocol header read timeout, unit is second. 
(Deprecated: as proxy protocol using lazy mode, header read timeout no longer used)") - // Security + // Bootstrap and security initializeSecure = flagBoolean(nmInitializeSecure, false, "bootstrap tidb-server in secure mode") initializeInsecure = flagBoolean(nmInitializeInsecure, true, "bootstrap tidb-server in insecure mode") + initializeSQLFile = flag.String(nmInitializeSQLFile, "", "SQL file to execute on first bootstrap") disconnectOnExpiredPassword = flagBoolean(nmDisconnectOnExpiredPassword, true, "the server disconnects the client when the password is expired") ) @@ -531,7 +533,7 @@ func overrideConfig(cfg *config.Config) { // Sanity check: can't specify both options if actualFlags[nmInitializeSecure] && actualFlags[nmInitializeInsecure] { - err = fmt.Errorf("the options --initialize-insecure and --initialize-secure are mutually exclusive") + err = fmt.Errorf("the options -initialize-insecure and -initialize-secure are mutually exclusive") terror.MustNil(err) } // The option --initialize-secure=true ensures that a secure bootstrap is used. @@ -550,9 +552,19 @@ func overrideConfig(cfg *config.Config) { // which is not supported on windows. Only the insecure bootstrap // method is supported. if runtime.GOOS == "windows" && cfg.Security.SecureBootstrap { - err = fmt.Errorf("the option --initialize-secure is not supported on Windows") + err = fmt.Errorf("the option -initialize-secure is not supported on Windows") terror.MustNil(err) } + // Initialize SQL File is used to run a set of SQL statements after first bootstrap. + // It is important in the use case that you want to set GLOBAL variables, which + // are persisted to the cluster and not read from a config file. + if actualFlags[nmInitializeSQLFile] { + if _, err := os.Stat(*initializeSQLFile); err != nil { + err = fmt.Errorf("can not access -initialize-sql-file %s", *initializeSQLFile) + terror.MustNil(err) + } + cfg.InitializeSQLFile = *initializeSQLFile + } } func setVersions() { @@ -788,16 +800,7 @@ func setupMetrics() { systimeErrHandler := func() { metrics.TimeJumpBackCounter.Inc() } - callBackCount := 0 - successCallBack := func() { - callBackCount++ - // It is callback by monitor per second, we increase metrics.KeepAliveCounter per 5s. 
- if callBackCount >= 5 { - callBackCount = 0 - metrics.KeepAliveCounter.Inc() - } - } - go systimemon.StartMonitor(time.Now, systimeErrHandler, successCallBack) + go systimemon.StartMonitor(time.Now, systimeErrHandler) pushMetric(cfg.Status.MetricsAddr, time.Duration(cfg.Status.MetricsInterval)*time.Second) } diff --git a/ttl/cache/ttlstatus.go b/ttl/cache/ttlstatus.go index 1657105e6c3e7..f14c1a559ae95 100644 --- a/ttl/cache/ttlstatus.go +++ b/ttl/cache/ttlstatus.go @@ -16,7 +16,6 @@ package cache import ( "context" - "fmt" "time" "github.com/pingcap/tidb/sessionctx" @@ -43,8 +42,8 @@ const ( const selectFromTTLTableStatus = "SELECT LOW_PRIORITY table_id,parent_table_id,table_statistics,last_job_id,last_job_start_time,last_job_finish_time,last_job_ttl_expire,last_job_summary,current_job_id,current_job_owner_id,current_job_owner_addr,current_job_owner_hb_time,current_job_start_time,current_job_ttl_expire,current_job_state,current_job_status,current_job_status_update_time FROM mysql.tidb_ttl_table_status" // SelectFromTTLTableStatusWithID returns an SQL statement to get the table status from table id -func SelectFromTTLTableStatusWithID(tableID int64) string { - return selectFromTTLTableStatus + fmt.Sprintf(" WHERE table_id = %d", tableID) +func SelectFromTTLTableStatusWithID(tableID int64) (string, []interface{}) { + return selectFromTTLTableStatus + " WHERE table_id = %?", []interface{}{tableID} } // TableStatus contains the corresponding information in the system table `mysql.tidb_ttl_table_status` diff --git a/ttl/ttlworker/BUILD.bazel b/ttl/ttlworker/BUILD.bazel index 8d031e22e6e4a..42466bb0a568d 100644 --- a/ttl/ttlworker/BUILD.bazel +++ b/ttl/ttlworker/BUILD.bazel @@ -31,6 +31,7 @@ go_library( "//util/timeutil", "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", "@org_golang_x_time//rate", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", @@ -41,6 +42,7 @@ go_test( name = "ttlworker_test", srcs = [ "del_test.go", + "job_integration_test.go", "job_manager_integration_test.go", "job_manager_test.go", "job_test.go", @@ -51,12 +53,14 @@ go_test( flaky = True, deps = [ "//infoschema", + "//kv", "//parser/ast", "//parser/model", "//parser/mysql", "//session", "//sessionctx", "//sessionctx/variable", + "//statistics/handle", "//testkit", "//ttl/cache", "//ttl/session", @@ -65,6 +69,7 @@ go_test( "//util/logutil", "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@org_golang_x_time//rate", diff --git a/ttl/ttlworker/config.go b/ttl/ttlworker/config.go index a92f362241fcf..55d005a82e6c2 100644 --- a/ttl/ttlworker/config.go +++ b/ttl/ttlworker/config.go @@ -16,6 +16,8 @@ package ttlworker import ( "time" + + "github.com/pingcap/failpoint" ) const jobManagerLoopTickerInterval = 10 * time.Second @@ -27,3 +29,24 @@ const ttlInternalSQLTimeout = 30 * time.Second const resizeWorkersInterval = 30 * time.Second const splitScanCount = 64 const ttlJobTimeout = 6 * time.Hour + +func getUpdateInfoSchemaCacheInterval() time.Duration { + failpoint.Inject("update-info-schema-cache-interval", func(val failpoint.Value) time.Duration { + return time.Duration(val.(int)) + }) + return updateInfoSchemaCacheInterval +} + +func getUpdateTTLTableStatusCacheInterval() time.Duration { + failpoint.Inject("update-status-table-cache-interval", func(val failpoint.Value) time.Duration 
{ + return time.Duration(val.(int)) + }) + return updateTTLTableStatusCacheInterval +} + +func getResizeWorkersInterval() time.Duration { + failpoint.Inject("resize-workers-interval", func(val failpoint.Value) time.Duration { + return time.Duration(val.(int)) + }) + return resizeWorkersInterval +} diff --git a/ttl/ttlworker/job.go b/ttl/ttlworker/job.go index 1a2f351465027..02f402f90888d 100644 --- a/ttl/ttlworker/job.go +++ b/ttl/ttlworker/job.go @@ -17,7 +17,6 @@ package ttlworker import ( "context" "encoding/json" - "fmt" "sync" "time" @@ -29,20 +28,34 @@ import ( "go.uber.org/zap" ) -const updateJobCurrentStatusTemplate = "UPDATE mysql.tidb_ttl_table_status SET current_job_status = '%s' WHERE table_id = %d AND current_job_status = '%s' AND current_job_id = '%s'" -const finishJobTemplate = "UPDATE mysql.tidb_ttl_table_status SET last_job_id = current_job_id, last_job_start_time = current_job_start_time, last_job_finish_time = '%s', last_job_ttl_expire = current_job_ttl_expire, last_job_summary = '%s', current_job_id = NULL, current_job_owner_id = NULL, current_job_owner_hb_time = NULL, current_job_start_time = NULL, current_job_ttl_expire = NULL, current_job_state = NULL, current_job_status = NULL, current_job_status_update_time = NULL WHERE table_id = %d AND current_job_id = '%s'" -const updateJobStateTemplate = "UPDATE mysql.tidb_ttl_table_status SET current_job_state = '%s' WHERE table_id = %d AND current_job_id = '%s' AND current_job_owner_id = '%s'" +const updateJobCurrentStatusTemplate = "UPDATE mysql.tidb_ttl_table_status SET current_job_status = %? WHERE table_id = %? AND current_job_status = %? AND current_job_id = %?" +const finishJobTemplate = `UPDATE mysql.tidb_ttl_table_status + SET last_job_id = current_job_id, + last_job_start_time = current_job_start_time, + last_job_finish_time = %?, + last_job_ttl_expire = current_job_ttl_expire, + last_job_summary = %?, + current_job_id = NULL, + current_job_owner_id = NULL, + current_job_owner_hb_time = NULL, + current_job_start_time = NULL, + current_job_ttl_expire = NULL, + current_job_state = NULL, + current_job_status = NULL, + current_job_status_update_time = NULL + WHERE table_id = %? AND current_job_id = %?` +const updateJobStateTemplate = "UPDATE mysql.tidb_ttl_table_status SET current_job_state = %? WHERE table_id = %? AND current_job_id = %? AND current_job_owner_id = %?" 
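+// The %? markers above are bound to the argument slices returned by the helpers below and
+// passed to session.ExecuteSQL, replacing the previous fmt.Sprintf string interpolation.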
-func updateJobCurrentStatusSQL(tableID int64, oldStatus cache.JobStatus, newStatus cache.JobStatus, jobID string) string { - return fmt.Sprintf(updateJobCurrentStatusTemplate, newStatus, tableID, oldStatus, jobID) +func updateJobCurrentStatusSQL(tableID int64, oldStatus cache.JobStatus, newStatus cache.JobStatus, jobID string) (string, []interface{}) { + return updateJobCurrentStatusTemplate, []interface{}{string(newStatus), tableID, string(oldStatus), jobID} } -func finishJobSQL(tableID int64, finishTime time.Time, summary string, jobID string) string { - return fmt.Sprintf(finishJobTemplate, finishTime.Format(timeFormat), summary, tableID, jobID) +func finishJobSQL(tableID int64, finishTime time.Time, summary string, jobID string) (string, []interface{}) { + return finishJobTemplate, []interface{}{finishTime.Format(timeFormat), summary, tableID, jobID} } -func updateJobState(tableID int64, currentJobID string, currentJobState string, currentJobOwnerID string) string { - return fmt.Sprintf(updateJobStateTemplate, currentJobState, tableID, currentJobID, currentJobOwnerID) +func updateJobState(tableID int64, currentJobID string, currentJobState string, currentJobOwnerID string) (string, []interface{}) { + return updateJobStateTemplate, []interface{}{currentJobState, tableID, currentJobID, currentJobOwnerID} } type ttlJob struct { @@ -76,9 +89,10 @@ func (job *ttlJob) changeStatus(ctx context.Context, se session.Session, status job.status = status job.statusMutex.Unlock() - _, err := se.ExecuteSQL(ctx, updateJobCurrentStatusSQL(job.tbl.ID, oldStatus, status, job.id)) + sql, args := updateJobCurrentStatusSQL(job.tbl.ID, oldStatus, status, job.id) + _, err := se.ExecuteSQL(ctx, sql, args...) if err != nil { - return errors.Trace(err) + return errors.Wrapf(err, "execute sql: %s", sql) } return nil @@ -89,9 +103,10 @@ func (job *ttlJob) updateState(ctx context.Context, se session.Session) error { if err != nil { logutil.Logger(job.ctx).Warn("fail to generate summary for ttl job", zap.Error(err)) } - _, err = se.ExecuteSQL(ctx, updateJobState(job.tbl.ID, job.id, summary, job.ownerID)) + sql, args := updateJobState(job.tbl.ID, job.id, summary, job.ownerID) + _, err = se.ExecuteSQL(ctx, sql, args...) if err != nil { - return errors.Trace(err) + return errors.Wrapf(err, "execute sql: %s", sql) } return nil @@ -115,9 +130,10 @@ func (job *ttlJob) finish(se session.Session, now time.Time) { } // at this time, the job.ctx may have been canceled (to cancel this job) // even when it's canceled, we'll need to update the states, so use another context - _, err = se.ExecuteSQL(context.TODO(), finishJobSQL(job.tbl.ID, now, summary, job.id)) + sql, args := finishJobSQL(job.tbl.ID, now, summary, job.id) + _, err = se.ExecuteSQL(context.TODO(), sql, args...) if err != nil { - logutil.Logger(job.ctx).Error("fail to finish a ttl job", zap.Error(err), zap.Int64("tableID", job.tbl.ID), zap.String("jobID", job.id)) + logutil.Logger(job.ctx).Error("fail to finish a ttl job", zap.Error(err), zap.Int64("tableID", job.tbl.ID), zap.String("jobID", job.id), zap.String("sql", sql), zap.Any("arguments", args)) } } diff --git a/ttl/ttlworker/job_integration_test.go b/ttl/ttlworker/job_integration_test.go new file mode 100644 index 0000000000000..b90c4a550d858 --- /dev/null +++ b/ttl/ttlworker/job_integration_test.go @@ -0,0 +1,41 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ttlworker_test
+
+import (
+	"context"
+	"testing"
+
+	dbsession "github.com/pingcap/tidb/session"
+	"github.com/pingcap/tidb/testkit"
+	"github.com/pingcap/tidb/ttl/cache"
+	"github.com/pingcap/tidb/ttl/session"
+	"github.com/pingcap/tidb/ttl/ttlworker"
+	"github.com/stretchr/testify/require"
+)
+
+func TestChangeStatus(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+
+	dbSession, err := dbsession.CreateSession4Test(store)
+	require.NoError(t, err)
+	se := session.NewSession(dbSession, dbSession, nil)
+
+	job := ttlworker.NewTTLJob(&cache.PhysicalTable{ID: 0}, "0", cache.JobStatusWaiting)
+	tk.MustExec("insert into mysql.tidb_ttl_table_status(table_id,current_job_id,current_job_status) VALUES(0, '0', 'waiting')")
+	require.NoError(t, job.ChangeStatus(context.Background(), se, cache.JobStatusRunning))
+	tk.MustQuery("select current_job_status from mysql.tidb_ttl_table_status").Check(testkit.Rows("running"))
+}
diff --git a/ttl/ttlworker/job_manager.go b/ttl/ttlworker/job_manager.go
index 58c47d05efe4f..8a9a3c17d48cf 100644
--- a/ttl/ttlworker/job_manager.go
+++ b/ttl/ttlworker/job_manager.go
@@ -16,7 +16,6 @@ package ttlworker
 import (
 	"context"
-	"fmt"
 	"time"

 	"github.com/pingcap/errors"
@@ -31,22 +30,30 @@ import (
 	"go.uber.org/zap"
 )

-const insertNewTableIntoStatusTemplate = "INSERT INTO mysql.tidb_ttl_table_status (table_id,parent_table_id) VALUES (%d, %d)"
-const setTableStatusOwnerTemplate = "UPDATE mysql.tidb_ttl_table_status SET current_job_id = UUID(), current_job_owner_id = '%s',current_job_start_time = '%s',current_job_status = 'waiting',current_job_status_update_time = '%s',current_job_ttl_expire = '%s',current_job_owner_hb_time = '%s' WHERE table_id = %d"
-const updateHeartBeatTemplate = "UPDATE mysql.tidb_ttl_table_status SET current_job_owner_hb_time = '%s' WHERE table_id = %d AND current_job_owner_id = '%s'"
+const insertNewTableIntoStatusTemplate = "INSERT INTO mysql.tidb_ttl_table_status (table_id,parent_table_id) VALUES (%?, %?)"
+const setTableStatusOwnerTemplate = `UPDATE mysql.tidb_ttl_table_status
+	SET current_job_id = UUID(),
+		current_job_owner_id = %?,
+		current_job_start_time = %?,
+		current_job_status = 'waiting',
+		current_job_status_update_time = %?,
+		current_job_ttl_expire = %?,
+		current_job_owner_hb_time = %?
+	WHERE table_id = %?`
+const updateHeartBeatTemplate = "UPDATE mysql.tidb_ttl_table_status SET current_job_owner_hb_time = %? WHERE table_id = %? AND current_job_owner_id = %?"
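These job_manager.go templates follow the same `%?` convention, which also makes the builders just below straightforward to unit-test: a test can compare the returned statement and the argument slice separately instead of a fully interpolated string (the executeInfo-based cases added to job_manager_test.go later in this diff take the same approach). A hypothetical check along these lines — the test name and literal values are invented for illustration and assume the ttlworker package's existing test imports (testing, time and testify's require):

func TestSetTableStatusOwnerSQLArgs(t *testing.T) {
	now := time.Unix(86400, 0)
	expire := now.Add(-time.Hour)
	sql, args := setTableStatusOwnerSQL(1, now, expire, "owner-1")
	// The template itself is returned untouched; only the argument slice varies.
	require.Equal(t, setTableStatusOwnerTemplate, sql)
	// Argument order follows the placeholders: owner id, start time, status update
	// time, TTL expire time, heartbeat time, and finally the table id.
	require.Equal(t, []interface{}{
		"owner-1",
		now.Format(timeFormat),
		now.Format(timeFormat),
		expire.Format(timeFormat),
		now.Format(timeFormat),
		int64(1),
	}, args)
}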
 const timeFormat = "2006-01-02 15:04:05"

-func insertNewTableIntoStatusSQL(tableID int64, parentTableID int64) string {
-	return fmt.Sprintf(insertNewTableIntoStatusTemplate, tableID, parentTableID)
+func insertNewTableIntoStatusSQL(tableID int64, parentTableID int64) (string, []interface{}) {
+	return insertNewTableIntoStatusTemplate, []interface{}{tableID, parentTableID}
 }

-func setTableStatusOwnerSQL(tableID int64, now time.Time, currentJobTTLExpire time.Time, id string) string {
-	return fmt.Sprintf(setTableStatusOwnerTemplate, id, now.Format(timeFormat), now.Format(timeFormat), currentJobTTLExpire.Format(timeFormat), now.Format(timeFormat), tableID)
+func setTableStatusOwnerSQL(tableID int64, now time.Time, currentJobTTLExpire time.Time, id string) (string, []interface{}) {
+	return setTableStatusOwnerTemplate, []interface{}{id, now.Format(timeFormat), now.Format(timeFormat), currentJobTTLExpire.Format(timeFormat), now.Format(timeFormat), tableID}
 }

-func updateHeartBeatSQL(tableID int64, now time.Time, id string) string {
-	return fmt.Sprintf(updateHeartBeatTemplate, now.Format(timeFormat), tableID, id)
+func updateHeartBeatSQL(tableID int64, now time.Time, id string) (string, []interface{}) {
+	return updateHeartBeatTemplate, []interface{}{now.Format(timeFormat), tableID, id}
 }

 // JobManager schedules and manages the ttl jobs on this instance
@@ -95,8 +102,8 @@ func NewJobManager(id string, sessPool sessionPool, store kv.Storage) (manager *
 	manager.init(manager.jobLoop)
 	manager.ctx = logutil.WithKeyValue(manager.ctx, "ttl-worker", "manager")

-	manager.infoSchemaCache = cache.NewInfoSchemaCache(updateInfoSchemaCacheInterval)
-	manager.tableStatusCache = cache.NewTableStatusCache(updateTTLTableStatusCacheInterval)
+	manager.infoSchemaCache = cache.NewInfoSchemaCache(getUpdateInfoSchemaCacheInterval())
+	manager.tableStatusCache = cache.NewTableStatusCache(getUpdateTTLTableStatusCacheInterval())

 	return
 }
@@ -118,7 +125,7 @@ func (m *JobManager) jobLoop() error {
 	updateScanTaskStateTicker := time.Tick(jobManagerLoopTickerInterval)
 	infoSchemaCacheUpdateTicker := time.Tick(m.infoSchemaCache.GetInterval())
 	tableStatusCacheUpdateTicker := time.Tick(m.tableStatusCache.GetInterval())
-	resizeWorkersTicker := time.Tick(resizeWorkersInterval)
+	resizeWorkersTicker := time.Tick(getResizeWorkersInterval())
 	for {
 		m.reportMetrics()
 		now := se.Now()
@@ -480,7 +487,7 @@ func (m *JobManager) couldTrySchedule(table *cache.TableStatus, now time.Time) b
 	hbTime := table.CurrentJobOwnerHBTime
 	// a more concrete value is `2 * max(updateTTLTableStatusCacheInterval, jobManagerLoopTickerInterval)`, but the
 	// `updateTTLTableStatusCacheInterval` is greater than `jobManagerLoopTickerInterval` in most cases.
-	if hbTime.Add(2 * updateTTLTableStatusCacheInterval).Before(now) {
+	if hbTime.Add(2 * getUpdateTTLTableStatusCacheInterval()).Before(now) {
 		logutil.Logger(m.ctx).Info("task heartbeat has stopped", zap.Int64("tableID", table.TableID), zap.Time("hbTime", hbTime), zap.Time("now", now))
 		return true
 	}
@@ -503,19 +510,22 @@ func (m *JobManager) lockNewJob(ctx context.Context, se session.Session, table *
 	var expireTime time.Time

 	err := se.RunInTxn(ctx, func() error {
-		rows, err := se.ExecuteSQL(ctx, cache.SelectFromTTLTableStatusWithID(table.ID))
+		sql, args := cache.SelectFromTTLTableStatusWithID(table.ID)
+		rows, err := se.ExecuteSQL(ctx, sql, args...)
 		if err != nil {
-			return err
+			return errors.Wrapf(err, "execute sql: %s", sql)
 		}

 		if len(rows) == 0 {
 			// cannot find the row, insert the status row
-			_, err = se.ExecuteSQL(ctx, insertNewTableIntoStatusSQL(table.ID, table.TableInfo.ID))
+			sql, args := insertNewTableIntoStatusSQL(table.ID, table.TableInfo.ID)
+			_, err = se.ExecuteSQL(ctx, sql, args...)
 			if err != nil {
-				return err
+				return errors.Wrapf(err, "execute sql: %s", sql)
 			}
-			rows, err = se.ExecuteSQL(ctx, cache.SelectFromTTLTableStatusWithID(table.ID))
+			sql, args = cache.SelectFromTTLTableStatusWithID(table.ID)
+			rows, err = se.ExecuteSQL(ctx, sql, args...)
 			if err != nil {
-				return err
+				return errors.Wrapf(err, "execute sql: %s", sql)
 			}
 			if len(rows) == 0 {
 				return errors.New("table status row still doesn't exist after insertion")
@@ -534,9 +544,9 @@ func (m *JobManager) lockNewJob(ctx context.Context, se session.Session, table *
 			return err
 		}

-		_, err = se.ExecuteSQL(ctx, setTableStatusOwnerSQL(table.ID, now, expireTime, m.id))
-
-		return err
+		sql, args = setTableStatusOwnerSQL(table.ID, now, expireTime, m.id)
+		_, err = se.ExecuteSQL(ctx, sql, args...)
+		return errors.Wrapf(err, "execute sql: %s", sql)
 	})
 	if err != nil {
 		return nil, err
@@ -599,9 +609,10 @@ func (m *JobManager) createNewJob(expireTime time.Time, now time.Time, table *ca
 func (m *JobManager) updateHeartBeat(ctx context.Context, se session.Session) error {
 	now := se.Now()
 	for _, job := range m.localJobs() {
-		_, err := se.ExecuteSQL(ctx, updateHeartBeatSQL(job.tbl.ID, now, m.id))
+		sql, args := updateHeartBeatSQL(job.tbl.ID, now, m.id)
+		_, err := se.ExecuteSQL(ctx, sql, args...)
 		if err != nil {
-			return errors.Trace(err)
+			return errors.Wrapf(err, "execute sql: %s", sql)
 		}
 		// also updates some internal state for this job
 		err = job.updateState(ctx, se)
diff --git a/ttl/ttlworker/job_manager_integration_test.go b/ttl/ttlworker/job_manager_integration_test.go
index 8c299afcd48de..b4814bed9483a 100644
--- a/ttl/ttlworker/job_manager_integration_test.go
+++ b/ttl/ttlworker/job_manager_integration_test.go
@@ -21,10 +21,14 @@ import (
 	"testing"
 	"time"

+	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
+	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/ast"
 	"github.com/pingcap/tidb/parser/model"
 	dbsession "github.com/pingcap/tidb/session"
 	"github.com/pingcap/tidb/sessionctx/variable"
+	"github.com/pingcap/tidb/statistics/handle"
 	"github.com/pingcap/tidb/testkit"
 	"github.com/pingcap/tidb/ttl/cache"
 	"github.com/pingcap/tidb/ttl/session"
@@ -35,10 +39,8 @@ import (
 	"go.uber.org/zap"
 )

-func TestParallelLockNewJob(t *testing.T) {
-	store := testkit.CreateMockStore(t)
-
-	sessionFactory := func() session.Session {
+func sessionFactory(t *testing.T, store kv.Storage) func() session.Session {
+	return func() session.Session {
 		dbSession, err := dbsession.CreateSession4Test(store)
 		require.NoError(t, err)
 		se := session.NewSession(dbSession, dbSession, nil)
@@ -50,6 +52,12 @@ func TestParallelLockNewJob(t *testing.T) {
 		return se
 	}
+}
+
+func TestParallelLockNewJob(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+
+	sessionFactory := sessionFactory(t, store)

 	storedTTLJobRunInterval := variable.TTLJobRunInterval.Load()
 	variable.TTLJobRunInterval.Store(0)
@@ -96,3 +104,79 @@ func TestParallelLockNewJob(t *testing.T) {
 		successJob.Finish(se, time.Now())
 	}
 }
+
+func TestFinishJob(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+
+	sessionFactory := sessionFactory(t, store)
+
+	testTable := &cache.PhysicalTable{ID: 2, TableInfo: &model.TableInfo{ID: 1, TTLInfo: &model.TTLInfo{IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay)}}}
+
+	tk.MustExec("insert into mysql.tidb_ttl_table_status(table_id) values (2)")
+
+	// finish with error
+	m := ttlworker.NewJobManager("test-id", nil, store)
+	se := sessionFactory()
+	job, err := m.LockNewJob(context.Background(), se, testTable, time.Now())
+	require.NoError(t, err)
+	job.SetScanErr(errors.New(`"'an error message contains both single and double quote'"`))
+	job.Finish(se, time.Now())
+
+	tk.MustQuery("select table_id, last_job_summary from mysql.tidb_ttl_table_status").Check(testkit.Rows("2 {\"total_rows\":0,\"success_rows\":0,\"error_rows\":0,\"total_scan_task\":1,\"scheduled_scan_task\":0,\"finished_scan_task\":0,\"scan_task_err\":\"\\\"'an error message contains both single and double quote'\\\"\"}"))
+}
+
+func TestTTLAutoAnalyze(t *testing.T) {
+	failpoint.Enable("github.com/pingcap/tidb/ttl/ttlworker/update-info-schema-cache-interval", fmt.Sprintf("return(%d)", time.Second))
+	failpoint.Enable("github.com/pingcap/tidb/ttl/ttlworker/update-status-table-cache-interval", fmt.Sprintf("return(%d)", time.Second))
+	failpoint.Enable("github.com/pingcap/tidb/ttl/ttlworker/resize-workers-interval", fmt.Sprintf("return(%d)", time.Second))
+	originAutoAnalyzeMinCnt := handle.AutoAnalyzeMinCnt
+	handle.AutoAnalyzeMinCnt = 0
+	defer func() {
+		handle.AutoAnalyzeMinCnt = originAutoAnalyzeMinCnt
+	}()
+
+	store, dom := testkit.CreateMockStoreAndDomain(t)
+	tk := testkit.NewTestKit(t, store)
+
+	tk.MustExec("use test")
+	tk.MustExec("create table t (id int, created_at datetime) ttl = `created_at` + interval 1 day")
+
+	// insert ten rows; rows 2, 3, 4, 6, 8, 9 and 10 of them are expired
+	for i := 1; i <= 10; i++ {
+		t := time.Now()
+		if i%2 == 0 || i%3 == 0 {
+			t = t.Add(-time.Hour * 48)
+		}
+
+		tk.MustExec("insert into t values(?, ?)", i, t.Format(time.RFC3339))
+	}
+	// TODO: use a better way to pause and restart the ttl worker after analyzing the table to make it more stable
+	// but as the ttl worker takes several seconds to start, it's not too serious.
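+	// Of the ten rows above, those with i%2 == 0 or i%3 == 0 (i.e. 2, 3, 4, 6, 8, 9 and 10) are backdated
+	// by 48 hours and therefore already expired, so the TTL job should eventually leave exactly 3 rows;
+	// the retry loop below waits for that count.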
+ tk.MustExec("analyze table t") + rows := tk.MustQuery("show stats_meta").Rows() + require.Equal(t, rows[0][4], "0") + require.Equal(t, rows[0][5], "10") + + retryTime := 15 + retryInterval := time.Second * 2 + deleted := false + for retryTime >= 0 { + retryTime-- + time.Sleep(retryInterval) + + rows := tk.MustQuery("select count(*) from t").Rows() + count := rows[0][0].(string) + if count == "3" { + deleted = true + break + } + } + require.True(t, deleted, "ttl should remove expired rows") + + h := dom.StatsHandle() + is := dom.InfoSchema() + require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll)) + require.NoError(t, h.Update(is)) + require.True(t, h.HandleAutoAnalyze(is)) +} diff --git a/ttl/ttlworker/job_manager_test.go b/ttl/ttlworker/job_manager_test.go index 6718c384543fe..c218c7ee81a08 100644 --- a/ttl/ttlworker/job_manager_test.go +++ b/ttl/ttlworker/job_manager_test.go @@ -156,6 +156,10 @@ func (j *ttlJob) ID() string { return j.id } +func (j *ttlJob) SetScanErr(err error) { + j.scanTaskErr = err +} + func newMockTTLJob(tbl *cache.PhysicalTable, status cache.JobStatus) *ttlJob { statistics := &ttlStatistics{} return &ttlJob{tbl: tbl, ctx: context.Background(), statistics: statistics, status: status, tasks: []*ttlScanTask{{ctx: context.Background(), tbl: tbl, statistics: statistics}}} @@ -163,7 +167,8 @@ func newMockTTLJob(tbl *cache.PhysicalTable, status cache.JobStatus) *ttlJob { func TestReadyForNewJobTables(t *testing.T) { tbl := newMockTTLTbl(t, "t1") - m := NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m := NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) se := newMockSession(t, tbl) cases := []struct { @@ -216,8 +221,18 @@ func TestLockNewTable(t *testing.T) { testPhysicalTable := &cache.PhysicalTable{ID: 1, TableInfo: &model.TableInfo{ID: 1, TTLInfo: &model.TTLInfo{ColumnName: model.NewCIStr("test"), IntervalExprStr: "5 Year"}}} + type executeInfo struct { + sql string + args []interface{} + } + getExecuteInfo := func(sql string, args []interface{}) executeInfo { + return executeInfo{ + sql, + args, + } + } type sqlExecute struct { - sql string + executeInfo rows []chunk.Row err error @@ -231,47 +246,47 @@ func TestLockNewTable(t *testing.T) { }{ {"normal lock table", testPhysicalTable, []sqlExecute{ { - cache.SelectFromTTLTableStatusWithID(1), + getExecuteInfo(cache.SelectFromTTLTableStatusWithID(1)), newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil, }, { - setTableStatusOwnerSQL(1, now, expireTime, "test-id"), + getExecuteInfo(setTableStatusOwnerSQL(1, now, expireTime, "test-id")), nil, nil, }, { - updateStatusSQL, + getExecuteInfo(updateStatusSQL, nil), newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil, }, }, true, false}, {"select nothing", testPhysicalTable, []sqlExecute{ { - cache.SelectFromTTLTableStatusWithID(1), + getExecuteInfo(cache.SelectFromTTLTableStatusWithID(1)), nil, nil, }, { - insertNewTableIntoStatusSQL(1, 1), + getExecuteInfo(insertNewTableIntoStatusSQL(1, 1)), nil, nil, }, { - cache.SelectFromTTLTableStatusWithID(1), + getExecuteInfo(cache.SelectFromTTLTableStatusWithID(1)), newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil, }, { - setTableStatusOwnerSQL(1, now, expireTime, "test-id"), + getExecuteInfo(setTableStatusOwnerSQL(1, now, expireTime, "test-id")), nil, nil, }, { - updateStatusSQL, + getExecuteInfo(updateStatusSQL, nil), newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil, }, }, true, false}, {"return error", testPhysicalTable, []sqlExecute{ { - 
cache.SelectFromTTLTableStatusWithID(1), + getExecuteInfo(cache.SelectFromTTLTableStatusWithID(1)), newTTLTableStatusRows(&cache.TableStatus{TableID: 1}), nil, }, { - setTableStatusOwnerSQL(1, now, expireTime, "test-id"), + getExecuteInfo(setTableStatusOwnerSQL(1, now, expireTime, "test-id")), nil, errors.New("test error message"), }, }, false, true}, @@ -281,12 +296,14 @@ func TestLockNewTable(t *testing.T) { t.Run(c.name, func(t *testing.T) { tbl := newMockTTLTbl(t, "t1") - m := NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m := NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) sqlCounter := 0 se := newMockSession(t, tbl) se.executeSQL = func(ctx context.Context, sql string, args ...interface{}) (rows []chunk.Row, err error) { assert.Less(t, sqlCounter, len(c.sqls)) assert.Equal(t, sql, c.sqls[sqlCounter].sql) + assert.Equal(t, args, c.sqls[sqlCounter].args) rows = c.sqls[sqlCounter].rows err = c.sqls[sqlCounter].err @@ -318,7 +335,8 @@ func TestResizeWorkers(t *testing.T) { scanWorker1.Start() scanWorker2 := newMockScanWorker(t) - m := NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m := NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.SetScanWorkers4Test([]worker{ scanWorker1, }) @@ -336,7 +354,8 @@ func TestResizeWorkers(t *testing.T) { scanWorker2 = newMockScanWorker(t) scanWorker2.Start() - m = NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m = NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.SetScanWorkers4Test([]worker{ scanWorker1, scanWorker2, @@ -351,7 +370,8 @@ func TestResizeWorkers(t *testing.T) { scanWorker2 = newMockScanWorker(t) scanWorker2.Start() - m = NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m = NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.SetScanWorkers4Test([]worker{ scanWorker1, scanWorker2, @@ -369,7 +389,8 @@ func TestLocalJobs(t *testing.T) { tbl1.ID = 1 tbl2 := newMockTTLTbl(t, "t2") tbl2.ID = 2 - m := NewJobManager("test-id", newMockSessionPool(t, tbl1, tbl2), nil) + m := NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl1, tbl2) m.runningJobs = []*ttlJob{{tbl: tbl1, id: "1", ctx: context.Background()}, {tbl: tbl2, id: "2", ctx: context.Background()}} m.tableStatusCache.Tables = map[int64]*cache.TableStatus{ @@ -395,7 +416,8 @@ func TestRescheduleJobs(t *testing.T) { scanWorker2.Start() scanWorker2.setOneRowResult(tbl, 2022) - m := NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m := NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.SetScanWorkers4Test([]worker{ scanWorker1, scanWorker2, @@ -448,7 +470,8 @@ func TestRescheduleJobsOutOfWindow(t *testing.T) { scanWorker2.Start() scanWorker2.setOneRowResult(tbl, 2022) - m := NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m := NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.SetScanWorkers4Test([]worker{ scanWorker1, scanWorker2, @@ -493,7 +516,8 @@ func TestCheckFinishedJob(t *testing.T) { se := newMockSession(t, tbl) // cancelled job will be regarded as finished - m := NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m := NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.runningJobs = []*ttlJob{newMockTTLJob(tbl, cache.JobStatusCancelled)} m.checkFinishedJob(se, se.Now()) assert.Len(t, m.runningJobs, 0) @@ -502,20 +526,37 @@ func TestCheckFinishedJob(t *testing.T) { finishedStatistics := 
&ttlStatistics{} finishedStatistics.TotalRows.Store(1) finishedStatistics.SuccessRows.Store(1) - m = NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m = NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.runningJobs = []*ttlJob{newMockTTLJob(tbl, cache.JobStatusRunning)} m.runningJobs[0].statistics = finishedStatistics m.runningJobs[0].tasks[0].statistics = finishedStatistics m.runningJobs[0].taskIter = 1 m.runningJobs[0].finishedScanTaskCounter = 1 - m.checkFinishedJob(se, se.Now()) + // meetArg records whether the sql statement uses the arg + meetArg := false + now := se.Now() + jobID := m.runningJobs[0].id + se.executeSQL = func(ctx context.Context, sql string, args ...interface{}) ([]chunk.Row, error) { + if len(args) > 0 { + meetArg = true + expectedSQL, expectedArgs := finishJobSQL(tbl.ID, now, "{\"total_rows\":1,\"success_rows\":1,\"error_rows\":0,\"total_scan_task\":1,\"scheduled_scan_task\":1,\"finished_scan_task\":1}", jobID) + assert.Equal(t, expectedSQL, sql) + assert.Equal(t, expectedArgs, args) + } + return nil, nil + } + m.checkFinishedJob(se, now) assert.Len(t, m.runningJobs, 0) + assert.Equal(t, true, meetArg) + se.executeSQL = nil // check timeout job - now := se.Now() + now = se.Now() createTime := now.Add(-20 * time.Hour) - m = NewJobManager("test-id", newMockSessionPool(t, tbl), nil) + m = NewJobManager("test-id", nil, nil) + m.sessPool = newMockSessionPool(t, tbl) m.runningJobs = []*ttlJob{ { ctx: context.Background(), diff --git a/ttl/ttlworker/job_test.go b/ttl/ttlworker/job_test.go index 7645777327225..19075b905e22d 100644 --- a/ttl/ttlworker/job_test.go +++ b/ttl/ttlworker/job_test.go @@ -15,12 +15,27 @@ package ttlworker import ( + "context" "testing" "github.com/pingcap/errors" + "github.com/pingcap/tidb/ttl/cache" + "github.com/pingcap/tidb/ttl/session" "github.com/stretchr/testify/assert" ) +func NewTTLJob(tbl *cache.PhysicalTable, id string, status cache.JobStatus) *ttlJob { + return &ttlJob{ + tbl: tbl, + id: id, + status: status, + } +} + +func (j *ttlJob) ChangeStatus(ctx context.Context, se session.Session, status cache.JobStatus) error { + return j.changeStatus(ctx, se, status) +} + func TestIterScanTask(t *testing.T) { tbl := newMockTTLTbl(t, "t1") diff --git a/ttl/ttlworker/session.go b/ttl/ttlworker/session.go index d1500b0e533ee..aa83f195aa43e 100644 --- a/ttl/ttlworker/session.go +++ b/ttl/ttlworker/session.go @@ -33,6 +33,27 @@ import ( "go.uber.org/zap" ) +// The following two functions are using `sqlexec.SQLExecutor` to represent session +// which is actually not correct. It's a work around for the cyclic dependency problem. +// It actually doesn't accept arbitrary SQLExecutor, but just `*session.session`, which means +// you cannot pass the `(ttl/session).Session` into it. +// Use `sqlexec.SQLExecutor` and `sessionctx.Session` or another other interface (including +// `interface{}`) here is the same, I just pick one small enough interface. +// Also, we cannot use the functions in `session/session.go` (to avoid cyclic dependency), so +// registering function here is really needed. + +// AttachStatsCollector attaches the stats collector for the session. 
+// This function is registered in BootstrapSession in /session/session.go.
+var AttachStatsCollector = func(s sqlexec.SQLExecutor) sqlexec.SQLExecutor {
+	return s
+}
+
+// DetachStatsCollector removes the stats collector for the session.
+// This function is registered in BootstrapSession in /session/session.go.
+var DetachStatsCollector = func(s sqlexec.SQLExecutor) sqlexec.SQLExecutor {
+	return s
+}
+
 type sessionPool interface {
 	Get() (pools.Resource, error)
 	Put(pools.Resource)
@@ -80,9 +101,13 @@ func getSession(pool sessionPool) (session.Session, error) {
 			terror.Log(err)
 		}

+		DetachStatsCollector(exec)
+
 		pool.Put(resource)
 	})

+	exec = AttachStatsCollector(exec)
+
 	// store and set the retry limit to 0
 	_, err = se.ExecuteSQL(context.Background(), "set tidb_retry_limit=0")
 	if err != nil {
diff --git a/ttl/ttlworker/session_test.go b/ttl/ttlworker/session_test.go
index 877fd7996eaa7..712c696fa5f35 100644
--- a/ttl/ttlworker/session_test.go
+++ b/ttl/ttlworker/session_test.go
@@ -179,7 +179,7 @@ func (s *mockSession) ExecuteSQL(ctx context.Context, sql string, args ...interf
 	}

 	if s.executeSQL != nil {
-		return s.executeSQL(ctx, sql, args)
+		return s.executeSQL(ctx, sql, args...)
 	}
 	return s.rows, s.execErr
 }
diff --git a/types/convert.go b/types/convert.go
index 96a12f64ca641..1b97b7336ef66 100644
--- a/types/convert.go
+++ b/types/convert.go
@@ -758,6 +758,8 @@ func ToString(value interface{}) (string, error) {
 		return v.String(), nil
 	case Set:
 		return v.String(), nil
+	case BinaryJSON:
+		return v.String(), nil
 	default:
 		return "", errors.Errorf("cannot convert %v(type %T) to string", value, value)
 	}
diff --git a/types/datum.go b/types/datum.go
index 34639cdf10bdb..231de4dd9a069 100644
--- a/types/datum.go
+++ b/types/datum.go
@@ -2016,6 +2016,10 @@ func (d *Datum) ToMysqlJSON() (j BinaryJSON, err error) {
 		in = d.GetBinaryLiteral().ToString()
 	case KindNull:
 		in = nil
+	case KindMysqlTime:
+		in = d.GetMysqlTime()
+	case KindMysqlDuration:
+		in = d.GetMysqlDuration()
 	default:
 		in, err = d.ToString()
 	}
diff --git a/types/json_binary.go b/types/json_binary.go
index 6fe01d2b4f28e..cacf5b69b025a 100644
--- a/types/json_binary.go
+++ b/types/json_binary.go
@@ -31,6 +31,8 @@ import (
 	"github.com/pingcap/tidb/parser/mysql"
 	"github.com/pingcap/tidb/parser/terror"
 	"github.com/pingcap/tidb/util/hack"
+	"github.com/pingcap/tidb/util/logutil"
+	"go.uber.org/zap"
 	"golang.org/x/exp/slices"
 )

@@ -578,6 +580,26 @@ func (bj BinaryJSON) HashValue(buf []byte) []byte {
 	return buf
 }

+// GetValue returns the primitive value of the JSON.
+func (bj BinaryJSON) GetValue() any {
+	switch bj.TypeCode {
+	case JSONTypeCodeInt64:
+		return bj.GetInt64()
+	case JSONTypeCodeUint64:
+		return bj.GetUint64()
+	case JSONTypeCodeDuration:
+		return bj.GetDuration()
+	case JSONTypeCodeFloat64:
+		return bj.GetFloat64()
+	case JSONTypeCodeString:
+		return bj.GetString()
+	case JSONTypeCodeDate, JSONTypeCodeDatetime:
+		return bj.GetTime()
+	}
+	logutil.BgLogger().Error("unreachable JSON type", zap.Any("type", bj.TypeCode))
+	return nil
+}
+
 // CreateBinaryJSON creates a BinaryJSON from interface.
 func CreateBinaryJSON(in interface{}) BinaryJSON {
 	bj, err := CreateBinaryJSONWithCheck(in)
diff --git a/util/cpu/BUILD.bazel b/util/cpu/BUILD.bazel
index 08893520caaa0..b831ba544947b 100644
--- a/util/cpu/BUILD.bazel
+++ b/util/cpu/BUILD.bazel
@@ -21,5 +21,6 @@ go_test(
     srcs = ["cpu_test.go"],
    embed = [":cpu"],
     flaky = True,
+    race = "on",
     deps = ["@com_github_stretchr_testify//require"],
 )
diff --git a/util/cpu/cpu_test.go b/util/cpu/cpu_test.go
index cd330a11e5196..a191227b72f78 100644
--- a/util/cpu/cpu_test.go
+++ b/util/cpu/cpu_test.go
@@ -24,8 +24,7 @@ import (
 )

 func TestCPUValue(t *testing.T) {
-	Observer := NewCPUObserver()
-	Observer.Start()
+	observer := NewCPUObserver()
 	exit := make(chan struct{})
 	var wg sync.WaitGroup
 	for i := 0; i < 10; i++ {
@@ -42,11 +41,11 @@ func TestCPUValue(t *testing.T) {
 			}
 		}()
 	}
-	Observer.Start()
+	observer.Start()
 	time.Sleep(5 * time.Second)
 	require.GreaterOrEqual(t, GetCPUUsage(), 0.0)
 	require.Less(t, GetCPUUsage(), 1.0)
-	Observer.Stop()
+	observer.Stop()
 	close(exit)
 	wg.Wait()
 }
diff --git a/util/dbterror/ddl_terror.go b/util/dbterror/ddl_terror.go
index 5db0d90c848e1..e2042513ba0f1 100644
--- a/util/dbterror/ddl_terror.go
+++ b/util/dbterror/ddl_terror.go
@@ -369,6 +369,8 @@ var (
 	ErrDependentByFunctionalIndex = ClassDDL.NewStd(mysql.ErrDependentByFunctionalIndex)
 	// ErrFunctionalIndexOnBlob when the expression of expression index returns blob or text.
 	ErrFunctionalIndexOnBlob = ClassDDL.NewStd(mysql.ErrFunctionalIndexOnBlob)
+	// ErrDependentByPartitionFunctional returns when the dropped column is depended on by an expression partition.
+	ErrDependentByPartitionFunctional = ClassDDL.NewStd(mysql.ErrDependentByPartitionFunctional)
 	// ErrUnsupportedAlterTableSpec means we don't support this alter table specification (i.e. unknown)
 	ErrUnsupportedAlterTableSpec = ClassDDL.NewStdErr(mysql.ErrUnsupportedDDLOperation, parser_mysql.Message(fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation].Raw, "Unsupported/unknown ALTER TABLE specification"), nil))
diff --git a/util/misc.go b/util/misc.go
index 0e28baa5f62fc..d336aa5765451 100644
--- a/util/misc.go
+++ b/util/misc.go
@@ -394,10 +394,10 @@ func TLSCipher2String(n uint16) string {
 }

 // ColumnsToProto converts a slice of model.ColumnInfo to a slice of tipb.ColumnInfo.
-func ColumnsToProto(columns []*model.ColumnInfo, pkIsHandle bool) []*tipb.ColumnInfo {
+func ColumnsToProto(columns []*model.ColumnInfo, pkIsHandle bool, forIndex bool) []*tipb.ColumnInfo {
 	cols := make([]*tipb.ColumnInfo, 0, len(columns))
 	for _, c := range columns {
-		col := ColumnToProto(c)
+		col := ColumnToProto(c, forIndex)
 		// TODO: Here `PkHandle`'s meaning is changed, we will change it to `IsHandle` when tikv's old select logic
 		// is abandoned.
 		if (pkIsHandle && mysql.HasPriKeyFlag(c.GetFlag())) || c.ID == model.ExtraHandleID {
@@ -411,7 +411,7 @@ func ColumnsToProto(columns []*model.ColumnInfo, pkIsHandle bool) []*tipb.Column
 }

 // ColumnToProto converts model.ColumnInfo to tipb.ColumnInfo.
-func ColumnToProto(c *model.ColumnInfo) *tipb.ColumnInfo {
+func ColumnToProto(c *model.ColumnInfo, forIndex bool) *tipb.ColumnInfo {
 	pc := &tipb.ColumnInfo{
 		ColumnId:  c.ID,
 		Collation: collate.RewriteNewCollationIDIfNeeded(int32(mysql.CollationNames[c.GetCollate()])),
@@ -420,7 +420,12 @@ func ColumnToProto(c *model.ColumnInfo) *tipb.ColumnInfo {
 		Flag:  int32(c.GetFlag()),
 		Elems: c.GetElems(),
 	}
-	pc.Tp = int32(c.GetType())
+	if forIndex {
+		// Use the array type for reading the multi-valued index.
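+		// A multi-valued index stores the individual elements of the JSON array,
+		// so the column is described to the coprocessor with the array's element
+		// type rather than as a plain JSON column.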
+		pc.Tp = int32(c.FieldType.ArrayType().GetType())
+	} else {
+		pc.Tp = int32(c.GetType())
+	}
 	return pc
 }
diff --git a/util/misc_test.go b/util/misc_test.go
index c1510625593f0..7a5baeb197d28 100644
--- a/util/misc_test.go
+++ b/util/misc_test.go
@@ -174,8 +174,8 @@ func TestToPB(t *testing.T) {
 	}
 	column2.SetCollate("utf8mb4_bin")

-	assert.Equal(t, "column_id:1 collation:-45 columnLen:-1 decimal:-1 ", ColumnToProto(column).String())
-	assert.Equal(t, "column_id:1 collation:-45 columnLen:-1 decimal:-1 ", ColumnsToProto([]*model.ColumnInfo{column, column2}, false)[0].String())
+	assert.Equal(t, "column_id:1 collation:-45 columnLen:-1 decimal:-1 ", ColumnToProto(column, false).String())
+	assert.Equal(t, "column_id:1 collation:-45 columnLen:-1 decimal:-1 ", ColumnsToProto([]*model.ColumnInfo{column, column2}, false, false)[0].String())
 }

 func TestComposeURL(t *testing.T) {
diff --git a/util/ranger/detacher.go b/util/ranger/detacher.go
index 596d27c27d9d8..606f53c40265f 100644
--- a/util/ranger/detacher.go
+++ b/util/ranger/detacher.go
@@ -581,9 +581,8 @@ func ExtractEqAndInCondition(sctx sessionctx.Context, conditions []expression.Ex
 		points[offset] = rb.intersection(points[offset], rb.build(cond, collator), collator)
 		if len(points[offset]) == 0 { // Early termination if false expression found
 			if expression.MaybeOverOptimized4PlanCache(sctx, conditions) {
-				// cannot return an empty-range for plan-cache since the range may become non-empty as parameters change
-				// for safety, return the whole conditions in this case
-				return nil, conditions, nil, nil, false
+				// `a>@x and a<@y` --> `invalid-range if @x>=@y`
+				sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: some parameters may be overwritten"))
 			}
 			return nil, nil, nil, nil, true
 		}
@@ -606,9 +605,8 @@ func ExtractEqAndInCondition(sctx sessionctx.Context, conditions []expression.Ex
 			accesses[i] = nil
 		} else if len(points[i]) == 0 { // Early termination if false expression found
 			if expression.MaybeOverOptimized4PlanCache(sctx, conditions) {
-				// cannot return an empty-range for plan-cache since the range may become non-empty as parameters change
-				// for safety, return the whole conditions in this case
-				return nil, conditions, nil, nil, false
+				// `a>@x and a<@y` --> `invalid-range if @x>=@y`
+				sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: some parameters may be overwritten"))
 			}
 			return nil, nil, nil, nil, true
 		} else {
@@ -622,8 +620,7 @@ func ExtractEqAndInCondition(sctx sessionctx.Context, conditions []expression.Ex
 			}
 			if expression.MaybeOverOptimized4PlanCache(sctx, conditions) {
 				// `a=@x and a=@y` --> `a=@x if @x==@y`
-				sctx.GetSessionVars().StmtCtx.SkipPlanCache = true
-				sctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("skip plan-cache: some parameters may be overwritten"))
+				sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("skip plan-cache: some parameters may be overwritten"))
 			}
 		}
 	}
diff --git a/util/systimemon/systime_mon.go b/util/systimemon/systime_mon.go
index 67b23ed894b5b..26819a4159776 100644
--- a/util/systimemon/systime_mon.go
+++ b/util/systimemon/systime_mon.go
@@ -22,11 +22,10 @@ import (
 )

 // StartMonitor calls systimeErrHandler if system time jump backward.
-func StartMonitor(now func() time.Time, systimeErrHandler func(), successCallback func()) {
+func StartMonitor(now func() time.Time, systimeErrHandler func()) {
 	logutil.BgLogger().Info("start system time monitor")
 	tick := time.NewTicker(100 * time.Millisecond)
 	defer tick.Stop()
-	tickCount := 0
 	for {
 		last := now().UnixNano()
 		<-tick.C
@@ -34,11 +33,5 @@ func StartMonitor(now func() time.Time, systimeErrHandler func(), successCallbac
 			logutil.BgLogger().Error("system time jump backward", zap.Int64("last", last))
 			systimeErrHandler()
 		}
-		// call successCallback per second.
-		tickCount++
-		if tickCount >= 10 {
-			tickCount = 0
-			successCallback()
-		}
 	}
 }
diff --git a/util/systimemon/systime_mon_test.go b/util/systimemon/systime_mon_test.go
index f05098b63db7e..6964a9ac3d70d 100644
--- a/util/systimemon/systime_mon_test.go
+++ b/util/systimemon/systime_mon_test.go
@@ -35,7 +35,7 @@ func TestSystimeMonitor(t *testing.T) {
 		return time.Now().Add(-2 * time.Second)
 	}, func() {
 		errTriggered.Store(true)
-	}, func() {})
+	})

 	require.Eventually(t, errTriggered.Load, time.Second, 10*time.Millisecond)
 }
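With successCallback gone, StartMonitor takes only a clock function and a handler for backward time jumps. A minimal usage sketch under the new signature — the main function and the log call are illustrative assumptions rather than code from this patch, and the import path follows the github.com convention used throughout this diff:

package main

import (
	"log"
	"time"

	"github.com/pingcap/tidb/util/systimemon"
)

func main() {
	// Run the monitor in the background; it ticks every 100ms and invokes the
	// handler whenever the wall clock is observed to move backwards.
	go systimemon.StartMonitor(time.Now, func() {
		log.Println("system time jumped backward")
	})

	// Keep the example process alive long enough for the monitor to do some work.
	time.Sleep(time.Second)
}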