From e8734dcf768aa675875fa953f3e77899e2049a1d Mon Sep 17 00:00:00 2001 From: YangKian <1207783292@qq.com> Date: Mon, 28 Sep 2020 14:54:47 +0800 Subject: [PATCH 01/16] update pingcap/parser --- go.mod | 5 +++-- go.sum | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index d8baecbe2f4d2..0ade29c631ffb 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200828054126-d677e6fd224a github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 - github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e + github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34 github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a github.com/pingcap/tidb-tools v4.0.5-0.20200820092506-34ea90c93237+incompatible github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 @@ -52,8 +52,9 @@ require ( github.com/uber-go/atomic v1.3.2 github.com/uber/jaeger-client-go v2.22.1+incompatible go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 - go.uber.org/atomic v1.6.0 + go.uber.org/atomic v1.7.0 go.uber.org/automaxprocs v1.2.0 + go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.16.0 golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 diff --git a/go.sum b/go.sum index 1a13096c1705e..5616bf4124853 100644 --- a/go.sum +++ b/go.sum @@ -509,6 +509,8 @@ github.com/pingcap/parser v0.0.0-20200813083329-a4bff035d3e2/go.mod h1:vQdbJqobJ github.com/pingcap/parser v0.0.0-20200821073936-cf85e80665c4/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e h1:IFD2pEbIcN+EzG/RGMLrv/Tt6U9KzJGT6hSbGkQ1v7c= github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= +github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34 h1:Ubcf5qesK50AVNCeW5EDOtVDSzpecEDxpzYeVU/CLwA= +github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200714122454-1a64f969cb3c/go.mod h1:v/dY4mVkt3dh/Liphhk0E4ScOkZpIk0m0GvWJ9FapDs= @@ -700,6 +702,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.2.0 h1:+RUihKM+nmYUoB9w0D0Ov5TJ2PpFO2FgenTxMJiZBZA= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= @@ -711,6 +715,8 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= 
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= From a30be605cd9b86ea253e7ddc0763026d7635f618 Mon Sep 17 00:00:00 2001 From: xiongjiwei Date: Mon, 28 Sep 2020 15:11:42 +0800 Subject: [PATCH 02/16] ddl : table meta may hold wrong information if checkPartitionByRange panic (#19814) --- ddl/ddl_api.go | 10 +++++++--- ddl/failtest/fail_db_test.go | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index e77c13bd856ba..d3bc3e2e1fa01 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -30,6 +30,7 @@ import ( "github.com/cznic/mathutil" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/charset" "github.com/pingcap/parser/format" @@ -1896,6 +1897,9 @@ func checkPartitionByHash(ctx sessionctx.Context, tbInfo *model.TableInfo, s *as // checkPartitionByRange checks validity of a "BY RANGE" partition. func checkPartitionByRange(ctx sessionctx.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) error { + failpoint.Inject("CheckPartitionByRangeErr", func() { + panic("Out Of Memory Quota!") + }) pi := tbInfo.Partition if err := checkPartitionNameUnique(pi); err != nil { return err @@ -2758,11 +2762,11 @@ func (d *ddl) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, spec * // partInfo contains only the new added partition, we have to combine it with the // old partitions to check all partitions is strictly increasing. + clonedMeta := meta.Clone() tmp := *partInfo tmp.Definitions = append(pi.Definitions, tmp.Definitions...) 
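+	// Validate against a cloned meta so that, if checkPartitionByRange
+	// panics, the live table meta is left untouched and the old restore
+	// step (meta.Partition = pi) is no longer needed.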
- meta.Partition = &tmp - err = checkPartitionByRange(ctx, meta, nil) - meta.Partition = pi + clonedMeta.Partition = &tmp + err = checkPartitionByRange(ctx, clonedMeta, nil) if err != nil { if ErrSameNamePartition.Equal(err) && spec.IfNotExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) diff --git a/ddl/failtest/fail_db_test.go b/ddl/failtest/fail_db_test.go index e8898ccc19171..3681a528d325d 100644 --- a/ddl/failtest/fail_db_test.go +++ b/ddl/failtest/fail_db_test.go @@ -436,6 +436,7 @@ func (s *testFailDBSuite) TestPartitionAddIndexGC(c *C) { func (s *testFailDBSuite) TestModifyColumn(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") + tk.MustExec("drop table if exists t;") enableChangeColumnType := tk.Se.GetSessionVars().EnableChangeColumnType tk.Se.GetSessionVars().EnableChangeColumnType = true @@ -534,3 +535,20 @@ func (s *testFailDBSuite) TestModifyColumn(c *C) { tk.MustExec("drop table t, t1, t2, t3, t4, t5") } + +func (s *testFailDBSuite) TestPartitionAddPanic(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec(`use test;`) + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t (a int) partition by range(a) (partition p0 values less than (10));`) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/CheckPartitionByRangeErr", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/CheckPartitionByRangeErr"), IsNil) + }() + + _, err := tk.Exec(`alter table t add partition (partition p1 values less than (20));`) + c.Assert(err, NotNil) + result := tk.MustQuery("show create table t").Rows()[0][1] + c.Assert(result, Matches, `(?s).*PARTITION .p0. VALUES LESS THAN \(10\).*`) + c.Assert(result, Not(Matches), `(?s).*PARTITION .p0. VALUES LESS THAN \(20\).*`) +} From b61f8bb9ed14a41dd04fbed389cba58d5271ab39 Mon Sep 17 00:00:00 2001 From: xhe Date: Mon, 28 Sep 2020 18:44:12 +0800 Subject: [PATCH 03/16] ddl: make use of the new rule cache api[2/3] (#20208) Signed-off-by: xhe --- ddl/ddl_api.go | 9 ++++++-- ddl/ddl_worker.go | 2 +- ddl/partition.go | 42 +++++++++++++++++----------------- ddl/placement/types.go | 5 +++++ ddl/placement/types_test.go | 17 ++++++++++++++ ddl/placement_rule_test.go | 45 +++++++------------------------------ domain/infosync/info.go | 12 ++++++---- 7 files changed, 67 insertions(+), 65 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index d3bc3e2e1fa01..cf26fcd44dae4 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -5517,8 +5517,13 @@ func (d *ddl) AlterTablePartition(ctx sessionctx.Context, ident ast.Ident, spec EndKeyHex: endKey, }) } - bundle.Index = placement.RuleIndexPartition - bundle.Override = true + if len(bundle.Rules) == 0 { + bundle.Index = 0 + bundle.Override = false + } else { + bundle.Index = placement.RuleIndexPartition + bundle.Override = true + } job := &model.Job{ SchemaID: schema.ID, diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 3e858d8a2dddd..1a7c60ba9ae75 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -622,7 +622,7 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, case model.ActionDropTable, model.ActionDropView, model.ActionDropSequence: ver, err = onDropTableOrView(t, job) case model.ActionDropTablePartition: - ver, err = onDropTablePartition(t, job) + ver, err = onDropTablePartition(d, t, job) case model.ActionTruncateTablePartition: ver, err = onTruncateTablePartition(d, t, job) case model.ActionExchangeTablePartition: diff --git a/ddl/partition.go b/ddl/partition.go index 
a10d0ab6bd839..a3ccf17066c22 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -73,7 +73,7 @@ func checkAddPartition(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.P func onAddTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { // Handle the rolling back job if job.IsRollingback() { - ver, err := onDropTablePartition(t, job) + ver, err := onDropTablePartition(d, t, job) if err != nil { return ver, errors.Trace(err) } @@ -867,23 +867,14 @@ func getPartitionDef(tblInfo *model.TableInfo, partName string) (index int, def return index, nil, table.ErrUnknownPartition.GenWithStackByArgs(partName, tblInfo.Name.O) } -func buildPlacementDropRules(schemaID, tableID int64, partitionIDs []int64) []*placement.RuleOp { - rules := make([]*placement.RuleOp, 0, len(partitionIDs)) - for _, partitionID := range partitionIDs { - rules = append(rules, &placement.RuleOp{ - Action: placement.RuleOpDel, - DeleteByIDPrefix: true, - Rule: &placement.Rule{ - GroupID: placement.RuleDefaultGroupID, - ID: fmt.Sprintf("%d_t%d_p%d", schemaID, tableID, partitionID), - }, - }) - } - return rules +func buildPlacementDropBundle(partitionID int64) *placement.Bundle { + return &placement.Bundle{ + ID: placement.GroupID(partitionID), + } } // onDropTablePartition deletes old partition meta. -func onDropTablePartition(t *meta.Meta, job *model.Job) (ver int64, _ error) { +func onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { var partNames []string if err := job.DecodeArgs(&partNames); err != nil { job.State = model.JobStateCancelled @@ -907,11 +898,20 @@ func onDropTablePartition(t *meta.Meta, job *model.Job) (ver int64, _ error) { physicalTableIDs = removePartitionInfo(tblInfo, partNames) } - rules := buildPlacementDropRules(job.SchemaID, tblInfo.ID, physicalTableIDs) - err = infosync.UpdatePlacementRules(nil, rules) - if err != nil { - job.State = model.JobStateCancelled - return ver, errors.Wrapf(err, "failed to notify PD the placement rules") + if d.infoHandle != nil { + bundles := make([]*placement.Bundle, 0, len(physicalTableIDs)) + for _, ID := range physicalTableIDs { + oldBundle, ok := d.infoHandle.Get().BundleByName(placement.GroupID(ID)) + if ok && !oldBundle.IsEmpty() { + bundles = append(bundles, buildPlacementDropBundle(ID)) + } + } + + err = infosync.PutRuleBundles(nil, bundles) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Wrapf(err, "failed to notify PD the placement rules") + } } ver, err = updateVersionAndTableInfo(t, job, tblInfo, true) @@ -1536,7 +1536,7 @@ func onAlterTablePartition(t *meta.Meta, job *model.Job) (int64, error) { return 0, errors.Trace(table.ErrUnknownPartition.GenWithStackByArgs("drop?", tblInfo.Name.O)) } - err = infosync.PutRuleBundle(nil, bundle) + err = infosync.PutRuleBundles(nil, []*placement.Bundle{bundle}) if err != nil { job.State = model.JobStateCancelled return 0, errors.Wrapf(err, "failed to notify PD the placement rules") diff --git a/ddl/placement/types.go b/ddl/placement/types.go index c5dcda9b2e8e8..5bb282342e7e9 100644 --- a/ddl/placement/types.go +++ b/ddl/placement/types.go @@ -108,6 +108,11 @@ func (b *Bundle) Clone() *Bundle { return newBundle } +// IsEmpty is used to check if a bundle is empty. +func (b *Bundle) IsEmpty() bool { + return len(b.Rules) == 0 && b.Index == 0 && !b.Override +} + // RuleOpType indicates the operation type. 
type RuleOpType string diff --git a/ddl/placement/types_test.go b/ddl/placement/types_test.go index 1e7be83006300..0b048692d9481 100644 --- a/ddl/placement/types_test.go +++ b/ddl/placement/types_test.go @@ -28,6 +28,23 @@ var _ = Suite(&testRuleSuite{}) type testBundleSuite struct{} +func (t *testBundleSuite) TestEmpty(c *C) { + bundle := &Bundle{ID: GroupID(1)} + c.Assert(bundle.IsEmpty(), IsTrue) + + bundle = &Bundle{ID: GroupID(1), Index: 1} + c.Assert(bundle.IsEmpty(), IsFalse) + + bundle = &Bundle{ID: GroupID(1), Override: true} + c.Assert(bundle.IsEmpty(), IsFalse) + + bundle = &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}} + c.Assert(bundle.IsEmpty(), IsFalse) + + bundle = &Bundle{ID: GroupID(1), Index: 1, Override: true} + c.Assert(bundle.IsEmpty(), IsFalse) +} + func (t *testBundleSuite) TestClone(c *C) { bundle := &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}} diff --git a/ddl/placement_rule_test.go b/ddl/placement_rule_test.go index 3fe3882275bdb..ec6d9b170abd2 100644 --- a/ddl/placement_rule_test.go +++ b/ddl/placement_rule_test.go @@ -253,50 +253,21 @@ func (s *testPlacementSuite) TestPlacementBuild(c *C) { func (s *testPlacementSuite) TestPlacementBuildDrop(c *C) { tests := []struct { - input []int64 - output []*placement.RuleOp + input int64 + output *placement.Bundle }{ { - input: []int64{2}, - output: []*placement.RuleOp{ - { - Action: placement.RuleOpDel, - DeleteByIDPrefix: true, - Rule: &placement.Rule{ - GroupID: placement.RuleDefaultGroupID, - ID: "0_t0_p2", - }, - }, - }, + input: 2, + output: &placement.Bundle{ID: placement.GroupID(2)}, }, { - input: []int64{1, 2}, - output: []*placement.RuleOp{ - { - Action: placement.RuleOpDel, - DeleteByIDPrefix: true, - Rule: &placement.Rule{ - GroupID: placement.RuleDefaultGroupID, - ID: "0_t0_p1", - }, - }, - { - Action: placement.RuleOpDel, - DeleteByIDPrefix: true, - Rule: &placement.Rule{ - GroupID: placement.RuleDefaultGroupID, - ID: "0_t0_p2", - }, - }, - }, + input: 1, + output: &placement.Bundle{ID: placement.GroupID(1)}, }, } for _, t := range tests { - out := buildPlacementDropRules(0, 0, t.input) - c.Assert(len(out), Equals, len(t.output)) - for i := range t.output { - c.Assert(s.compareRuleOp(out[i], t.output[i]), IsTrue, Commentf("expect: %+v, obtained: %+v", t.output[i], out[i])) - } + out := buildPlacementDropBundle(t.input) + c.Assert(t.output, DeepEquals, out) } } diff --git a/domain/infosync/info.go b/domain/infosync/info.go index 3eb549e3d4534..0660a3d583624 100644 --- a/domain/infosync/info.go +++ b/domain/infosync/info.go @@ -423,8 +423,12 @@ func GetRuleBundle(ctx context.Context, name string) (*placement.Bundle, error) return bundle, err } -// PutRuleBundle is used to post one specific rule bundle to PD. -func PutRuleBundle(ctx context.Context, bundle *placement.Bundle) error { +// PutRuleBundles is used to post specific rule bundles to PD. 
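+// Posting is skipped when bundles is empty; otherwise all bundles are
+// marshalled into a single partial placement-rule config request to PD.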
+func PutRuleBundles(ctx context.Context, bundles []*placement.Bundle) error {
+	if len(bundles) == 0 {
+		return nil
+	}
+
 	is, err := getGlobalInfoSyncer()
 	if err != nil {
 		return err
 	}
@@ -440,12 +444,12 @@
 		return errors.Errorf("pd unavailable")
 	}
 
-	b, err := json.Marshal(bundle)
+	b, err := json.Marshal(bundles)
 	if err != nil {
 		return err
 	}
 
-	_, err = doRequest(ctx, addrs, path.Join(pdapi.Config, "placement-rule", bundle.ID), "POST", bytes.NewReader(b))
+	_, err = doRequest(ctx, addrs, path.Join(pdapi.Config, "placement-rule")+"?partial=true", "POST", bytes.NewReader(b))
 	return err
 }

From d355c6d965d219f271626b22b35df3f95ca70c7d Mon Sep 17 00:00:00 2001
From: Lynn
Date: Tue, 29 Sep 2020 10:57:49 +0800
Subject: [PATCH 04/16] docs/design: add the proposal of changing column types (#18435)

---
 docs/design/2020-07-07-change-column-types.md | 105 ++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 docs/design/2020-07-07-change-column-types.md

diff --git a/docs/design/2020-07-07-change-column-types.md b/docs/design/2020-07-07-change-column-types.md
new file mode 100644
index 0000000000000..3212ff93158d6
--- /dev/null
+++ b/docs/design/2020-07-07-change-column-types.md
@@ -0,0 +1,105 @@
+# Proposal: Changing column types
+
+- Author(s): [zimuxia](https://github.com/zimulala) (Xia Li)
+- Last updated: 2020-07-07
+- Discussion at: https://tidbcommunity.slack.com/archives/CMAKWBNJU
+
+## Abstract
+
+This proposal describes a new feature that supports column type modification more comprehensively.
+
+## Background
+
+This feature uses the `alter table ... change/modify column` syntax to support modification of column types. The specific syntax is as follows:
+
+```sql
+ALTER TABLE tbl_name
+    [alter_specification]
+
+alter_specification:
+CHANGE [COLUMN] old_col_name new_col_name column_definition
+    [FIRST | AFTER col_name]
+| MODIFY [COLUMN] col_name column_definition
+    [FIRST | AFTER col_name]
+```
+
+At present, column type modification only supports lengthening within the same type, that is, the data type is never truly changed in the storage layer. The limitations of the newly supported changes are as follows:
+* Lossy changes are not supported, such as changing from BIGINT to INTEGER, or from VARCHAR(255) to VARCHAR(10)
+* Modifying the precision of the DECIMAL type is not supported
+* Changing the UNSIGNED attribute is not supported
+* Changing the CHARACTER SET attribute is only supported from utf8 to utf8mb4
+
+## Proposal
+
+The column type modifications supported by this proposal involve rewriting column data and rebuilding the related indexes. This operation still needs to meet the basic requirements of an online DDL operation, and to preserve compatibility between previous and subsequent versions of TiDB.
+
+To support this feature, TiDB needs to add an additional related column for the column being modified; the type of the newly added column is the target type. In addition, for each index that contains this column, an additional related index needs to be added, with the type of the corresponding column updated in the new index.
+
+## Rationale
+
+This proposal suggests adding additional related columns and indexes for the column to be modified, that is, making a copy of the columns and indexes that contain the changed/modified column. If user data is written during the copying process, the corresponding data needs to be updated in real time.
+Assume that the type of `colA` is changed from `originalType` to `newType`, and that `idxA` is an index involving `colA` (for simplicity, assume there is only one such general index, and that neither the column nor the index involves generated columns).
+
+### Pre-preparation
+
+Add `ChangingCol` and `ChangingIdx` fields to the column and the related index that need to be modified; they associate the original objects with the modified column and index information.
+
+When an insert or update operation (executed in the `AddRecord` or `UpdateRecord` function) touches the column whose type is to be modified, the backup column and index are written or updated according to the `newType` type.
+
+### Process
+
+1. Update the metadata information: create the corresponding column `changingColA` and the index `changingIdxA` for `idxA`, and append them to the end of the column array and index array in `TableInfo`. The states of `changingColA` and `changingIdxA` change similarly to the add index operation; in particular, in the `StateWriteReorganization` state the reorganization information is initialized.
+2. In the reorganization stage, row data and index data need to be processed in batches, similar to the `add index` operation. Get the batch processing range, then use `newType` to construct the `changingColA` and `changingIdxA` information for the insert operation (this operation requires the corresponding row and index columns to be locked). If data truncation or other errors are encountered at this stage, the errors are reported and the operation rolls back.
+3. The final phase requires 3 status changes:
+    * Lock this table so that it cannot be written to.
+    * Drop `colA` and `idxA`, change the status of `changingColA` and `changingIdxA` to public, and update their names, offsets, etc.
+    * Unlock this table.
+
+### Note
+
+This operation itself is an `alter table tableName modify/change column …` statement, which may combine changing the column type, changing the column attribute from null to not null, changing the column offset, etc. To preserve atomicity, these changes need to be applied together.
+
+In addition, because this feature is complex and has to be split into multiple PRs to keep reviews manageable, a switch can be added to gate the partially merged work.
+
+### Rolling back
+
+Rolling back requires changing the states of `colA` and `idxA` back to `StatePublic` and then dropping `changingColA` and `changingIdxA`. Any modified flags and other attributes also need to be rolled back.
+
+## Compatibility
+
+### Compatibility issues with MySQL
+
+* Considering the complexity of supporting clustered indexes, and since TiDB currently does not fully support them, this feature temporarily does not support type modification for primary key columns.
+
+### Compatibility issues with TiDB
+
+The DDL statement itself already has a corresponding type (ActionModifyColumn), so improving this operation raises compatibility issues during rolling upgrades. This problem is handled by adding a global variable `tidb_enable_change_column_type`. When this value is true, the `modify/change column` statement is allowed to perform more kinds of column type changes (the features supported by this proposal).
+
+* For new clusters, set this variable to true during bootstrap.
+* For clusters requiring rolling upgrade, the default value is false, and users need to explicitly set this variable to true.
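+
+As an illustration, here is a minimal sketch of how the switch might be exercised, in the testkit style used elsewhere in this repository (the variable name follows this proposal; exact statement support depends on the final implementation):
+
+```go
+// Sketch only: enable the proposed switch, then perform a true type change
+// that forces row data and the dependent index to be reorganized.
+func sketchChangeColumnType(tk *testkit.TestKit) {
+	tk.MustExec("set global tidb_enable_change_column_type = 1")
+	tk.MustExec("create table t (a int, key idx_a (a))")
+	// int -> varchar is a real type change, not a same-type lengthening.
+	tk.MustExec("alter table t modify column a varchar(32)")
+}
+```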
+
+### Compatibility issues with other components
+
+This feature needs to be synchronized with components such as Tools, TiFlash, and BR.
+
+## Implementation
+
+Besides updating the corresponding column type and data, and updating the data in the indexes that involve the column, the implementation of column type modification also needs to consider the following cases.
+
+### Generated columns
+
+When the modified type relates to generated columns (either stored or virtual), the implementation has the following characteristics:
+
+* The types of the generated columns related to this column are unchanged.
+* When performing an insert operation, the value of the modified column is affected by the generated column expression involving this column.
+* The value of the related generated column changes with the value of the modified column.
+
+### Expression index
+
+When columns of the modified type have a related expression index, the implementation has the following characteristics:
+
+* The `Type`, `DBType` and `Max_length` of the related expression index may be modified.
+* When performing an insert operation, the value of the modified column is affected by the expression in the expression index involving this column.
+
+## Open issues (if applicable)
+
+https://github.com/pingcap/tidb/issues/17526

From 132d9817d7da4a77294c3566e4947ea462679e40 Mon Sep 17 00:00:00 2001
From: Lingyu Song
Date: Tue, 29 Sep 2020 11:08:53 +0800
Subject: [PATCH 05/16] executor: support partition pruning for `IndexJoin` inner table (#19990)

---
 executor/builder.go           | 223 +++++++++++++++++++++++++++++++---
 executor/index_lookup_join.go |   7 +-
 executor/partition_table.go   |  38 +++++-
 table/tables/partition.go     |  41 ++++++-
 4 files changed, 282 insertions(+), 27 deletions(-)

diff --git a/executor/builder.go b/executor/builder.go
index 5f6154c1ee6d3..7e2b01ba4358f 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -2563,7 +2563,7 @@ func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) E
 		}
 	}
 
-	nextPartition := nextPartitionForTableReader{ret}
+	nextPartition := nextPartitionForTableReader{exec: ret}
 	exec, err := buildPartitionTable(b, ts.Table, &v.PartitionInfo, ret, nextPartition)
 	if err != nil {
 		b.err = err
@@ -2590,6 +2590,111 @@ func buildPartitionTable(b *executorBuilder, tblInfo *model.TableInfo, partition
 	}, nil
 }
 
+func buildIndexRangeForEachPartition(ctx sessionctx.Context, usedPartitions []table.PhysicalTable, contentPos []int64,
+	lookUpContent []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) (map[int64][]*ranger.Range, error) {
+	contentBucket := make(map[int64][]*indexJoinLookUpContent)
+	for _, p := range usedPartitions {
+		contentBucket[p.GetPhysicalID()] = make([]*indexJoinLookUpContent, 0, 8)
+	}
+	for i, pos := range contentPos {
+		if _, ok := contentBucket[pos]; ok {
+			contentBucket[pos] = append(contentBucket[pos], lookUpContent[i])
+		}
+	}
+	nextRange := make(map[int64][]*ranger.Range)
+	for _, p := range usedPartitions {
+		ranges, err := buildRangesForIndexJoin(ctx, contentBucket[p.GetPhysicalID()], indexRanges, keyOff2IdxOff, cwc)
+		if err != nil {
+			return nil, err
+		}
+		nextRange[p.GetPhysicalID()] = ranges
+	}
+	return nextRange, nil
+}
+
+func buildKVRangeForEachPartition(ctx sessionctx.Context, usedPartitions []table.PhysicalTable, contentPos []int64, isCommonHandle bool,
+	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range,
keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) (map[int64]kvRangeBuilder, error) { + rangeBuilders := make(map[int64]kvRangeBuilder) + contentBucket := make(map[int64][]*indexJoinLookUpContent) + for _, p := range usedPartitions { + contentBucket[p.GetPhysicalID()] = make([]*indexJoinLookUpContent, 0, 8) + } + for i, pos := range contentPos { + if _, ok := contentBucket[pos]; ok { + contentBucket[pos] = append(contentBucket[pos], lookUpContents[i]) + } + } + for _, p := range usedPartitions { + if isCommonHandle { + rangeBuilders[p.GetPhysicalID()] = kvRangeBuilderFromFunc(func(pid int64) ([]kv.KeyRange, error) { + return buildKvRangesForIndexJoin(ctx, pid, -1, contentBucket[pid], indexRanges, keyOff2IdxOff, cwc) + }) + } else { + handles := make([]kv.Handle, 0, len(contentBucket[p.GetPhysicalID()])) + for _, content := range contentBucket[p.GetPhysicalID()] { + handle := kv.IntHandle(content.keys[0].GetInt64()) + handles = append(handles, handle) + } + rangeBuilders[p.GetPhysicalID()] = kvRangeBuilderFromHandles(handles) + } + } + return rangeBuilders, nil +} + +func prunePartitionForInnerExecutor(ctx sessionctx.Context, tbl table.Table, schema *expression.Schema, partitionInfo *plannercore.PartitionInfo, + lookUpContent []*indexJoinLookUpContent) (usedPartition []table.PhysicalTable, canPrune bool, contentPos []int64, err error) { + partitionTbl := tbl.(table.PartitionedTable) + locateKey := make([]types.Datum, schema.Len()) + // TODO: condition based pruning can be do in advance. + condPruneResult, err := partitionPruning(ctx, partitionTbl, partitionInfo.PruningConds, partitionInfo.PartitionNames, partitionInfo.Columns, partitionInfo.ColumnNames) + if err != nil { + return nil, false, nil, err + } + + // check whether can runtime prune. 
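+	// Runtime (per lookup row) pruning is only possible when every column
+	// referenced by the partition expression is covered by the join key
+	// columns; otherwise the target partition of a row cannot be computed
+	// here and we fall back to the condition-based pruning result above.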
+ type partitionExpr interface { + PartitionExpr() (*tables.PartitionExpr, error) + } + pe, err := tbl.(partitionExpr).PartitionExpr() + if err != nil { + return nil, false, nil, err + } + offsetMap := make(map[int]bool) + for _, offset := range lookUpContent[0].keyCols { + offsetMap[offset] = true + } + for _, offset := range pe.ColumnOffset { + if _, ok := offsetMap[offset]; !ok { + logutil.BgLogger().Warn("can not runtime prune in index join") + return condPruneResult, false, nil, nil + } + } + + partitions := make(map[int64]table.PhysicalTable) + contentPos = make([]int64, len(lookUpContent)) + for idx, content := range lookUpContent { + for i, date := range content.keys { + locateKey[content.keyCols[i]] = date + } + p, err := partitionTbl.GetPartitionByRow(ctx, locateKey) + if err != nil { + return nil, false, nil, err + } + if _, ok := partitions[p.GetPhysicalID()]; !ok { + partitions[p.GetPhysicalID()] = p + } + contentPos[idx] = p.GetPhysicalID() + } + + usedPartition = make([]table.PhysicalTable, 0, len(partitions)) + for _, p := range condPruneResult { + if _, ok := partitions[p.GetPhysicalID()]; ok { + usedPartition = append(usedPartition, p) + } + } + return usedPartition, true, contentPos, nil +} + func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexReader) (*IndexReaderExecutor, error) { dagReq, streaming, err := b.constructDAGReq(v.IndexPlans, kv.TiKV) if err != nil { @@ -3038,10 +3143,33 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte e.kvRangeBuilder = kvRangeBuilderFromFunc(func(pid int64) ([]kv.KeyRange, error) { return buildKvRangesForIndexJoin(e.ctx, pid, -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc) }) - nextPartition := nextPartitionForTableReader{e} - return buildPartitionTable(builder.executorBuilder, tbInfo, &v.PartitionInfo, e, nextPartition) + nextPartition := nextPartitionForTableReader{exec: e, innerPartitionInfo: &innerPartitionInfo{isFullPartition: true}} + tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID) + usedPartition, canPrune, contentPos, err := prunePartitionForInnerExecutor(builder.executorBuilder.ctx, tbl, e.Schema(), &v.PartitionInfo, lookUpContents) + if err != nil { + return nil, err + } + if len(usedPartition) != 0 { + if canPrune { + rangeBuilders, err := buildKVRangeForEachPartition(e.ctx, usedPartition, contentPos, v.IsCommonHandle, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + if err != nil { + return nil, err + } + nextPartition.rangeBuilders = rangeBuilders + nextPartition.isFullPartition = false + } + partitionExec := &PartitionTableExecutor{ + baseExecutor: *e.base(), + partitions: usedPartition, + nextPartition: nextPartition, + } + return partitionExec, nil + } + ret := &TableDualExec{baseExecutor: *e.base()} + return ret, err } handles := make([]kv.Handle, 0, len(lookUpContents)) + validLookUpContents := make([]*indexJoinLookUpContent, 0, len(lookUpContents)) for _, content := range lookUpContents { isValidHandle := true handle := kv.IntHandle(content.keys[0].GetInt64()) @@ -3053,6 +3181,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte } if isValidHandle { handles = append(handles, handle) + validLookUpContents = append(validLookUpContents, content) } } @@ -3062,10 +3191,31 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte if !builder.ctx.GetSessionVars().UseDynamicPartitionPrune() { return builder.buildTableReaderFromHandles(ctx, e, handles, canReorderHandles) } - e.kvRangeBuilder = 
kvRangeBuilderFromHandles(handles) - nextPartition := nextPartitionForTableReader{e} - return buildPartitionTable(builder.executorBuilder, tbInfo, &v.PartitionInfo, e, nextPartition) + nextPartition := nextPartitionForTableReader{exec: e, innerPartitionInfo: &innerPartitionInfo{isFullPartition: true}} + tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID) + usedPartition, canPrune, contentPos, err := prunePartitionForInnerExecutor(builder.executorBuilder.ctx, tbl, e.Schema(), &v.PartitionInfo, validLookUpContents) + if err != nil { + return nil, err + } + if len(usedPartition) != 0 { + if canPrune { + rangeBuilders, err := buildKVRangeForEachPartition(e.ctx, usedPartition, contentPos, v.IsCommonHandle, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + if err != nil { + return nil, err + } + nextPartition.rangeBuilders = rangeBuilders + nextPartition.isFullPartition = false + } + partitionExec := &PartitionTableExecutor{ + baseExecutor: *e.base(), + partitions: usedPartition, + nextPartition: nextPartition, + } + return partitionExec, nil + } + ret := &TableDualExec{baseExecutor: *e.base()} + return ret, err } type kvRangeBuilderFromFunc func(pid int64) ([]kv.KeyRange, error) @@ -3148,15 +3298,35 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte return e, err } - e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + nextPartition := nextPartitionForIndexReader{exec: e, innerPartitionInfo: &innerPartitionInfo{isFullPartition: true}} + tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID) + usedPartition, canPrune, contentPos, err := prunePartitionForInnerExecutor(builder.executorBuilder.ctx, tbl, e.Schema(), &v.PartitionInfo, lookUpContents) if err != nil { return nil, err } - nextPartition := nextPartitionForIndexReader{exec: e} - ret, err := buildPartitionTable(builder.executorBuilder, tbInfo, &v.PartitionInfo, e, nextPartition) - if err != nil { - return nil, err + if len(usedPartition) != 0 { + if canPrune { + rangeMap, err := buildIndexRangeForEachPartition(e.ctx, usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + if err != nil { + return nil, err + } + nextPartition.isFullPartition = false + nextPartition.nextRange = rangeMap + } else { + e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + if err != nil { + return nil, err + } + } + partitionExec := &PartitionTableExecutor{ + baseExecutor: *e.base(), + partitions: usedPartition, + nextPartition: nextPartition, + } + err = partitionExec.Open(ctx) + return partitionExec, err } + ret := &TableDualExec{baseExecutor: *e.base()} err = ret.Open(ctx) return ret, err } @@ -3177,16 +3347,35 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context err = e.open(ctx) return e, err } - - e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + nextPartition := nextPartitionForIndexLookUp{exec: e, innerPartitionInfo: &innerPartitionInfo{isFullPartition: true}} + tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID) + usedPartition, canPrune, contentPos, err := prunePartitionForInnerExecutor(builder.executorBuilder.ctx, tbl, e.Schema(), &v.PartitionInfo, lookUpContents) if err != nil { return nil, err } - nextPartition := nextPartitionForIndexLookUp{exec: e} - ret, err := buildPartitionTable(builder.executorBuilder, tbInfo, &v.PartitionInfo, e, nextPartition) - if err != nil { - return nil, err + if len(usedPartition) != 0 
{ + if canPrune { + rangeMap, err := buildIndexRangeForEachPartition(e.ctx, usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + if err != nil { + return nil, err + } + nextPartition.isFullPartition = false + nextPartition.nextRange = rangeMap + } else { + e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc) + if err != nil { + return nil, err + } + } + partitionExec := &PartitionTableExecutor{ + baseExecutor: *e.base(), + partitions: usedPartition, + nextPartition: nextPartition, + } + err = partitionExec.Open(ctx) + return partitionExec, err } + ret := &TableDualExec{baseExecutor: *e.base()} err = ret.Open(ctx) return ret, err } diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index 8dde95a671c5b..be972aade587c 100644 --- a/executor/index_lookup_join.go +++ b/executor/index_lookup_join.go @@ -492,8 +492,9 @@ func (iw *innerWorker) run(ctx context.Context, wg *sync.WaitGroup) { } type indexJoinLookUpContent struct { - keys []types.Datum - row chunk.Row + keys []types.Datum + row chunk.Row + keyCols []int } func (iw *innerWorker) handleTask(ctx context.Context, task *lookUpJoinTask) error { @@ -558,7 +559,7 @@ func (iw *innerWorker) constructLookupContent(task *lookUpJoinTask) ([]*indexJoi // dLookUpKey is sorted and deduplicated at sortAndDedupLookUpContents. // So we don't need to do it here. } - lookUpContents = append(lookUpContents, &indexJoinLookUpContent{keys: dLookUpKey, row: chk.GetRow(rowIdx)}) + lookUpContents = append(lookUpContents, &indexJoinLookUpContent{keys: dLookUpKey, row: chk.GetRow(rowIdx), keyCols: iw.keyCols}) } } diff --git a/executor/partition_table.go b/executor/partition_table.go index 6989c0eef7a7f..88ac2ad769b4c 100644 --- a/executor/partition_table.go +++ b/executor/partition_table.go @@ -22,6 +22,7 @@ import ( plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tipb/go-tipb" ) @@ -40,13 +41,32 @@ type nextPartition interface { nextPartition(context.Context, table.PhysicalTable) (Executor, error) } +type innerPartitionInfo struct { + isFullPartition bool + nextRange map[int64][]*ranger.Range +} + +type innerNextPartition interface { + nextPartition + GetInnerPartitionInfo() *innerPartitionInfo +} + type nextPartitionForTableReader struct { - exec *TableReaderExecutor + *innerPartitionInfo + rangeBuilders map[int64]kvRangeBuilder + exec *TableReaderExecutor +} + +func (n nextPartitionForTableReader) GetInnerPartitionInfo() *innerPartitionInfo { + return n.innerPartitionInfo } func (n nextPartitionForTableReader) nextPartition(ctx context.Context, tbl table.PhysicalTable) (Executor, error) { n.exec.table = tbl n.exec.kvRanges = n.exec.kvRanges[:0] + if n.innerPartitionInfo != nil && !n.isFullPartition { + n.exec.kvRangeBuilder = n.rangeBuilders[tbl.GetPhysicalID()] + } if err := updateDAGRequestTableID(ctx, n.exec.dagPB, tbl.Meta().ID, tbl.GetPhysicalID()); err != nil { return nil, err } @@ -54,22 +74,38 @@ func (n nextPartitionForTableReader) nextPartition(ctx context.Context, tbl tabl } type nextPartitionForIndexLookUp struct { + *innerPartitionInfo exec *IndexLookUpExecutor } +func (n nextPartitionForIndexLookUp) GetInnerPartitionInfo() *innerPartitionInfo { + return n.innerPartitionInfo +} + func (n nextPartitionForIndexLookUp) nextPartition(ctx context.Context, tbl table.PhysicalTable) (Executor, error) { n.exec.table = tbl 
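+	// If the inner side was pruned per partition, install the index ranges
+	// that belong to this physical partition before the executor is reused.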
+	if n.innerPartitionInfo != nil && !n.isFullPartition {
+		n.exec.ranges = n.nextRange[tbl.GetPhysicalID()]
+	}
 	return n.exec, nil
 }
 
 type nextPartitionForIndexReader struct {
+	*innerPartitionInfo
 	exec *IndexReaderExecutor
 }
 
+func (n nextPartitionForIndexReader) GetInnerPartitionInfo() *innerPartitionInfo {
+	return n.innerPartitionInfo
+}
+
 func (n nextPartitionForIndexReader) nextPartition(ctx context.Context, tbl table.PhysicalTable) (Executor, error) {
 	exec := n.exec
 	exec.table = tbl
 	exec.physicalTableID = tbl.GetPhysicalID()
+	if n.innerPartitionInfo != nil && !n.isFullPartition {
+		exec.ranges = n.nextRange[tbl.GetPhysicalID()]
+	}
 	return exec, nil
 }
 
diff --git a/table/tables/partition.go b/table/tables/partition.go
index 4cfe8092d1315..bcb5e6a2b28c2 100644
--- a/table/tables/partition.go
+++ b/table/tables/partition.go
@@ -132,6 +132,8 @@ type PartitionExpr struct {
 	*ForRangePruning
 	// Used in the range column pruning process.
 	*ForRangeColumnsPruning
+	// ColumnOffset stores the offsets of the partition columns.
+	ColumnOffset []int
 }
 
 func initEvalBufferType(t *partitionedTable) {
@@ -292,12 +294,28 @@ func generateRangePartitionExpr(ctx sessionctx.Context, pi *model.PartitionInfo,
 		UpperBounds: locateExprs,
 	}
 
+	// build column offset.
+	partExp := pi.Expr
+	if len(pi.Columns) == 1 {
+		partExp = pi.Columns[0].L
+	}
+	exprs, err := parseSimpleExprWithNames(p, ctx, partExp, schema, names)
+	if err != nil {
+		return nil, err
+	}
+	partitionCols := expression.ExtractColumns(exprs)
+	offset := make([]int, len(partitionCols))
+	for i, col := range columns {
+		for j, partitionCol := range partitionCols {
+			if partitionCol.UniqueID == col.UniqueID {
+				offset[j] = i
+			}
+		}
+	}
+	ret.ColumnOffset = offset
+
 	switch len(pi.Columns) {
 	case 0:
-		exprs, err := parseSimpleExprWithNames(p, ctx, pi.Expr, schema, names)
-		if err != nil {
-			return nil, err
-		}
 		tmp, err := dataForRangePruning(ctx, pi)
 		if err != nil {
 			return nil, errors.Trace(err)
 		}
@@ -330,10 +348,21 @@ func generateHashPartitionExpr(ctx sessionctx.Context, pi *model.PartitionInfo,
 		logutil.BgLogger().Error("wrong table partition expression", zap.String("expression", pi.Expr), zap.Error(err))
 		return nil, errors.Trace(err)
 	}
+	// build column offset.
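+	// For illustration (hypothetical schema): for a table with columns
+	// (a, b, c) partitioned by hash(b), ExtractColumns yields [b], and the
+	// loop below records offset[0] = 1, b's position in the row, which is
+	// later used to build the partition-locating key.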
+ partitionCols := expression.ExtractColumns(exprs) + offset := make([]int, len(partitionCols)) + for i, col := range columns { + for j, partitionCol := range partitionCols { + if partitionCol.UniqueID == col.UniqueID { + offset[j] = i + } + } + } exprs.HashCode(ctx.GetSessionVars().StmtCtx) return &PartitionExpr{ - Expr: exprs, - OrigExpr: origExpr, + Expr: exprs, + OrigExpr: origExpr, + ColumnOffset: offset, }, nil } From 2809a57981e4a97be870ee16e8836d93ab70c5fb Mon Sep 17 00:00:00 2001 From: xhe Date: Tue, 29 Sep 2020 11:51:07 +0800 Subject: [PATCH 06/16] types: fix year conversion (#19781) --- types/convert_test.go | 16 ++++++++++++++++ types/datum.go | 7 +++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/types/convert_test.go b/types/convert_test.go index 4b898f82bf007..7d5147f0f7de0 100644 --- a/types/convert_test.go +++ b/types/convert_test.go @@ -687,10 +687,26 @@ func (s *testTypeConvertSuite) TestConvert(c *C) { signedAccept(c, mysql.TypeYear, 1901, "1901") signedAccept(c, mysql.TypeYear, 1900.567, "1901") signedDeny(c, mysql.TypeYear, 1900.456, "0") + signedAccept(c, mysql.TypeYear, 0, "0") + signedAccept(c, mysql.TypeYear, "0", "2000") + signedAccept(c, mysql.TypeYear, "00", "2000") + signedAccept(c, mysql.TypeYear, " 0", "2000") + signedAccept(c, mysql.TypeYear, " 00", "2000") + signedAccept(c, mysql.TypeYear, " 000", "0") + signedAccept(c, mysql.TypeYear, " 0000 ", "2000") + signedAccept(c, mysql.TypeYear, " 0ab", "0") + signedAccept(c, mysql.TypeYear, "00bc", "0") + signedAccept(c, mysql.TypeYear, "000a", "0") + signedAccept(c, mysql.TypeYear, " 000a ", "2000") signedAccept(c, mysql.TypeYear, 1, "2001") + signedAccept(c, mysql.TypeYear, "1", "2001") + signedAccept(c, mysql.TypeYear, "01", "2001") signedAccept(c, mysql.TypeYear, 69, "2069") + signedAccept(c, mysql.TypeYear, "69", "2069") signedAccept(c, mysql.TypeYear, 70, "1970") + signedAccept(c, mysql.TypeYear, "70", "1970") signedAccept(c, mysql.TypeYear, 99, "1999") + signedAccept(c, mysql.TypeYear, "99", "1999") signedDeny(c, mysql.TypeYear, 100, "0") signedDeny(c, mysql.TypeYear, "99999999999999999999999999999999999", "0") diff --git a/types/datum.go b/types/datum.go index 5b4d2187bdd35..fbccd3bdfedaa 100644 --- a/types/datum.go +++ b/types/datum.go @@ -1333,12 +1333,15 @@ func (d *Datum) convertToMysqlYear(sc *stmtctx.StatementContext, target *FieldTy switch d.k { case KindString, KindBytes: s := d.GetString() - y, err = StrToInt(sc, s, false) + trimS := strings.TrimSpace(s) + y, err = StrToInt(sc, trimS, false) if err != nil { ret.SetInt64(0) return ret, errors.Trace(err) } - if len(s) != 4 && len(s) > 0 && s[0:1] == "0" { + // condition: + // parsed to 0, not a string of length 4, the first valid char is a 0 digit + if len(s) != 4 && y == 0 && strings.HasPrefix(trimS, "0") { adjust = true } case KindMysqlTime: From fa81ec51b4181863553e70f0505710a6bf3f3c23 Mon Sep 17 00:00:00 2001 From: Null not nil <67764674+nullnotnil@users.noreply.github.com> Date: Tue, 29 Sep 2020 00:04:24 -0600 Subject: [PATCH 07/16] *: refactor sysvars to allow component registration (#20289) --- executor/infoschema_reader.go | 2 +- executor/show.go | 6 +- executor/simple.go | 6 +- planner/core/expression_rewriter.go | 2 +- .../conn_ip_example/conn_ip_example_test.go | 2 - plugin/plugin.go | 2 +- plugin/plugin_test.go | 1 - server/server.go | 16 +- session/bootstrap.go | 6 +- session/bootstrap_test.go | 2 +- session/session.go | 4 +- sessionctx/variable/mock_globalaccessor.go | 2 +- sessionctx/variable/sysvar.go | 1341 
+++++++++-------- sessionctx/variable/varsutil.go | 11 +- tidb-server/main.go | 23 +- tidb-server/main_test.go | 8 +- 16 files changed, 743 insertions(+), 691 deletions(-) diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index e4b7e09039fe5..137a29b8cde60 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -1583,7 +1583,7 @@ func (e *memtableRetriever) setDataFromSessionVar(ctx sessionctx.Context) error var rows [][]types.Datum var err error sessionVars := ctx.GetSessionVars() - for _, v := range variable.SysVars { + for _, v := range variable.GetSysVars() { var value string value, err = variable.GetSessionSystemVar(sessionVars, v.Name) if err != nil { diff --git a/executor/show.go b/executor/show.go index 7bb9c4afeb256..521532988b71e 100644 --- a/executor/show.go +++ b/executor/show.go @@ -650,9 +650,9 @@ func (e *ShowExec) fetchShowVariables() (err error) { value string ok bool sessionVars = e.ctx.GetSessionVars() - unreachedVars = make([]string, 0, len(variable.SysVars)) + unreachedVars = make([]string, 0, len(variable.GetSysVars())) ) - for _, v := range variable.SysVars { + for _, v := range variable.GetSysVars() { if !e.GlobalScope { // For a session scope variable, // 1. try to fetch value from SessionVars.Systems; @@ -682,7 +682,7 @@ func (e *ShowExec) fetchShowVariables() (err error) { for _, varName := range unreachedVars { varValue, ok := systemVars[varName] if !ok { - varValue = variable.SysVars[varName].Value + varValue = variable.GetSysVar(varName).Value } e.appendRow([]interface{}{varName, varValue}) } diff --git a/executor/simple.go b/executor/simple.go index 79b8e36a73373..21b04b9c16532 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -1128,9 +1128,9 @@ func (e *SimpleExec) executeAlterInstance(s *ast.AlterInstanceStmt) error { logutil.BgLogger().Info("execute reload tls", zap.Bool("NoRollbackOnError", s.NoRollbackOnError)) sm := e.ctx.GetSessionManager() tlsCfg, err := util.LoadTLSCertificates( - variable.SysVars["ssl_ca"].Value, - variable.SysVars["ssl_key"].Value, - variable.SysVars["ssl_cert"].Value, + variable.GetSysVar("ssl_ca").Value, + variable.GetSysVar("ssl_key").Value, + variable.GetSysVar("ssl_cert").Value, ) if err != nil { if !s.NoRollbackOnError || config.GetGlobalConfig().Security.RequireSecureTransport { diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index 90a8fb42b45fd..1df958dfdbc5e 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -1136,7 +1136,7 @@ func (er *expressionRewriter) rewriteVariable(v *ast.VariableExpr) { return } } - sysVar := variable.SysVars[name] + sysVar := variable.GetSysVar(name) if sysVar == nil { er.err = variable.ErrUnknownSystemVar.GenWithStackByArgs(name) return diff --git a/plugin/conn_ip_example/conn_ip_example_test.go b/plugin/conn_ip_example/conn_ip_example_test.go index 096ef7e3a6003..fa2a776488f25 100644 --- a/plugin/conn_ip_example/conn_ip_example_test.go +++ b/plugin/conn_ip_example/conn_ip_example_test.go @@ -19,7 +19,6 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/tidb/plugin" - "github.com/pingcap/tidb/sessionctx/variable" ) func LoadRunShutdownPluginExample() { @@ -28,7 +27,6 @@ func LoadRunShutdownPluginExample() { cfg := plugin.Config{ Plugins: []string{"conn_ip_example-1"}, PluginDir: "/home/robi/Code/go/src/github.com/pingcap/tidb/plugin/conn_ip_example", - GlobalSysVar: &variable.SysVars, PluginVarNames: &pluginVarNames, } diff --git a/plugin/plugin.go b/plugin/plugin.go index 36a8fb287bf3d..4c6b7534c68a6 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -200,7 +200,7 @@ func Load(ctx context.Context, cfg Config) (err error) { } if cfg.GlobalSysVar != nil { for key, value := range tiPlugins.plugins[kind][i].SysVars { - (*cfg.GlobalSysVar)[key] = value + variable.RegisterSysVar(value) if value.Scope != variable.ScopeSession && cfg.PluginVarNames != nil { *cfg.PluginVarNames = append(*cfg.PluginVarNames, key) } diff --git a/plugin/plugin_test.go b/plugin/plugin_test.go index 851afd5420d19..224484075b435 100644 --- a/plugin/plugin_test.go +++ b/plugin/plugin_test.go @@ -37,7 +37,6 @@ func TestLoadPluginSuccess(t *testing.T) { cfg := Config{ Plugins: []string{pluginSign}, PluginDir: "", - GlobalSysVar: &variable.SysVars, PluginVarNames: &variable.PluginVarNames, EnvVersion: map[string]uint16{"go": 1112}, } diff --git a/server/server.go b/server/server.go index cfbd9bddb602b..8954eca113a77 100644 --- a/server/server.go +++ b/server/server.go @@ -220,7 +220,7 @@ func NewServer(cfg *config.Config, driver IDriver) (*Server, error) { if tlsConfig != nil { setSSLVariable(s.cfg.Security.SSLCA, s.cfg.Security.SSLKey, s.cfg.Security.SSLCert) atomic.StorePointer(&s.tlsConfig, unsafe.Pointer(tlsConfig)) - logutil.BgLogger().Info("mysql protocol server secure connection is enabled", zap.Bool("client verification enabled", len(variable.SysVars["ssl_ca"].Value) > 0)) + logutil.BgLogger().Info("mysql protocol server secure connection is enabled", zap.Bool("client verification enabled", len(variable.GetSysVar("ssl_ca").Value) > 0)) } else if cfg.Security.RequireSecureTransport { return nil, errSecureTransportRequired.FastGenByArgs() } @@ -278,11 +278,11 @@ func NewServer(cfg *config.Config, driver IDriver) (*Server, error) { } func setSSLVariable(ca, key, cert string) { - variable.SysVars["have_openssl"].Value = "YES" - variable.SysVars["have_ssl"].Value = "YES" - variable.SysVars["ssl_cert"].Value = cert - variable.SysVars["ssl_key"].Value = key - variable.SysVars["ssl_ca"].Value = ca + variable.SetSysVar("have_openssl", "YES") + variable.SetSysVar("have_ssl", "YES") + variable.SetSysVar("ssl_cert", cert) + variable.SetSysVar("ssl_key", key) + variable.SetSysVar("ssl_ca", ca) } // Run runs the server. 
@@ -631,10 +631,10 @@ func setSystemTimeZoneVariable() { logutil.BgLogger().Error( "Error getting SystemTZ, use default value instead", zap.Error(err), - zap.String("default system_time_zone", variable.SysVars["system_time_zone"].Value)) + zap.String("default system_time_zone", variable.GetSysVar("system_time_zone").Value)) return } - variable.SysVars["system_time_zone"].Value = tz + variable.SetSysVar("system_time_zone", tz) }) } diff --git a/session/bootstrap.go b/session/bootstrap.go index 10884c094c4cc..1551a84336cb0 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -584,7 +584,7 @@ func upgradeToVer2(s Session, ver int64) { distSQLVars := []string{variable.TiDBDistSQLScanConcurrency} values := make([]string, 0, len(distSQLVars)) for _, v := range distSQLVars { - value := fmt.Sprintf(`("%s", "%s")`, v, variable.SysVars[v].Value) + value := fmt.Sprintf(`("%s", "%s")`, v, variable.GetSysVar(v).Value) values = append(values, value) } sql := fmt.Sprintf("INSERT HIGH_PRIORITY IGNORE INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, @@ -1262,8 +1262,8 @@ func doDMLWorks(s Session) { ("%", "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "N", "Y", "Y", "Y", "Y", "Y")`) // Init global system variables table. - values := make([]string, 0, len(variable.SysVars)) - for k, v := range variable.SysVars { + values := make([]string, 0, len(variable.GetSysVars())) + for k, v := range variable.GetSysVars() { // Session only variable should not be inserted. if v.Scope != variable.ScopeSession { vVal := v.Value diff --git a/session/bootstrap_test.go b/session/bootstrap_test.go index d4ce4bffb4608..a0c4c6d9127c5 100644 --- a/session/bootstrap_test.go +++ b/session/bootstrap_test.go @@ -104,7 +104,7 @@ func (s *testBootstrapSuite) TestBootstrap(c *C) { func globalVarsCount() int64 { var count int64 - for _, v := range variable.SysVars { + for _, v := range variable.GetSysVars() { if v.Scope != variable.ScopeSession { count++ } diff --git a/session/session.go b/session/session.go index 1ff66585d2723..38370ec38afc1 100644 --- a/session/session.go +++ b/session/session.go @@ -983,7 +983,8 @@ func (s *session) GetGlobalSysVar(name string) (string, error) { sysVar, err := s.getExecRet(s, sql) if err != nil { if executor.ErrResultIsEmpty.Equal(err) { - if sv, ok := variable.SysVars[name]; ok { + sv := variable.GetSysVar(name) + if sv != nil { return sv.Value, nil } return "", variable.ErrUnknownSystemVar.GenWithStackByArgs(name) @@ -1769,7 +1770,6 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { err := plugin.Load(context.Background(), plugin.Config{ Plugins: strings.Split(cfg.Plugin.Load, ","), PluginDir: cfg.Plugin.Dir, - GlobalSysVar: &variable.SysVars, PluginVarNames: &variable.PluginVarNames, }) if err != nil { diff --git a/sessionctx/variable/mock_globalaccessor.go b/sessionctx/variable/mock_globalaccessor.go index 24ab573eb2610..9230de81e75b2 100644 --- a/sessionctx/variable/mock_globalaccessor.go +++ b/sessionctx/variable/mock_globalaccessor.go @@ -24,7 +24,7 @@ func NewMockGlobalAccessor() *MockGlobalAccessor { // GetGlobalSysVar implements GlobalVarAccessor.GetGlobalSysVar interface. 
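+// The mock now reads straight from the package-level sysVars registry, so
+// tests observe the same defaults that RegisterSysVar installed.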
func (m *MockGlobalAccessor) GetGlobalSysVar(name string) (string, error) { - v, ok := SysVars[name] + v, ok := sysVars[name] if ok { return v.Value, nil } diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 4063639923f0d..a613745326521 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -16,6 +16,7 @@ package variable import ( "strconv" "strings" + "sync" "sync/atomic" "github.com/pingcap/parser/mysql" @@ -28,6 +29,9 @@ import ( // ScopeFlag is for system variable whether can be changed in global/session dynamically or not. type ScopeFlag uint8 +// TypeFlag is the SysVar type, which doesn't exactly match MySQL types. +type TypeFlag byte + const ( // ScopeNone means the system variable can not be changed dynamically. ScopeNone ScopeFlag = 0 @@ -35,6 +39,25 @@ const ( ScopeGlobal ScopeFlag = 1 << 0 // ScopeSession means the system variable can only be changed in current session. ScopeSession ScopeFlag = 1 << 1 + + // TypeBool for boolean + TypeBool TypeFlag = 0 + // TypeInt for integer + TypeInt TypeFlag = 1 + // TypeLong for Long + TypeLong TypeFlag = 2 + // TypeLongLong for LongLong + TypeLongLong TypeFlag = 3 + // TypeStr for String + TypeStr TypeFlag = 4 + // TypeEnum for Enum + TypeEnum TypeFlag = 5 + // TypeSet for Set + TypeSet TypeFlag = 6 + // TypeDouble for Double + TypeDouble TypeFlag = 7 + // TypeUnsigned for Unsigned integer + TypeUnsigned TypeFlag = 8 ) // SysVar is for system variable. @@ -47,24 +70,58 @@ type SysVar struct { // Value is the variable value. Value string + + // Type is the MySQL type (optional) + Type TypeFlag + + // MinValue will automatically be validated when specified (optional) + MinValue int64 + + // MaxValue will automatically be validated when specified (optional) + MaxValue int64 } -// SysVars is global sys vars map. -var SysVars map[string]*SysVar +var sysVars map[string]*SysVar +var sysVarsLock sync.RWMutex + +// RegisterSysVar adds a sysvar to the SysVars list +func RegisterSysVar(sv *SysVar) { + name := strings.ToLower(sv.Name) + sysVarsLock.Lock() + sysVars[name] = sv + sysVarsLock.Unlock() +} // GetSysVar returns sys var info for name as key. func GetSysVar(name string) *SysVar { name = strings.ToLower(name) - return SysVars[name] + sysVarsLock.RLock() + defer sysVarsLock.RUnlock() + return sysVars[name] +} + +// SetSysVar sets a sysvar. This will not propagate to the cluster, so it should only be used for instance scoped AUTO variables such as system_time_zone. +func SetSysVar(name string, value string) { + name = strings.ToLower(name) + sysVarsLock.Lock() + defer sysVarsLock.Unlock() + sysVars[name].Value = value +} + +// GetSysVars returns the sysVars list under a RWLock +func GetSysVars() map[string]*SysVar { + sysVarsLock.RLock() + defer sysVarsLock.RUnlock() + return sysVars } // PluginVarNames is global plugin var names set. 
var PluginVarNames []string func init() { - SysVars = make(map[string]*SysVar) + sysVars = make(map[string]*SysVar) for _, v := range defaultSysVars { - SysVars[v.Name] = v + RegisterSysVar(v) } initSynonymsSysVariables() } @@ -94,652 +151,652 @@ func BoolToInt32(b bool) int32 { // we only support MySQL now var defaultSysVars = []*SysVar{ - {ScopeGlobal, "gtid_mode", "OFF"}, - {ScopeGlobal, FlushTime, "0"}, - {ScopeNone, "performance_schema_max_mutex_classes", "200"}, - {ScopeGlobal | ScopeSession, LowPriorityUpdates, "0"}, - {ScopeGlobal | ScopeSession, SessionTrackGtids, "OFF"}, - {ScopeGlobal | ScopeSession, "ndbinfo_max_rows", ""}, - {ScopeGlobal | ScopeSession, "ndb_index_stat_option", ""}, - {ScopeGlobal | ScopeSession, OldPasswords, "0"}, - {ScopeNone, "innodb_version", "5.6.25"}, - {ScopeGlobal, MaxConnections, "151"}, - {ScopeGlobal | ScopeSession, BigTables, "0"}, - {ScopeNone, "skip_external_locking", "1"}, - {ScopeNone, "innodb_sync_array_size", "1"}, - {ScopeSession, "rand_seed2", ""}, - {ScopeGlobal, ValidatePasswordCheckUserName, "0"}, - {ScopeGlobal, "validate_password_number_count", "1"}, - {ScopeSession, "gtid_next", ""}, - {ScopeGlobal | ScopeSession, SQLSelectLimit, "18446744073709551615"}, - {ScopeGlobal, "ndb_show_foreign_key_mock_tables", ""}, - {ScopeNone, "multi_range_count", "256"}, - {ScopeGlobal | ScopeSession, DefaultWeekFormat, "0"}, - {ScopeGlobal | ScopeSession, "binlog_error_action", "IGNORE_ERROR"}, - {ScopeGlobal | ScopeSession, "default_storage_engine", "InnoDB"}, - {ScopeNone, "ft_query_expansion_limit", "20"}, - {ScopeGlobal, MaxConnectErrors, "100"}, - {ScopeGlobal, SyncBinlog, "0"}, - {ScopeNone, "max_digest_length", "1024"}, - {ScopeNone, "innodb_force_load_corrupted", "0"}, - {ScopeNone, "performance_schema_max_table_handles", "4000"}, - {ScopeGlobal, InnodbFastShutdown, "1"}, - {ScopeNone, "ft_max_word_len", "84"}, - {ScopeGlobal, "log_backward_compatible_user_definitions", ""}, - {ScopeNone, "lc_messages_dir", "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"}, - {ScopeGlobal, "ft_boolean_syntax", "+ -><()~*:\"\"&|"}, - {ScopeGlobal, TableDefinitionCache, "-1"}, - {ScopeNone, SkipNameResolve, "0"}, - {ScopeNone, "performance_schema_max_file_handles", "32768"}, - {ScopeSession, "transaction_allow_batching", ""}, - {ScopeGlobal | ScopeSession, SQLModeVar, mysql.DefaultSQLMode}, - {ScopeNone, "performance_schema_max_statement_classes", "168"}, - {ScopeGlobal, "server_id", "0"}, - {ScopeGlobal, "innodb_flushing_avg_loops", "30"}, - {ScopeGlobal | ScopeSession, TmpTableSize, "16777216"}, - {ScopeGlobal, "innodb_max_purge_lag", "0"}, - {ScopeGlobal | ScopeSession, "preload_buffer_size", "32768"}, - {ScopeGlobal, CheckProxyUsers, "0"}, - {ScopeNone, "have_query_cache", "YES"}, - {ScopeGlobal, "innodb_flush_log_at_timeout", "1"}, - {ScopeGlobal, "innodb_max_undo_log_size", ""}, - {ScopeGlobal | ScopeSession, "range_alloc_block_size", "4096"}, - {ScopeGlobal, ConnectTimeout, "10"}, - {ScopeGlobal | ScopeSession, MaxExecutionTime, "0"}, - {ScopeGlobal | ScopeSession, CollationServer, mysql.DefaultCollationName}, - {ScopeNone, "have_rtree_keys", "YES"}, - {ScopeGlobal, "innodb_old_blocks_pct", "37"}, - {ScopeGlobal, "innodb_file_format", "Antelope"}, - {ScopeGlobal, "innodb_compression_failure_threshold_pct", "5"}, - {ScopeNone, "performance_schema_events_waits_history_long_size", "10000"}, - {ScopeGlobal, "innodb_checksum_algorithm", "innodb"}, - {ScopeNone, "innodb_ft_sort_pll_degree", "2"}, - {ScopeNone, "thread_stack", "262144"}, - {ScopeGlobal, 
"relay_log_info_repository", "FILE"}, - {ScopeGlobal | ScopeSession, SQLLogBin, "1"}, - {ScopeGlobal, SuperReadOnly, "0"}, - {ScopeGlobal | ScopeSession, "max_delayed_threads", "20"}, - {ScopeNone, "protocol_version", "10"}, - {ScopeGlobal | ScopeSession, "new", "OFF"}, - {ScopeGlobal | ScopeSession, "myisam_sort_buffer_size", "8388608"}, - {ScopeGlobal | ScopeSession, "optimizer_trace_offset", "-1"}, - {ScopeGlobal, InnodbBufferPoolDumpAtShutdown, "0"}, - {ScopeGlobal | ScopeSession, SQLNotes, "1"}, - {ScopeGlobal, InnodbCmpPerIndexEnabled, "0"}, - {ScopeGlobal, "innodb_ft_server_stopword_table", ""}, - {ScopeNone, "performance_schema_max_file_instances", "7693"}, - {ScopeNone, "log_output", "FILE"}, - {ScopeGlobal, "binlog_group_commit_sync_delay", ""}, - {ScopeGlobal, "binlog_group_commit_sync_no_delay_count", ""}, - {ScopeNone, "have_crypt", "YES"}, - {ScopeGlobal, "innodb_log_write_ahead_size", ""}, - {ScopeNone, "innodb_log_group_home_dir", "./"}, - {ScopeNone, "performance_schema_events_statements_history_size", "10"}, - {ScopeGlobal, GeneralLog, "0"}, - {ScopeGlobal, "validate_password_dictionary_file", ""}, - {ScopeGlobal, BinlogOrderCommits, "1"}, - {ScopeGlobal, "key_cache_division_limit", "100"}, - {ScopeGlobal | ScopeSession, "max_insert_delayed_threads", "20"}, - {ScopeNone, "performance_schema_session_connect_attrs_size", "512"}, - {ScopeGlobal | ScopeSession, "time_zone", "SYSTEM"}, - {ScopeGlobal, "innodb_max_dirty_pages_pct", "75"}, - {ScopeGlobal, InnodbFilePerTable, "1"}, - {ScopeGlobal, InnodbLogCompressedPages, "1"}, - {ScopeNone, "skip_networking", "0"}, - {ScopeGlobal, "innodb_monitor_reset", ""}, - {ScopeNone, "have_ssl", "DISABLED"}, - {ScopeNone, "have_openssl", "DISABLED"}, - {ScopeNone, "ssl_ca", ""}, - {ScopeNone, "ssl_cert", ""}, - {ScopeNone, "ssl_key", ""}, - {ScopeNone, "ssl_cipher", ""}, - {ScopeNone, "tls_version", "TLSv1,TLSv1.1,TLSv1.2"}, - {ScopeNone, "system_time_zone", "CST"}, - {ScopeGlobal, InnodbPrintAllDeadlocks, "0"}, - {ScopeNone, "innodb_autoinc_lock_mode", "1"}, - {ScopeGlobal, "key_buffer_size", "8388608"}, - {ScopeGlobal | ScopeSession, ForeignKeyChecks, "OFF"}, - {ScopeGlobal, "host_cache_size", "279"}, - {ScopeGlobal, DelayKeyWrite, "ON"}, - {ScopeNone, "metadata_locks_cache_size", "1024"}, - {ScopeNone, "innodb_force_recovery", "0"}, - {ScopeGlobal, "innodb_file_format_max", "Antelope"}, - {ScopeGlobal | ScopeSession, "debug", ""}, - {ScopeGlobal, "log_warnings", "1"}, - {ScopeGlobal, OfflineMode, "0"}, - {ScopeGlobal | ScopeSession, InnodbStrictMode, "1"}, - {ScopeGlobal, "innodb_rollback_segments", "128"}, - {ScopeGlobal | ScopeSession, "join_buffer_size", "262144"}, - {ScopeNone, "innodb_mirrored_log_groups", "1"}, - {ScopeGlobal, "max_binlog_size", "1073741824"}, - {ScopeGlobal, "concurrent_insert", "AUTO"}, - {ScopeGlobal, InnodbAdaptiveHashIndex, "1"}, - {ScopeGlobal, InnodbFtEnableStopword, "1"}, - {ScopeGlobal, "general_log_file", "/usr/local/mysql/data/localhost.log"}, - {ScopeGlobal | ScopeSession, InnodbSupportXA, "1"}, - {ScopeGlobal, "innodb_compression_level", "6"}, - {ScopeNone, "innodb_file_format_check", "1"}, - {ScopeNone, "myisam_mmap_size", "18446744073709551615"}, - {ScopeNone, "innodb_buffer_pool_instances", "8"}, - {ScopeGlobal | ScopeSession, BlockEncryptionMode, "aes-128-ecb"}, - {ScopeGlobal | ScopeSession, "max_length_for_sort_data", "1024"}, - {ScopeNone, "character_set_system", "utf8"}, - {ScopeGlobal | ScopeSession, InteractiveTimeout, "28800"}, - {ScopeGlobal, InnodbOptimizeFullTextOnly, "0"}, - 
{ScopeNone, "character_sets_dir", "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"}, - {ScopeGlobal | ScopeSession, QueryCacheType, "OFF"}, - {ScopeNone, "innodb_rollback_on_timeout", "0"}, - {ScopeGlobal | ScopeSession, "query_alloc_block_size", "8192"}, - {ScopeGlobal | ScopeSession, InitConnect, ""}, - {ScopeNone, "have_compress", "YES"}, - {ScopeNone, "thread_concurrency", "10"}, - {ScopeGlobal | ScopeSession, "query_prealloc_size", "8192"}, - {ScopeNone, "relay_log_space_limit", "0"}, - {ScopeGlobal | ScopeSession, MaxUserConnections, "0"}, - {ScopeNone, "performance_schema_max_thread_classes", "50"}, - {ScopeGlobal, "innodb_api_trx_level", "0"}, - {ScopeNone, "disconnect_on_expired_password", "1"}, - {ScopeNone, "performance_schema_max_file_classes", "50"}, - {ScopeGlobal, "expire_logs_days", "0"}, - {ScopeGlobal | ScopeSession, BinlogRowQueryLogEvents, "0"}, - {ScopeGlobal, "default_password_lifetime", ""}, - {ScopeNone, "pid_file", "/usr/local/mysql/data/localhost.pid"}, - {ScopeNone, "innodb_undo_tablespaces", "0"}, - {ScopeGlobal, InnodbStatusOutputLocks, "0"}, - {ScopeNone, "performance_schema_accounts_size", "100"}, - {ScopeGlobal | ScopeSession, "max_error_count", "64"}, - {ScopeGlobal, "max_write_lock_count", "18446744073709551615"}, - {ScopeNone, "performance_schema_max_socket_instances", "322"}, - {ScopeNone, "performance_schema_max_table_instances", "12500"}, - {ScopeGlobal, "innodb_stats_persistent_sample_pages", "20"}, - {ScopeGlobal, "show_compatibility_56", ""}, - {ScopeNone, "innodb_open_files", "2000"}, - {ScopeGlobal, "innodb_spin_wait_delay", "6"}, - {ScopeGlobal, "thread_cache_size", "9"}, - {ScopeGlobal, LogSlowAdminStatements, "0"}, - {ScopeNone, "innodb_checksums", "ON"}, - {ScopeNone, "hostname", ServerHostname}, - {ScopeGlobal | ScopeSession, "auto_increment_offset", "1"}, - {ScopeNone, "ft_stopword_file", "(built-in)"}, - {ScopeGlobal, "innodb_max_dirty_pages_pct_lwm", "0"}, - {ScopeGlobal, LogQueriesNotUsingIndexes, "0"}, - {ScopeSession, "timestamp", ""}, - {ScopeGlobal | ScopeSession, QueryCacheWlockInvalidate, "0"}, - {ScopeGlobal | ScopeSession, "sql_buffer_result", "OFF"}, - {ScopeGlobal | ScopeSession, "character_set_filesystem", "binary"}, - {ScopeGlobal | ScopeSession, "collation_database", mysql.DefaultCollationName}, - {ScopeGlobal | ScopeSession, AutoIncrementIncrement, strconv.FormatInt(DefAutoIncrementIncrement, 10)}, - {ScopeGlobal | ScopeSession, AutoIncrementOffset, strconv.FormatInt(DefAutoIncrementOffset, 10)}, - {ScopeGlobal | ScopeSession, "max_heap_table_size", "16777216"}, - {ScopeGlobal | ScopeSession, "div_precision_increment", "4"}, - {ScopeGlobal, "innodb_lru_scan_depth", "1024"}, - {ScopeGlobal, "innodb_purge_rseg_truncate_frequency", ""}, - {ScopeGlobal | ScopeSession, SQLAutoIsNull, "0"}, - {ScopeNone, "innodb_api_enable_binlog", "0"}, - {ScopeGlobal | ScopeSession, "innodb_ft_user_stopword_table", ""}, - {ScopeNone, "server_id_bits", "32"}, - {ScopeGlobal, "innodb_log_checksum_algorithm", ""}, - {ScopeNone, "innodb_buffer_pool_load_at_startup", "1"}, - {ScopeGlobal | ScopeSession, "sort_buffer_size", "262144"}, - {ScopeGlobal, "innodb_flush_neighbors", "1"}, - {ScopeNone, "innodb_use_sys_malloc", "1"}, - {ScopeSession, PluginLoad, ""}, - {ScopeSession, PluginDir, "/data/deploy/plugin"}, - {ScopeNone, "performance_schema_max_socket_classes", "10"}, - {ScopeNone, "performance_schema_max_stage_classes", "150"}, - {ScopeGlobal, "innodb_purge_batch_size", "300"}, - {ScopeNone, "have_profiling", "NO"}, - {ScopeGlobal | 
ScopeSession, "character_set_client", mysql.DefaultCharset}, - {ScopeGlobal, InnodbBufferPoolDumpNow, "0"}, - {ScopeGlobal, RelayLogPurge, "1"}, - {ScopeGlobal, "ndb_distribution", ""}, - {ScopeGlobal, "myisam_data_pointer_size", "6"}, - {ScopeGlobal, "ndb_optimization_delay", ""}, - {ScopeGlobal, "innodb_ft_num_word_optimize", "2000"}, - {ScopeGlobal | ScopeSession, "max_join_size", "18446744073709551615"}, - {ScopeNone, CoreFile, "0"}, - {ScopeGlobal | ScopeSession, "max_seeks_for_key", "18446744073709551615"}, - {ScopeNone, "innodb_log_buffer_size", "8388608"}, - {ScopeGlobal, "delayed_insert_timeout", "300"}, - {ScopeGlobal, "max_relay_log_size", "0"}, - {ScopeGlobal | ScopeSession, MaxSortLength, "1024"}, - {ScopeNone, "metadata_locks_hash_instances", "8"}, - {ScopeGlobal, "ndb_eventbuffer_free_percent", ""}, - {ScopeNone, "large_files_support", "1"}, - {ScopeGlobal, "binlog_max_flush_queue_time", "0"}, - {ScopeGlobal, "innodb_fill_factor", ""}, - {ScopeGlobal, "log_syslog_facility", ""}, - {ScopeNone, "innodb_ft_min_token_size", "3"}, - {ScopeGlobal | ScopeSession, "transaction_write_set_extraction", ""}, - {ScopeGlobal | ScopeSession, "ndb_blob_write_batch_bytes", ""}, - {ScopeGlobal, "automatic_sp_privileges", "1"}, - {ScopeGlobal, "innodb_flush_sync", ""}, - {ScopeNone, "performance_schema_events_statements_history_long_size", "10000"}, - {ScopeGlobal, "innodb_monitor_disable", ""}, - {ScopeNone, "innodb_doublewrite", "1"}, - {ScopeNone, "log_bin_use_v1_row_events", "0"}, - {ScopeSession, "innodb_optimize_point_storage", ""}, - {ScopeNone, "innodb_api_disable_rowlock", "0"}, - {ScopeGlobal, "innodb_adaptive_flushing_lwm", "10"}, - {ScopeNone, "innodb_log_files_in_group", "2"}, - {ScopeGlobal, InnodbBufferPoolLoadNow, "0"}, - {ScopeNone, "performance_schema_max_rwlock_classes", "40"}, - {ScopeNone, "binlog_gtid_simple_recovery", "1"}, - {ScopeNone, Port, "4000"}, - {ScopeNone, "performance_schema_digests_size", "10000"}, - {ScopeGlobal | ScopeSession, Profiling, "0"}, - {ScopeNone, "lower_case_table_names", "2"}, - {ScopeSession, "rand_seed1", ""}, - {ScopeGlobal, "sha256_password_proxy_users", ""}, - {ScopeGlobal | ScopeSession, SQLQuoteShowCreate, "1"}, - {ScopeGlobal | ScopeSession, "binlogging_impossible_mode", "IGNORE_ERROR"}, - {ScopeGlobal | ScopeSession, QueryCacheSize, "1048576"}, - {ScopeGlobal, "innodb_stats_transient_sample_pages", "8"}, - {ScopeGlobal, InnodbStatsOnMetadata, "0"}, - {ScopeNone, "server_uuid", "00000000-0000-0000-0000-000000000000"}, - {ScopeNone, "open_files_limit", "5000"}, - {ScopeGlobal | ScopeSession, "ndb_force_send", ""}, - {ScopeNone, "skip_show_database", "0"}, - {ScopeGlobal, "log_timestamps", ""}, - {ScopeNone, "version_compile_machine", "x86_64"}, - {ScopeGlobal, "event_scheduler", "OFF"}, - {ScopeGlobal | ScopeSession, "ndb_deferred_constraints", ""}, - {ScopeGlobal, "log_syslog_include_pid", ""}, - {ScopeSession, "last_insert_id", ""}, - {ScopeNone, "innodb_ft_cache_size", "8000000"}, - {ScopeNone, LogBin, "0"}, - {ScopeGlobal, InnodbDisableSortFileCache, "0"}, - {ScopeGlobal, "log_error_verbosity", ""}, - {ScopeNone, "performance_schema_hosts_size", "100"}, - {ScopeGlobal, "innodb_replication_delay", "0"}, - {ScopeGlobal, SlowQueryLog, "0"}, - {ScopeSession, "debug_sync", ""}, - {ScopeGlobal, InnodbStatsAutoRecalc, "1"}, - {ScopeGlobal | ScopeSession, "lc_messages", "en_US"}, - {ScopeGlobal | ScopeSession, "bulk_insert_buffer_size", "8388608"}, - {ScopeGlobal | ScopeSession, BinlogDirectNonTransactionalUpdates, "0"}, - {ScopeGlobal, 
"innodb_change_buffering", "all"}, - {ScopeGlobal | ScopeSession, SQLBigSelects, "1"}, - {ScopeGlobal | ScopeSession, CharacterSetResults, mysql.DefaultCharset}, - {ScopeGlobal, "innodb_max_purge_lag_delay", "0"}, - {ScopeGlobal | ScopeSession, "session_track_schema", ""}, - {ScopeGlobal, "innodb_io_capacity_max", "2000"}, - {ScopeGlobal, "innodb_autoextend_increment", "64"}, - {ScopeGlobal | ScopeSession, "binlog_format", "STATEMENT"}, - {ScopeGlobal | ScopeSession, "optimizer_trace", "enabled=off,one_line=off"}, - {ScopeGlobal | ScopeSession, "read_rnd_buffer_size", "262144"}, - {ScopeNone, "version_comment", "TiDB Server (Apache License 2.0) " + versioninfo.TiDBEdition + " Edition, MySQL 5.7 compatible"}, - {ScopeGlobal | ScopeSession, NetWriteTimeout, "60"}, - {ScopeGlobal, InnodbBufferPoolLoadAbort, "0"}, - {ScopeGlobal | ScopeSession, TxnIsolation, "REPEATABLE-READ"}, - {ScopeGlobal | ScopeSession, TransactionIsolation, "REPEATABLE-READ"}, - {ScopeGlobal | ScopeSession, "collation_connection", mysql.DefaultCollationName}, - {ScopeGlobal | ScopeSession, "transaction_prealloc_size", "4096"}, - {ScopeNone, "performance_schema_setup_objects_size", "100"}, - {ScopeGlobal, "sync_relay_log", "10000"}, - {ScopeGlobal, "innodb_ft_result_cache_limit", "2000000000"}, - {ScopeNone, "innodb_sort_buffer_size", "1048576"}, - {ScopeGlobal, "innodb_ft_enable_diag_print", "OFF"}, - {ScopeNone, "thread_handling", "one-thread-per-connection"}, - {ScopeGlobal, "stored_program_cache", "256"}, - {ScopeNone, "performance_schema_max_mutex_instances", "15906"}, - {ScopeGlobal, "innodb_adaptive_max_sleep_delay", "150000"}, - {ScopeNone, "large_pages", "OFF"}, - {ScopeGlobal | ScopeSession, "session_track_system_variables", ""}, - {ScopeGlobal, "innodb_change_buffer_max_size", "25"}, - {ScopeGlobal, LogBinTrustFunctionCreators, "0"}, - {ScopeNone, "innodb_write_io_threads", "4"}, - {ScopeGlobal, "mysql_native_password_proxy_users", ""}, - {ScopeGlobal, serverReadOnly, "0"}, - {ScopeNone, "large_page_size", "0"}, - {ScopeNone, "table_open_cache_instances", "1"}, - {ScopeGlobal, InnodbStatsPersistent, "1"}, - {ScopeGlobal | ScopeSession, "session_track_state_change", ""}, - {ScopeNone, "optimizer_switch", "index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on"}, - {ScopeGlobal, "delayed_queue_size", "1000"}, - {ScopeNone, "innodb_read_only", "0"}, - {ScopeNone, "datetime_format", "%Y-%m-%d %H:%i:%s"}, - {ScopeGlobal, "log_syslog", ""}, - {ScopeNone, "version", mysql.ServerVersion}, - {ScopeGlobal | ScopeSession, "transaction_alloc_block_size", "8192"}, - {ScopeGlobal, "innodb_large_prefix", "OFF"}, - {ScopeNone, "performance_schema_max_cond_classes", "80"}, - {ScopeGlobal, "innodb_io_capacity", "200"}, - {ScopeGlobal, "max_binlog_cache_size", "18446744073709547520"}, - {ScopeGlobal | ScopeSession, "ndb_index_stat_enable", ""}, - {ScopeGlobal, "executed_gtids_compression_period", ""}, - {ScopeNone, "time_format", "%H:%i:%s"}, - {ScopeGlobal | ScopeSession, OldAlterTable, "0"}, - {ScopeGlobal | ScopeSession, "long_query_time", "10.000000"}, - {ScopeNone, "innodb_use_native_aio", "0"}, - {ScopeGlobal, "log_throttle_queries_not_using_indexes", "0"}, - {ScopeNone, "locked_in_memory", "0"}, - {ScopeNone, "innodb_api_enable_mdl", "0"}, - 
{ScopeGlobal, "binlog_cache_size", "32768"}, - {ScopeGlobal, "innodb_compression_pad_pct_max", "50"}, - {ScopeGlobal, InnodbCommitConcurrency, "0"}, - {ScopeNone, "ft_min_word_len", "4"}, - {ScopeGlobal, EnforceGtidConsistency, "OFF"}, - {ScopeGlobal, SecureAuth, "1"}, - {ScopeNone, "max_tmp_tables", "32"}, - {ScopeGlobal, InnodbRandomReadAhead, "0"}, - {ScopeGlobal | ScopeSession, UniqueChecks, "1"}, - {ScopeGlobal, "internal_tmp_disk_storage_engine", ""}, - {ScopeGlobal | ScopeSession, "myisam_repair_threads", "1"}, - {ScopeGlobal, "ndb_eventbuffer_max_alloc", ""}, - {ScopeGlobal, "innodb_read_ahead_threshold", "56"}, - {ScopeGlobal, "key_cache_block_size", "1024"}, - {ScopeNone, "ndb_recv_thread_cpu_mask", ""}, - {ScopeGlobal, "gtid_purged", ""}, - {ScopeGlobal, "max_binlog_stmt_cache_size", "18446744073709547520"}, - {ScopeGlobal | ScopeSession, "lock_wait_timeout", "31536000"}, - {ScopeGlobal | ScopeSession, "read_buffer_size", "131072"}, - {ScopeNone, "innodb_read_io_threads", "4"}, - {ScopeGlobal | ScopeSession, MaxSpRecursionDepth, "0"}, - {ScopeNone, "ignore_builtin_innodb", "0"}, - {ScopeGlobal, "slow_query_log_file", "/usr/local/mysql/data/localhost-slow.log"}, - {ScopeGlobal, "innodb_thread_sleep_delay", "10000"}, - {ScopeNone, "license", "Apache License 2.0"}, - {ScopeGlobal, "innodb_ft_aux_table", ""}, - {ScopeGlobal | ScopeSession, SQLWarnings, "0"}, - {ScopeGlobal | ScopeSession, KeepFilesOnCreate, "0"}, - {ScopeNone, "innodb_data_file_path", "ibdata1:12M:autoextend"}, - {ScopeNone, "performance_schema_setup_actors_size", "100"}, - {ScopeNone, "innodb_additional_mem_pool_size", "8388608"}, - {ScopeNone, "log_error", "/usr/local/mysql/data/localhost.err"}, - {ScopeGlobal, "binlog_stmt_cache_size", "32768"}, - {ScopeNone, "relay_log_info_file", "relay-log.info"}, - {ScopeNone, "innodb_ft_total_cache_size", "640000000"}, - {ScopeNone, "performance_schema_max_rwlock_instances", "9102"}, - {ScopeGlobal, "table_open_cache", "2000"}, - {ScopeNone, "performance_schema_events_stages_history_long_size", "10000"}, - {ScopeGlobal | ScopeSession, AutoCommit, "1"}, - {ScopeSession, "insert_id", ""}, - {ScopeGlobal | ScopeSession, "default_tmp_storage_engine", "InnoDB"}, - {ScopeGlobal | ScopeSession, "optimizer_search_depth", "62"}, - {ScopeGlobal, "max_points_in_geometry", ""}, - {ScopeGlobal, "innodb_stats_sample_pages", "8"}, - {ScopeGlobal | ScopeSession, "profiling_history_size", "15"}, - {ScopeGlobal | ScopeSession, "character_set_database", mysql.DefaultCharset}, - {ScopeNone, "have_symlink", "YES"}, - {ScopeGlobal | ScopeSession, "storage_engine", "InnoDB"}, - {ScopeGlobal | ScopeSession, "sql_log_off", "0"}, + {Scope: ScopeGlobal, Name: "gtid_mode", Value: "OFF", Type: TypeBool}, + {Scope: ScopeGlobal, Name: FlushTime, Value: "0"}, + {Scope: ScopeNone, Name: "performance_schema_max_mutex_classes", Value: "200"}, + {Scope: ScopeGlobal | ScopeSession, Name: LowPriorityUpdates, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: SessionTrackGtids, Value: "OFF"}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_max_rows", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_option", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: OldPasswords, Value: "0"}, + {Scope: ScopeNone, Name: "innodb_version", Value: "5.6.25"}, + {Scope: ScopeGlobal, Name: MaxConnections, Value: "151"}, + {Scope: ScopeGlobal | ScopeSession, Name: BigTables, Value: "0"}, + {Scope: ScopeNone, Name: "skip_external_locking", Value: "1"}, + {Scope: ScopeNone, Name: 
"innodb_sync_array_size", Value: "1"}, + {Scope: ScopeSession, Name: "rand_seed2", Value: ""}, + {Scope: ScopeGlobal, Name: ValidatePasswordCheckUserName, Value: "0"}, + {Scope: ScopeGlobal, Name: "validate_password_number_count", Value: "1"}, + {Scope: ScopeSession, Name: "gtid_next", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLSelectLimit, Value: "18446744073709551615"}, + {Scope: ScopeGlobal, Name: "ndb_show_foreign_key_mock_tables", Value: ""}, + {Scope: ScopeNone, Name: "multi_range_count", Value: "256"}, + {Scope: ScopeGlobal | ScopeSession, Name: DefaultWeekFormat, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "binlog_error_action", Value: "IGNORE_ERROR"}, + {Scope: ScopeGlobal | ScopeSession, Name: "default_storage_engine", Value: "InnoDB"}, + {Scope: ScopeNone, Name: "ft_query_expansion_limit", Value: "20"}, + {Scope: ScopeGlobal, Name: MaxConnectErrors, Value: "100"}, + {Scope: ScopeGlobal, Name: SyncBinlog, Value: "0"}, + {Scope: ScopeNone, Name: "max_digest_length", Value: "1024"}, + {Scope: ScopeNone, Name: "innodb_force_load_corrupted", Value: "0"}, + {Scope: ScopeNone, Name: "performance_schema_max_table_handles", Value: "4000"}, + {Scope: ScopeGlobal, Name: InnodbFastShutdown, Value: "1"}, + {Scope: ScopeNone, Name: "ft_max_word_len", Value: "84"}, + {Scope: ScopeGlobal, Name: "log_backward_compatible_user_definitions", Value: ""}, + {Scope: ScopeNone, Name: "lc_messages_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"}, + {Scope: ScopeGlobal, Name: "ft_boolean_syntax", Value: "+ -><()~*:\"\"&|"}, + {Scope: ScopeGlobal, Name: TableDefinitionCache, Value: "-1"}, + {Scope: ScopeNone, Name: SkipNameResolve, Value: "0"}, + {Scope: ScopeNone, Name: "performance_schema_max_file_handles", Value: "32768"}, + {Scope: ScopeSession, Name: "transaction_allow_batching", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLModeVar, Value: mysql.DefaultSQLMode}, + {Scope: ScopeNone, Name: "performance_schema_max_statement_classes", Value: "168"}, + {Scope: ScopeGlobal, Name: "server_id", Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_flushing_avg_loops", Value: "30"}, + {Scope: ScopeGlobal | ScopeSession, Name: TmpTableSize, Value: "16777216"}, + {Scope: ScopeGlobal, Name: "innodb_max_purge_lag", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "preload_buffer_size", Value: "32768"}, + {Scope: ScopeGlobal, Name: CheckProxyUsers, Value: "0"}, + {Scope: ScopeNone, Name: "have_query_cache", Value: "YES"}, + {Scope: ScopeGlobal, Name: "innodb_flush_log_at_timeout", Value: "1"}, + {Scope: ScopeGlobal, Name: "innodb_max_undo_log_size", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "range_alloc_block_size", Value: "4096"}, + {Scope: ScopeGlobal, Name: ConnectTimeout, Value: "10"}, + {Scope: ScopeGlobal | ScopeSession, Name: MaxExecutionTime, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: CollationServer, Value: mysql.DefaultCollationName}, + {Scope: ScopeNone, Name: "have_rtree_keys", Value: "YES"}, + {Scope: ScopeGlobal, Name: "innodb_old_blocks_pct", Value: "37"}, + {Scope: ScopeGlobal, Name: "innodb_file_format", Value: "Antelope"}, + {Scope: ScopeGlobal, Name: "innodb_compression_failure_threshold_pct", Value: "5"}, + {Scope: ScopeNone, Name: "performance_schema_events_waits_history_long_size", Value: "10000"}, + {Scope: ScopeGlobal, Name: "innodb_checksum_algorithm", Value: "innodb"}, + {Scope: ScopeNone, Name: "innodb_ft_sort_pll_degree", Value: "2"}, + {Scope: ScopeNone, Name: "thread_stack", Value: "262144"}, + 
{Scope: ScopeGlobal, Name: "relay_log_info_repository", Value: "FILE"}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLLogBin, Value: "1"}, + {Scope: ScopeGlobal, Name: SuperReadOnly, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_delayed_threads", Value: "20"}, + {Scope: ScopeNone, Name: "protocol_version", Value: "10"}, + {Scope: ScopeGlobal | ScopeSession, Name: "new", Value: "OFF"}, + {Scope: ScopeGlobal | ScopeSession, Name: "myisam_sort_buffer_size", Value: "8388608"}, + {Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_offset", Value: "-1"}, + {Scope: ScopeGlobal, Name: InnodbBufferPoolDumpAtShutdown, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLNotes, Value: "1"}, + {Scope: ScopeGlobal, Name: InnodbCmpPerIndexEnabled, Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_ft_server_stopword_table", Value: ""}, + {Scope: ScopeNone, Name: "performance_schema_max_file_instances", Value: "7693"}, + {Scope: ScopeNone, Name: "log_output", Value: "FILE"}, + {Scope: ScopeGlobal, Name: "binlog_group_commit_sync_delay", Value: ""}, + {Scope: ScopeGlobal, Name: "binlog_group_commit_sync_no_delay_count", Value: ""}, + {Scope: ScopeNone, Name: "have_crypt", Value: "YES"}, + {Scope: ScopeGlobal, Name: "innodb_log_write_ahead_size", Value: ""}, + {Scope: ScopeNone, Name: "innodb_log_group_home_dir", Value: "./"}, + {Scope: ScopeNone, Name: "performance_schema_events_statements_history_size", Value: "10"}, + {Scope: ScopeGlobal, Name: GeneralLog, Value: "0"}, + {Scope: ScopeGlobal, Name: "validate_password_dictionary_file", Value: ""}, + {Scope: ScopeGlobal, Name: BinlogOrderCommits, Value: "1"}, + {Scope: ScopeGlobal, Name: "key_cache_division_limit", Value: "100"}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_insert_delayed_threads", Value: "20"}, + {Scope: ScopeNone, Name: "performance_schema_session_connect_attrs_size", Value: "512"}, + {Scope: ScopeGlobal | ScopeSession, Name: "time_zone", Value: "SYSTEM"}, + {Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct", Value: "75"}, + {Scope: ScopeGlobal, Name: InnodbFilePerTable, Value: "1"}, + {Scope: ScopeGlobal, Name: InnodbLogCompressedPages, Value: "1"}, + {Scope: ScopeNone, Name: "skip_networking", Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_monitor_reset", Value: ""}, + {Scope: ScopeNone, Name: "have_ssl", Value: "DISABLED"}, + {Scope: ScopeNone, Name: "have_openssl", Value: "DISABLED"}, + {Scope: ScopeNone, Name: "ssl_ca", Value: ""}, + {Scope: ScopeNone, Name: "ssl_cert", Value: ""}, + {Scope: ScopeNone, Name: "ssl_key", Value: ""}, + {Scope: ScopeNone, Name: "ssl_cipher", Value: ""}, + {Scope: ScopeNone, Name: "tls_version", Value: "TLSv1,TLSv1.1,TLSv1.2"}, + {Scope: ScopeNone, Name: "system_time_zone", Value: "CST"}, + {Scope: ScopeGlobal, Name: InnodbPrintAllDeadlocks, Value: "0"}, + {Scope: ScopeNone, Name: "innodb_autoinc_lock_mode", Value: "1"}, + {Scope: ScopeGlobal, Name: "key_buffer_size", Value: "8388608"}, + {Scope: ScopeGlobal | ScopeSession, Name: ForeignKeyChecks, Value: "OFF"}, + {Scope: ScopeGlobal, Name: "host_cache_size", Value: "279"}, + {Scope: ScopeGlobal, Name: DelayKeyWrite, Value: "ON"}, + {Scope: ScopeNone, Name: "metadata_locks_cache_size", Value: "1024"}, + {Scope: ScopeNone, Name: "innodb_force_recovery", Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_file_format_max", Value: "Antelope"}, + {Scope: ScopeGlobal | ScopeSession, Name: "debug", Value: ""}, + {Scope: ScopeGlobal, Name: "log_warnings", Value: "1"}, + {Scope: ScopeGlobal, Name: OfflineMode, Value: "0"}, 
+ {Scope: ScopeGlobal | ScopeSession, Name: InnodbStrictMode, Value: "1"}, + {Scope: ScopeGlobal, Name: "innodb_rollback_segments", Value: "128"}, + {Scope: ScopeGlobal | ScopeSession, Name: "join_buffer_size", Value: "262144"}, + {Scope: ScopeNone, Name: "innodb_mirrored_log_groups", Value: "1"}, + {Scope: ScopeGlobal, Name: "max_binlog_size", Value: "1073741824"}, + {Scope: ScopeGlobal, Name: "concurrent_insert", Value: "AUTO"}, + {Scope: ScopeGlobal, Name: InnodbAdaptiveHashIndex, Value: "1"}, + {Scope: ScopeGlobal, Name: InnodbFtEnableStopword, Value: "1"}, + {Scope: ScopeGlobal, Name: "general_log_file", Value: "/usr/local/mysql/data/localhost.log"}, + {Scope: ScopeGlobal | ScopeSession, Name: InnodbSupportXA, Value: "1"}, + {Scope: ScopeGlobal, Name: "innodb_compression_level", Value: "6"}, + {Scope: ScopeNone, Name: "innodb_file_format_check", Value: "1"}, + {Scope: ScopeNone, Name: "myisam_mmap_size", Value: "18446744073709551615"}, + {Scope: ScopeNone, Name: "innodb_buffer_pool_instances", Value: "8"}, + {Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: "aes-128-ecb"}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_length_for_sort_data", Value: "1024"}, + {Scope: ScopeNone, Name: "character_set_system", Value: "utf8"}, + {Scope: ScopeGlobal | ScopeSession, Name: InteractiveTimeout, Value: "28800"}, + {Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: "0"}, + {Scope: ScopeNone, Name: "character_sets_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"}, + {Scope: ScopeGlobal | ScopeSession, Name: QueryCacheType, Value: "OFF"}, + {Scope: ScopeNone, Name: "innodb_rollback_on_timeout", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "query_alloc_block_size", Value: "8192"}, + {Scope: ScopeGlobal | ScopeSession, Name: InitConnect, Value: ""}, + {Scope: ScopeNone, Name: "have_compress", Value: "YES"}, + {Scope: ScopeNone, Name: "thread_concurrency", Value: "10"}, + {Scope: ScopeGlobal | ScopeSession, Name: "query_prealloc_size", Value: "8192"}, + {Scope: ScopeNone, Name: "relay_log_space_limit", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: MaxUserConnections, Value: "0"}, + {Scope: ScopeNone, Name: "performance_schema_max_thread_classes", Value: "50"}, + {Scope: ScopeGlobal, Name: "innodb_api_trx_level", Value: "0"}, + {Scope: ScopeNone, Name: "disconnect_on_expired_password", Value: "1"}, + {Scope: ScopeNone, Name: "performance_schema_max_file_classes", Value: "50"}, + {Scope: ScopeGlobal, Name: "expire_logs_days", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: BinlogRowQueryLogEvents, Value: "0"}, + {Scope: ScopeGlobal, Name: "default_password_lifetime", Value: ""}, + {Scope: ScopeNone, Name: "pid_file", Value: "/usr/local/mysql/data/localhost.pid"}, + {Scope: ScopeNone, Name: "innodb_undo_tablespaces", Value: "0"}, + {Scope: ScopeGlobal, Name: InnodbStatusOutputLocks, Value: "0"}, + {Scope: ScopeNone, Name: "performance_schema_accounts_size", Value: "100"}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_error_count", Value: "64"}, + {Scope: ScopeGlobal, Name: "max_write_lock_count", Value: "18446744073709551615"}, + {Scope: ScopeNone, Name: "performance_schema_max_socket_instances", Value: "322"}, + {Scope: ScopeNone, Name: "performance_schema_max_table_instances", Value: "12500"}, + {Scope: ScopeGlobal, Name: "innodb_stats_persistent_sample_pages", Value: "20"}, + {Scope: ScopeGlobal, Name: "show_compatibility_56", Value: ""}, + {Scope: ScopeNone, Name: "innodb_open_files", Value: "2000"}, + {Scope: 
ScopeGlobal, Name: "innodb_spin_wait_delay", Value: "6"}, + {Scope: ScopeGlobal, Name: "thread_cache_size", Value: "9"}, + {Scope: ScopeGlobal, Name: LogSlowAdminStatements, Value: "0"}, + {Scope: ScopeNone, Name: "innodb_checksums", Value: "ON"}, + {Scope: ScopeNone, Name: "hostname", Value: ServerHostname}, + {Scope: ScopeGlobal | ScopeSession, Name: "auto_increment_offset", Value: "1"}, + {Scope: ScopeNone, Name: "ft_stopword_file", Value: "(built-in)"}, + {Scope: ScopeGlobal, Name: "innodb_max_dirty_pages_pct_lwm", Value: "0"}, + {Scope: ScopeGlobal, Name: LogQueriesNotUsingIndexes, Value: "0"}, + {Scope: ScopeSession, Name: "timestamp", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: QueryCacheWlockInvalidate, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "sql_buffer_result", Value: "OFF"}, + {Scope: ScopeGlobal | ScopeSession, Name: "character_set_filesystem", Value: "binary"}, + {Scope: ScopeGlobal | ScopeSession, Name: "collation_database", Value: mysql.DefaultCollationName}, + {Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementIncrement, Value: strconv.FormatInt(DefAutoIncrementIncrement, 10)}, + {Scope: ScopeGlobal | ScopeSession, Name: AutoIncrementOffset, Value: strconv.FormatInt(DefAutoIncrementOffset, 10)}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_heap_table_size", Value: "16777216"}, + {Scope: ScopeGlobal | ScopeSession, Name: "div_precision_increment", Value: "4"}, + {Scope: ScopeGlobal, Name: "innodb_lru_scan_depth", Value: "1024"}, + {Scope: ScopeGlobal, Name: "innodb_purge_rseg_truncate_frequency", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLAutoIsNull, Value: "0"}, + {Scope: ScopeNone, Name: "innodb_api_enable_binlog", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "innodb_ft_user_stopword_table", Value: ""}, + {Scope: ScopeNone, Name: "server_id_bits", Value: "32"}, + {Scope: ScopeGlobal, Name: "innodb_log_checksum_algorithm", Value: ""}, + {Scope: ScopeNone, Name: "innodb_buffer_pool_load_at_startup", Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: "sort_buffer_size", Value: "262144"}, + {Scope: ScopeGlobal, Name: "innodb_flush_neighbors", Value: "1"}, + {Scope: ScopeNone, Name: "innodb_use_sys_malloc", Value: "1"}, + {Scope: ScopeSession, Name: PluginLoad, Value: ""}, + {Scope: ScopeSession, Name: PluginDir, Value: "/data/deploy/plugin"}, + {Scope: ScopeNone, Name: "performance_schema_max_socket_classes", Value: "10"}, + {Scope: ScopeNone, Name: "performance_schema_max_stage_classes", Value: "150"}, + {Scope: ScopeGlobal, Name: "innodb_purge_batch_size", Value: "300"}, + {Scope: ScopeNone, Name: "have_profiling", Value: "NO"}, + {Scope: ScopeGlobal | ScopeSession, Name: "character_set_client", Value: mysql.DefaultCharset}, + {Scope: ScopeGlobal, Name: InnodbBufferPoolDumpNow, Value: "0"}, + {Scope: ScopeGlobal, Name: RelayLogPurge, Value: "1"}, + {Scope: ScopeGlobal, Name: "ndb_distribution", Value: ""}, + {Scope: ScopeGlobal, Name: "myisam_data_pointer_size", Value: "6"}, + {Scope: ScopeGlobal, Name: "ndb_optimization_delay", Value: ""}, + {Scope: ScopeGlobal, Name: "innodb_ft_num_word_optimize", Value: "2000"}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_join_size", Value: "18446744073709551615"}, + {Scope: ScopeNone, Name: CoreFile, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_seeks_for_key", Value: "18446744073709551615"}, + {Scope: ScopeNone, Name: "innodb_log_buffer_size", Value: "8388608"}, + {Scope: ScopeGlobal, Name: "delayed_insert_timeout", Value: "300"}, + {Scope: 
ScopeGlobal, Name: "max_relay_log_size", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: MaxSortLength, Value: "1024"}, + {Scope: ScopeNone, Name: "metadata_locks_hash_instances", Value: "8"}, + {Scope: ScopeGlobal, Name: "ndb_eventbuffer_free_percent", Value: ""}, + {Scope: ScopeNone, Name: "large_files_support", Value: "1"}, + {Scope: ScopeGlobal, Name: "binlog_max_flush_queue_time", Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_fill_factor", Value: ""}, + {Scope: ScopeGlobal, Name: "log_syslog_facility", Value: ""}, + {Scope: ScopeNone, Name: "innodb_ft_min_token_size", Value: "3"}, + {Scope: ScopeGlobal | ScopeSession, Name: "transaction_write_set_extraction", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndb_blob_write_batch_bytes", Value: ""}, + {Scope: ScopeGlobal, Name: "automatic_sp_privileges", Value: "1"}, + {Scope: ScopeGlobal, Name: "innodb_flush_sync", Value: ""}, + {Scope: ScopeNone, Name: "performance_schema_events_statements_history_long_size", Value: "10000"}, + {Scope: ScopeGlobal, Name: "innodb_monitor_disable", Value: ""}, + {Scope: ScopeNone, Name: "innodb_doublewrite", Value: "1"}, + {Scope: ScopeNone, Name: "log_bin_use_v1_row_events", Value: "0"}, + {Scope: ScopeSession, Name: "innodb_optimize_point_storage", Value: ""}, + {Scope: ScopeNone, Name: "innodb_api_disable_rowlock", Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_adaptive_flushing_lwm", Value: "10"}, + {Scope: ScopeNone, Name: "innodb_log_files_in_group", Value: "2"}, + {Scope: ScopeGlobal, Name: InnodbBufferPoolLoadNow, Value: "0"}, + {Scope: ScopeNone, Name: "performance_schema_max_rwlock_classes", Value: "40"}, + {Scope: ScopeNone, Name: "binlog_gtid_simple_recovery", Value: "1"}, + {Scope: ScopeNone, Name: Port, Value: "4000"}, + {Scope: ScopeNone, Name: "performance_schema_digests_size", Value: "10000"}, + {Scope: ScopeGlobal | ScopeSession, Name: Profiling, Value: "0"}, + {Scope: ScopeNone, Name: "lower_case_table_names", Value: "2"}, + {Scope: ScopeSession, Name: "rand_seed1", Value: ""}, + {Scope: ScopeGlobal, Name: "sha256_password_proxy_users", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLQuoteShowCreate, Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: "binlogging_impossible_mode", Value: "IGNORE_ERROR"}, + {Scope: ScopeGlobal | ScopeSession, Name: QueryCacheSize, Value: "1048576"}, + {Scope: ScopeGlobal, Name: "innodb_stats_transient_sample_pages", Value: "8"}, + {Scope: ScopeGlobal, Name: InnodbStatsOnMetadata, Value: "0"}, + {Scope: ScopeNone, Name: "server_uuid", Value: "00000000-0000-0000-0000-000000000000"}, + {Scope: ScopeNone, Name: "open_files_limit", Value: "5000"}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndb_force_send", Value: ""}, + {Scope: ScopeNone, Name: "skip_show_database", Value: "0"}, + {Scope: ScopeGlobal, Name: "log_timestamps", Value: ""}, + {Scope: ScopeNone, Name: "version_compile_machine", Value: "x86_64"}, + {Scope: ScopeGlobal, Name: "event_scheduler", Value: "OFF"}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndb_deferred_constraints", Value: ""}, + {Scope: ScopeGlobal, Name: "log_syslog_include_pid", Value: ""}, + {Scope: ScopeSession, Name: "last_insert_id", Value: ""}, + {Scope: ScopeNone, Name: "innodb_ft_cache_size", Value: "8000000"}, + {Scope: ScopeNone, Name: LogBin, Value: "0"}, + {Scope: ScopeGlobal, Name: InnodbDisableSortFileCache, Value: "0"}, + {Scope: ScopeGlobal, Name: "log_error_verbosity", Value: ""}, + {Scope: ScopeNone, Name: "performance_schema_hosts_size", Value: "100"}, + {Scope: 
ScopeGlobal, Name: "innodb_replication_delay", Value: "0"}, + {Scope: ScopeGlobal, Name: SlowQueryLog, Value: "0"}, + {Scope: ScopeSession, Name: "debug_sync", Value: ""}, + {Scope: ScopeGlobal, Name: InnodbStatsAutoRecalc, Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: "lc_messages", Value: "en_US"}, + {Scope: ScopeGlobal | ScopeSession, Name: "bulk_insert_buffer_size", Value: "8388608"}, + {Scope: ScopeGlobal | ScopeSession, Name: BinlogDirectNonTransactionalUpdates, Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_change_buffering", Value: "all"}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLBigSelects, Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: CharacterSetResults, Value: mysql.DefaultCharset}, + {Scope: ScopeGlobal, Name: "innodb_max_purge_lag_delay", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "session_track_schema", Value: ""}, + {Scope: ScopeGlobal, Name: "innodb_io_capacity_max", Value: "2000"}, + {Scope: ScopeGlobal, Name: "innodb_autoextend_increment", Value: "64"}, + {Scope: ScopeGlobal | ScopeSession, Name: "binlog_format", Value: "STATEMENT"}, + {Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace", Value: "enabled=off,one_line=off"}, + {Scope: ScopeGlobal | ScopeSession, Name: "read_rnd_buffer_size", Value: "262144"}, + {Scope: ScopeNone, Name: "version_comment", Value: "TiDB Server (Apache License 2.0) " + versioninfo.TiDBEdition + " Edition, MySQL 5.7 compatible"}, + {Scope: ScopeGlobal | ScopeSession, Name: NetWriteTimeout, Value: "60"}, + {Scope: ScopeGlobal, Name: InnodbBufferPoolLoadAbort, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: TxnIsolation, Value: "REPEATABLE-READ"}, + {Scope: ScopeGlobal | ScopeSession, Name: TransactionIsolation, Value: "REPEATABLE-READ"}, + {Scope: ScopeGlobal | ScopeSession, Name: "collation_connection", Value: mysql.DefaultCollationName}, + {Scope: ScopeGlobal | ScopeSession, Name: "transaction_prealloc_size", Value: "4096"}, + {Scope: ScopeNone, Name: "performance_schema_setup_objects_size", Value: "100"}, + {Scope: ScopeGlobal, Name: "sync_relay_log", Value: "10000"}, + {Scope: ScopeGlobal, Name: "innodb_ft_result_cache_limit", Value: "2000000000"}, + {Scope: ScopeNone, Name: "innodb_sort_buffer_size", Value: "1048576"}, + {Scope: ScopeGlobal, Name: "innodb_ft_enable_diag_print", Value: "OFF"}, + {Scope: ScopeNone, Name: "thread_handling", Value: "one-thread-per-connection"}, + {Scope: ScopeGlobal, Name: "stored_program_cache", Value: "256"}, + {Scope: ScopeNone, Name: "performance_schema_max_mutex_instances", Value: "15906"}, + {Scope: ScopeGlobal, Name: "innodb_adaptive_max_sleep_delay", Value: "150000"}, + {Scope: ScopeNone, Name: "large_pages", Value: "OFF"}, + {Scope: ScopeGlobal | ScopeSession, Name: "session_track_system_variables", Value: ""}, + {Scope: ScopeGlobal, Name: "innodb_change_buffer_max_size", Value: "25"}, + {Scope: ScopeGlobal, Name: LogBinTrustFunctionCreators, Value: "0"}, + {Scope: ScopeNone, Name: "innodb_write_io_threads", Value: "4"}, + {Scope: ScopeGlobal, Name: "mysql_native_password_proxy_users", Value: ""}, + {Scope: ScopeGlobal, Name: serverReadOnly, Value: "0"}, + {Scope: ScopeNone, Name: "large_page_size", Value: "0"}, + {Scope: ScopeNone, Name: "table_open_cache_instances", Value: "1"}, + {Scope: ScopeGlobal, Name: InnodbStatsPersistent, Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: "session_track_state_change", Value: ""}, + {Scope: ScopeNone, Name: "optimizer_switch", Value: 
"index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on"}, + {Scope: ScopeGlobal, Name: "delayed_queue_size", Value: "1000"}, + {Scope: ScopeNone, Name: "innodb_read_only", Value: "0"}, + {Scope: ScopeNone, Name: "datetime_format", Value: "%Y-%m-%d %H:%i:%s"}, + {Scope: ScopeGlobal, Name: "log_syslog", Value: ""}, + {Scope: ScopeNone, Name: "version", Value: mysql.ServerVersion}, + {Scope: ScopeGlobal | ScopeSession, Name: "transaction_alloc_block_size", Value: "8192"}, + {Scope: ScopeGlobal, Name: "innodb_large_prefix", Value: "OFF"}, + {Scope: ScopeNone, Name: "performance_schema_max_cond_classes", Value: "80"}, + {Scope: ScopeGlobal, Name: "innodb_io_capacity", Value: "200"}, + {Scope: ScopeGlobal, Name: "max_binlog_cache_size", Value: "18446744073709547520"}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndb_index_stat_enable", Value: ""}, + {Scope: ScopeGlobal, Name: "executed_gtids_compression_period", Value: ""}, + {Scope: ScopeNone, Name: "time_format", Value: "%H:%i:%s"}, + {Scope: ScopeGlobal | ScopeSession, Name: OldAlterTable, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "long_query_time", Value: "10.000000"}, + {Scope: ScopeNone, Name: "innodb_use_native_aio", Value: "0"}, + {Scope: ScopeGlobal, Name: "log_throttle_queries_not_using_indexes", Value: "0"}, + {Scope: ScopeNone, Name: "locked_in_memory", Value: "0"}, + {Scope: ScopeNone, Name: "innodb_api_enable_mdl", Value: "0"}, + {Scope: ScopeGlobal, Name: "binlog_cache_size", Value: "32768"}, + {Scope: ScopeGlobal, Name: "innodb_compression_pad_pct_max", Value: "50"}, + {Scope: ScopeGlobal, Name: InnodbCommitConcurrency, Value: "0"}, + {Scope: ScopeNone, Name: "ft_min_word_len", Value: "4"}, + {Scope: ScopeGlobal, Name: EnforceGtidConsistency, Value: "OFF"}, + {Scope: ScopeGlobal, Name: SecureAuth, Value: "1"}, + {Scope: ScopeNone, Name: "max_tmp_tables", Value: "32"}, + {Scope: ScopeGlobal, Name: InnodbRandomReadAhead, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: UniqueChecks, Value: "1"}, + {Scope: ScopeGlobal, Name: "internal_tmp_disk_storage_engine", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "myisam_repair_threads", Value: "1"}, + {Scope: ScopeGlobal, Name: "ndb_eventbuffer_max_alloc", Value: ""}, + {Scope: ScopeGlobal, Name: "innodb_read_ahead_threshold", Value: "56"}, + {Scope: ScopeGlobal, Name: "key_cache_block_size", Value: "1024"}, + {Scope: ScopeNone, Name: "ndb_recv_thread_cpu_mask", Value: ""}, + {Scope: ScopeGlobal, Name: "gtid_purged", Value: ""}, + {Scope: ScopeGlobal, Name: "max_binlog_stmt_cache_size", Value: "18446744073709547520"}, + {Scope: ScopeGlobal | ScopeSession, Name: "lock_wait_timeout", Value: "31536000"}, + {Scope: ScopeGlobal | ScopeSession, Name: "read_buffer_size", Value: "131072"}, + {Scope: ScopeNone, Name: "innodb_read_io_threads", Value: "4"}, + {Scope: ScopeGlobal | ScopeSession, Name: MaxSpRecursionDepth, Value: "0"}, + {Scope: ScopeNone, Name: "ignore_builtin_innodb", Value: "0"}, + {Scope: ScopeGlobal, Name: "slow_query_log_file", Value: "/usr/local/mysql/data/localhost-slow.log"}, + {Scope: ScopeGlobal, Name: "innodb_thread_sleep_delay", Value: "10000"}, + {Scope: ScopeNone, Name: "license", Value: "Apache License 2.0"}, + {Scope: ScopeGlobal, Name: "innodb_ft_aux_table", 
Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLWarnings, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: KeepFilesOnCreate, Value: "0"}, + {Scope: ScopeNone, Name: "innodb_data_file_path", Value: "ibdata1:12M:autoextend"}, + {Scope: ScopeNone, Name: "performance_schema_setup_actors_size", Value: "100"}, + {Scope: ScopeNone, Name: "innodb_additional_mem_pool_size", Value: "8388608"}, + {Scope: ScopeNone, Name: "log_error", Value: "/usr/local/mysql/data/localhost.err"}, + {Scope: ScopeGlobal, Name: "binlog_stmt_cache_size", Value: "32768"}, + {Scope: ScopeNone, Name: "relay_log_info_file", Value: "relay-log.info"}, + {Scope: ScopeNone, Name: "innodb_ft_total_cache_size", Value: "640000000"}, + {Scope: ScopeNone, Name: "performance_schema_max_rwlock_instances", Value: "9102"}, + {Scope: ScopeGlobal, Name: "table_open_cache", Value: "2000"}, + {Scope: ScopeNone, Name: "performance_schema_events_stages_history_long_size", Value: "10000"}, + {Scope: ScopeGlobal | ScopeSession, Name: AutoCommit, Value: "1"}, + {Scope: ScopeSession, Name: "insert_id", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "default_tmp_storage_engine", Value: "InnoDB"}, + {Scope: ScopeGlobal | ScopeSession, Name: "optimizer_search_depth", Value: "62"}, + {Scope: ScopeGlobal, Name: "max_points_in_geometry", Value: ""}, + {Scope: ScopeGlobal, Name: "innodb_stats_sample_pages", Value: "8"}, + {Scope: ScopeGlobal | ScopeSession, Name: "profiling_history_size", Value: "15"}, + {Scope: ScopeGlobal | ScopeSession, Name: "character_set_database", Value: mysql.DefaultCharset}, + {Scope: ScopeNone, Name: "have_symlink", Value: "YES"}, + {Scope: ScopeGlobal | ScopeSession, Name: "storage_engine", Value: "InnoDB"}, + {Scope: ScopeGlobal | ScopeSession, Name: "sql_log_off", Value: "0"}, // In MySQL, the default value of `explicit_defaults_for_timestamp` is `0`. // But In TiDB, it's set to `1` to be consistent with TiDB timestamp behavior. 
// See: https://github.com/pingcap/tidb/pull/6068 for details - {ScopeNone, "explicit_defaults_for_timestamp", "1"}, - {ScopeNone, "performance_schema_events_waits_history_size", "10"}, - {ScopeGlobal, "log_syslog_tag", ""}, - {ScopeGlobal | ScopeSession, TxReadOnly, "0"}, - {ScopeGlobal | ScopeSession, TransactionReadOnly, "0"}, - {ScopeGlobal, "innodb_undo_log_truncate", ""}, - {ScopeSession, "innodb_create_intrinsic", ""}, - {ScopeGlobal, "gtid_executed_compression_period", ""}, - {ScopeGlobal, "ndb_log_empty_epochs", ""}, - {ScopeGlobal, MaxPreparedStmtCount, strconv.FormatInt(DefMaxPreparedStmtCount, 10)}, - {ScopeNone, "have_geometry", "YES"}, - {ScopeGlobal | ScopeSession, "optimizer_trace_max_mem_size", "16384"}, - {ScopeGlobal | ScopeSession, "net_retry_count", "10"}, - {ScopeSession, "ndb_table_no_logging", ""}, - {ScopeGlobal | ScopeSession, "optimizer_trace_features", "greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on"}, - {ScopeGlobal, "innodb_flush_log_at_trx_commit", "1"}, - {ScopeGlobal, "rewriter_enabled", ""}, - {ScopeGlobal, "query_cache_min_res_unit", "4096"}, - {ScopeGlobal | ScopeSession, "updatable_views_with_limit", "YES"}, - {ScopeGlobal | ScopeSession, "optimizer_prune_level", "1"}, - {ScopeGlobal | ScopeSession, "completion_type", "NO_CHAIN"}, - {ScopeGlobal, "binlog_checksum", "CRC32"}, - {ScopeNone, "report_port", "3306"}, - {ScopeGlobal | ScopeSession, ShowOldTemporals, "0"}, - {ScopeGlobal, "query_cache_limit", "1048576"}, - {ScopeGlobal, "innodb_buffer_pool_size", "134217728"}, - {ScopeGlobal, InnodbAdaptiveFlushing, "1"}, - {ScopeNone, "datadir", "/usr/local/mysql/data/"}, - {ScopeGlobal | ScopeSession, WaitTimeout, strconv.FormatInt(DefWaitTimeout, 10)}, - {ScopeGlobal, "innodb_monitor_enable", ""}, - {ScopeNone, "date_format", "%Y-%m-%d"}, - {ScopeGlobal, "innodb_buffer_pool_filename", "ib_buffer_pool"}, - {ScopeGlobal, "slow_launch_time", "2"}, - {ScopeGlobal | ScopeSession, "ndb_use_transactions", ""}, - {ScopeNone, "innodb_purge_threads", "1"}, - {ScopeGlobal, "innodb_concurrency_tickets", "5000"}, - {ScopeGlobal, "innodb_monitor_reset_all", ""}, - {ScopeNone, "performance_schema_users_size", "100"}, - {ScopeGlobal, "ndb_log_updated_only", ""}, - {ScopeNone, "basedir", "/usr/local/mysql"}, - {ScopeGlobal, "innodb_old_blocks_time", "1000"}, - {ScopeGlobal, "innodb_stats_method", "nulls_equal"}, - {ScopeGlobal | ScopeSession, InnodbLockWaitTimeout, strconv.FormatInt(DefInnodbLockWaitTimeout, 10)}, - {ScopeGlobal, LocalInFile, "1"}, - {ScopeGlobal | ScopeSession, "myisam_stats_method", "nulls_unequal"}, - {ScopeNone, "version_compile_os", "osx10.8"}, - {ScopeNone, "relay_log_recovery", "0"}, - {ScopeNone, "old", "0"}, - {ScopeGlobal | ScopeSession, InnodbTableLocks, "1"}, - {ScopeNone, PerformanceSchema, "0"}, - {ScopeNone, "myisam_recover_options", "OFF"}, - {ScopeGlobal | ScopeSession, NetBufferLength, "16384"}, - {ScopeGlobal | ScopeSession, "binlog_row_image", "FULL"}, - {ScopeNone, "innodb_locks_unsafe_for_binlog", "0"}, - {ScopeSession, "rbr_exec_mode", ""}, - {ScopeGlobal, "myisam_max_sort_file_size", "9223372036853727232"}, - {ScopeNone, "back_log", "80"}, - {ScopeNone, "lower_case_file_system", "1"}, - {ScopeGlobal | ScopeSession, GroupConcatMaxLen, "1024"}, - {ScopeSession, "pseudo_thread_id", ""}, - {ScopeNone, "socket", "/tmp/myssock"}, - {ScopeNone, "have_dynamic_loading", "YES"}, - {ScopeGlobal, "rewriter_verbose", ""}, - {ScopeGlobal, "innodb_undo_logs", "128"}, - {ScopeNone, 
"performance_schema_max_cond_instances", "3504"}, - {ScopeGlobal, "delayed_insert_limit", "100"}, - {ScopeGlobal, Flush, "0"}, - {ScopeGlobal | ScopeSession, "eq_range_index_dive_limit", "10"}, - {ScopeNone, "performance_schema_events_stages_history_size", "10"}, - {ScopeGlobal | ScopeSession, "character_set_connection", mysql.DefaultCharset}, - {ScopeGlobal, MyISAMUseMmap, "0"}, - {ScopeGlobal | ScopeSession, "ndb_join_pushdown", ""}, - {ScopeGlobal | ScopeSession, CharacterSetServer, mysql.DefaultCharset}, - {ScopeGlobal, "validate_password_special_char_count", "1"}, - {ScopeNone, "performance_schema_max_thread_instances", "402"}, - {ScopeGlobal | ScopeSession, "ndbinfo_show_hidden", ""}, - {ScopeGlobal | ScopeSession, "net_read_timeout", "30"}, - {ScopeNone, "innodb_page_size", "16384"}, - {ScopeGlobal | ScopeSession, MaxAllowedPacket, "67108864"}, - {ScopeNone, "innodb_log_file_size", "50331648"}, - {ScopeGlobal, "sync_relay_log_info", "10000"}, - {ScopeGlobal | ScopeSession, "optimizer_trace_limit", "1"}, - {ScopeNone, "innodb_ft_max_token_size", "84"}, - {ScopeGlobal, "validate_password_length", "8"}, - {ScopeGlobal, "ndb_log_binlog_index", ""}, - {ScopeGlobal, "innodb_api_bk_commit_interval", "5"}, - {ScopeNone, "innodb_undo_directory", "."}, - {ScopeNone, "bind_address", "*"}, - {ScopeGlobal, "innodb_sync_spin_loops", "30"}, - {ScopeGlobal | ScopeSession, SQLSafeUpdates, "0"}, - {ScopeNone, "tmpdir", "/var/tmp/"}, - {ScopeGlobal, "innodb_thread_concurrency", "0"}, - {ScopeGlobal, "innodb_buffer_pool_dump_pct", ""}, - {ScopeGlobal | ScopeSession, "lc_time_names", "en_US"}, - {ScopeGlobal | ScopeSession, "max_statement_time", ""}, - {ScopeGlobal | ScopeSession, EndMakersInJSON, "0"}, - {ScopeGlobal, AvoidTemporalUpgrade, "0"}, - {ScopeGlobal, "key_cache_age_threshold", "300"}, - {ScopeGlobal, InnodbStatusOutput, "0"}, - {ScopeSession, "identity", ""}, - {ScopeGlobal | ScopeSession, "min_examined_row_limit", "0"}, - {ScopeGlobal, "sync_frm", "ON"}, - {ScopeGlobal, "innodb_online_alter_log_max_size", "134217728"}, - {ScopeSession, WarningCount, "0"}, - {ScopeSession, ErrorCount, "0"}, - {ScopeGlobal | ScopeSession, "information_schema_stats_expiry", "86400"}, - {ScopeGlobal, "thread_pool_size", "16"}, - {ScopeGlobal | ScopeSession, WindowingUseHighPrecision, "ON"}, + {Scope: ScopeNone, Name: "explicit_defaults_for_timestamp", Value: "1"}, + {Scope: ScopeNone, Name: "performance_schema_events_waits_history_size", Value: "10"}, + {Scope: ScopeGlobal, Name: "log_syslog_tag", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: TxReadOnly, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: TransactionReadOnly, Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_undo_log_truncate", Value: ""}, + {Scope: ScopeSession, Name: "innodb_create_intrinsic", Value: ""}, + {Scope: ScopeGlobal, Name: "gtid_executed_compression_period", Value: ""}, + {Scope: ScopeGlobal, Name: "ndb_log_empty_epochs", Value: ""}, + {Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10)}, + {Scope: ScopeNone, Name: "have_geometry", Value: "YES"}, + {Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_max_mem_size", Value: "16384"}, + {Scope: ScopeGlobal | ScopeSession, Name: "net_retry_count", Value: "10"}, + {Scope: ScopeSession, Name: "ndb_table_no_logging", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_features", Value: "greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on"}, + {Scope: ScopeGlobal, Name: 
"innodb_flush_log_at_trx_commit", Value: "1"}, + {Scope: ScopeGlobal, Name: "rewriter_enabled", Value: ""}, + {Scope: ScopeGlobal, Name: "query_cache_min_res_unit", Value: "4096"}, + {Scope: ScopeGlobal | ScopeSession, Name: "updatable_views_with_limit", Value: "YES"}, + {Scope: ScopeGlobal | ScopeSession, Name: "optimizer_prune_level", Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: "completion_type", Value: "NO_CHAIN"}, + {Scope: ScopeGlobal, Name: "binlog_checksum", Value: "CRC32"}, + {Scope: ScopeNone, Name: "report_port", Value: "3306"}, + {Scope: ScopeGlobal | ScopeSession, Name: ShowOldTemporals, Value: "0"}, + {Scope: ScopeGlobal, Name: "query_cache_limit", Value: "1048576"}, + {Scope: ScopeGlobal, Name: "innodb_buffer_pool_size", Value: "134217728"}, + {Scope: ScopeGlobal, Name: InnodbAdaptiveFlushing, Value: "1"}, + {Scope: ScopeNone, Name: "datadir", Value: "/usr/local/mysql/data/"}, + {Scope: ScopeGlobal | ScopeSession, Name: WaitTimeout, Value: strconv.FormatInt(DefWaitTimeout, 10)}, + {Scope: ScopeGlobal, Name: "innodb_monitor_enable", Value: ""}, + {Scope: ScopeNone, Name: "date_format", Value: "%Y-%m-%d"}, + {Scope: ScopeGlobal, Name: "innodb_buffer_pool_filename", Value: "ib_buffer_pool"}, + {Scope: ScopeGlobal, Name: "slow_launch_time", Value: "2"}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndb_use_transactions", Value: ""}, + {Scope: ScopeNone, Name: "innodb_purge_threads", Value: "1"}, + {Scope: ScopeGlobal, Name: "innodb_concurrency_tickets", Value: "5000"}, + {Scope: ScopeGlobal, Name: "innodb_monitor_reset_all", Value: ""}, + {Scope: ScopeNone, Name: "performance_schema_users_size", Value: "100"}, + {Scope: ScopeGlobal, Name: "ndb_log_updated_only", Value: ""}, + {Scope: ScopeNone, Name: "basedir", Value: "/usr/local/mysql"}, + {Scope: ScopeGlobal, Name: "innodb_old_blocks_time", Value: "1000"}, + {Scope: ScopeGlobal, Name: "innodb_stats_method", Value: "nulls_equal"}, + {Scope: ScopeGlobal | ScopeSession, Name: InnodbLockWaitTimeout, Value: strconv.FormatInt(DefInnodbLockWaitTimeout, 10)}, + {Scope: ScopeGlobal, Name: LocalInFile, Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: "myisam_stats_method", Value: "nulls_unequal"}, + {Scope: ScopeNone, Name: "version_compile_os", Value: "osx10.8"}, + {Scope: ScopeNone, Name: "relay_log_recovery", Value: "0"}, + {Scope: ScopeNone, Name: "old", Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: InnodbTableLocks, Value: "1"}, + {Scope: ScopeNone, Name: PerformanceSchema, Value: "0"}, + {Scope: ScopeNone, Name: "myisam_recover_options", Value: "OFF"}, + {Scope: ScopeGlobal | ScopeSession, Name: NetBufferLength, Value: "16384"}, + {Scope: ScopeGlobal | ScopeSession, Name: "binlog_row_image", Value: "FULL"}, + {Scope: ScopeNone, Name: "innodb_locks_unsafe_for_binlog", Value: "0"}, + {Scope: ScopeSession, Name: "rbr_exec_mode", Value: ""}, + {Scope: ScopeGlobal, Name: "myisam_max_sort_file_size", Value: "9223372036853727232"}, + {Scope: ScopeNone, Name: "back_log", Value: "80"}, + {Scope: ScopeNone, Name: "lower_case_file_system", Value: "1"}, + {Scope: ScopeGlobal | ScopeSession, Name: GroupConcatMaxLen, Value: "1024"}, + {Scope: ScopeSession, Name: "pseudo_thread_id", Value: ""}, + {Scope: ScopeNone, Name: "socket", Value: "/tmp/myssock"}, + {Scope: ScopeNone, Name: "have_dynamic_loading", Value: "YES"}, + {Scope: ScopeGlobal, Name: "rewriter_verbose", Value: ""}, + {Scope: ScopeGlobal, Name: "innodb_undo_logs", Value: "128"}, + {Scope: ScopeNone, Name: "performance_schema_max_cond_instances", Value: 
"3504"}, + {Scope: ScopeGlobal, Name: "delayed_insert_limit", Value: "100"}, + {Scope: ScopeGlobal, Name: Flush, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "eq_range_index_dive_limit", Value: "10"}, + {Scope: ScopeNone, Name: "performance_schema_events_stages_history_size", Value: "10"}, + {Scope: ScopeGlobal | ScopeSession, Name: "character_set_connection", Value: mysql.DefaultCharset}, + {Scope: ScopeGlobal, Name: MyISAMUseMmap, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndb_join_pushdown", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: CharacterSetServer, Value: mysql.DefaultCharset}, + {Scope: ScopeGlobal, Name: "validate_password_special_char_count", Value: "1"}, + {Scope: ScopeNone, Name: "performance_schema_max_thread_instances", Value: "402"}, + {Scope: ScopeGlobal | ScopeSession, Name: "ndbinfo_show_hidden", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "net_read_timeout", Value: "30"}, + {Scope: ScopeNone, Name: "innodb_page_size", Value: "16384"}, + {Scope: ScopeGlobal | ScopeSession, Name: MaxAllowedPacket, Value: "67108864"}, + {Scope: ScopeNone, Name: "innodb_log_file_size", Value: "50331648"}, + {Scope: ScopeGlobal, Name: "sync_relay_log_info", Value: "10000"}, + {Scope: ScopeGlobal | ScopeSession, Name: "optimizer_trace_limit", Value: "1"}, + {Scope: ScopeNone, Name: "innodb_ft_max_token_size", Value: "84"}, + {Scope: ScopeGlobal, Name: "validate_password_length", Value: "8"}, + {Scope: ScopeGlobal, Name: "ndb_log_binlog_index", Value: ""}, + {Scope: ScopeGlobal, Name: "innodb_api_bk_commit_interval", Value: "5"}, + {Scope: ScopeNone, Name: "innodb_undo_directory", Value: "."}, + {Scope: ScopeNone, Name: "bind_address", Value: "*"}, + {Scope: ScopeGlobal, Name: "innodb_sync_spin_loops", Value: "30"}, + {Scope: ScopeGlobal | ScopeSession, Name: SQLSafeUpdates, Value: "0"}, + {Scope: ScopeNone, Name: "tmpdir", Value: "/var/tmp/"}, + {Scope: ScopeGlobal, Name: "innodb_thread_concurrency", Value: "0"}, + {Scope: ScopeGlobal, Name: "innodb_buffer_pool_dump_pct", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "lc_time_names", Value: "en_US"}, + {Scope: ScopeGlobal | ScopeSession, Name: "max_statement_time", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: EndMakersInJSON, Value: "0"}, + {Scope: ScopeGlobal, Name: AvoidTemporalUpgrade, Value: "0"}, + {Scope: ScopeGlobal, Name: "key_cache_age_threshold", Value: "300"}, + {Scope: ScopeGlobal, Name: InnodbStatusOutput, Value: "0"}, + {Scope: ScopeSession, Name: "identity", Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: "min_examined_row_limit", Value: "0"}, + {Scope: ScopeGlobal, Name: "sync_frm", Value: "ON"}, + {Scope: ScopeGlobal, Name: "innodb_online_alter_log_max_size", Value: "134217728"}, + {Scope: ScopeSession, Name: WarningCount, Value: "0"}, + {Scope: ScopeSession, Name: ErrorCount, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: "information_schema_stats_expiry", Value: "86400"}, + {Scope: ScopeGlobal, Name: "thread_pool_size", Value: "16"}, + {Scope: ScopeGlobal | ScopeSession, Name: WindowingUseHighPrecision, Value: "ON"}, /* TiDB specific variables */ - {ScopeSession, TiDBSnapshot, ""}, - {ScopeSession, TiDBOptAggPushDown, BoolToIntStr(DefOptAggPushDown)}, - {ScopeGlobal | ScopeSession, TiDBOptBCJ, BoolToIntStr(DefOptBCJ)}, - {ScopeSession, TiDBOptDistinctAggPushDown, BoolToIntStr(config.GetGlobalConfig().Performance.DistinctAggPushDown)}, - {ScopeSession, TiDBOptWriteRowID, BoolToIntStr(DefOptWriteRowID)}, - {ScopeGlobal | ScopeSession, 
TiDBBuildStatsConcurrency, strconv.Itoa(DefBuildStatsConcurrency)}, - {ScopeGlobal, TiDBAutoAnalyzeRatio, strconv.FormatFloat(DefAutoAnalyzeRatio, 'f', -1, 64)}, - {ScopeGlobal, TiDBAutoAnalyzeStartTime, DefAutoAnalyzeStartTime}, - {ScopeGlobal, TiDBAutoAnalyzeEndTime, DefAutoAnalyzeEndTime}, - {ScopeSession, TiDBChecksumTableConcurrency, strconv.Itoa(DefChecksumTableConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBExecutorConcurrency, strconv.Itoa(DefExecutorConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBDistSQLScanConcurrency, strconv.Itoa(DefDistSQLScanConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBOptInSubqToJoinAndAgg, BoolToIntStr(DefOptInSubqToJoinAndAgg)}, - {ScopeGlobal | ScopeSession, TiDBOptCorrelationThreshold, strconv.FormatFloat(DefOptCorrelationThreshold, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptCorrelationExpFactor, strconv.Itoa(DefOptCorrelationExpFactor)}, - {ScopeGlobal | ScopeSession, TiDBOptCPUFactor, strconv.FormatFloat(DefOptCPUFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptTiFlashConcurrencyFactor, strconv.FormatFloat(DefOptTiFlashConcurrencyFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptCopCPUFactor, strconv.FormatFloat(DefOptCopCPUFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptNetworkFactor, strconv.FormatFloat(DefOptNetworkFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptScanFactor, strconv.FormatFloat(DefOptScanFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptDescScanFactor, strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptSeekFactor, strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptMemoryFactor, strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptDiskFactor, strconv.FormatFloat(DefOptDiskFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBOptConcurrencyFactor, strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64)}, - {ScopeGlobal | ScopeSession, TiDBIndexJoinBatchSize, strconv.Itoa(DefIndexJoinBatchSize)}, - {ScopeGlobal | ScopeSession, TiDBIndexLookupSize, strconv.Itoa(DefIndexLookupSize)}, - {ScopeGlobal | ScopeSession, TiDBIndexLookupConcurrency, strconv.Itoa(DefIndexLookupConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBIndexLookupJoinConcurrency, strconv.Itoa(DefIndexLookupJoinConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBIndexSerialScanConcurrency, strconv.Itoa(DefIndexSerialScanConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBSkipUTF8Check, BoolToIntStr(DefSkipUTF8Check)}, - {ScopeGlobal | ScopeSession, TiDBSkipASCIICheck, BoolToIntStr(DefSkipASCIICheck)}, - {ScopeSession, TiDBBatchInsert, BoolToIntStr(DefBatchInsert)}, - {ScopeSession, TiDBBatchDelete, BoolToIntStr(DefBatchDelete)}, - {ScopeSession, TiDBBatchCommit, BoolToIntStr(DefBatchCommit)}, - {ScopeGlobal | ScopeSession, TiDBDMLBatchSize, strconv.Itoa(DefDMLBatchSize)}, - {ScopeSession, TiDBCurrentTS, strconv.Itoa(DefCurretTS)}, - {ScopeSession, TiDBLastTxnInfo, strconv.Itoa(DefCurretTS)}, - {ScopeGlobal | ScopeSession, TiDBMaxChunkSize, strconv.Itoa(DefMaxChunkSize)}, - {ScopeGlobal | ScopeSession, TiDBAllowBatchCop, strconv.Itoa(DefTiDBAllowBatchCop)}, - {ScopeGlobal | ScopeSession, TiDBInitChunkSize, strconv.Itoa(DefInitChunkSize)}, - {ScopeGlobal | ScopeSession, TiDBEnableCascadesPlanner, "0"}, - {ScopeGlobal | ScopeSession, TiDBEnableIndexMerge, "0"}, - {ScopeSession, TIDBMemQuotaQuery, strconv.FormatInt(config.GetGlobalConfig().MemQuotaQuery, 10)}, - {ScopeSession, 
TIDBMemQuotaHashJoin, strconv.FormatInt(DefTiDBMemQuotaHashJoin, 10)}, - {ScopeSession, TIDBMemQuotaMergeJoin, strconv.FormatInt(DefTiDBMemQuotaMergeJoin, 10)}, - {ScopeSession, TIDBMemQuotaSort, strconv.FormatInt(DefTiDBMemQuotaSort, 10)}, - {ScopeSession, TIDBMemQuotaTopn, strconv.FormatInt(DefTiDBMemQuotaTopn, 10)}, - {ScopeSession, TIDBMemQuotaIndexLookupReader, strconv.FormatInt(DefTiDBMemQuotaIndexLookupReader, 10)}, - {ScopeSession, TIDBMemQuotaIndexLookupJoin, strconv.FormatInt(DefTiDBMemQuotaIndexLookupJoin, 10)}, - {ScopeSession, TIDBMemQuotaNestedLoopApply, strconv.FormatInt(DefTiDBMemQuotaNestedLoopApply, 10)}, - {ScopeSession, TiDBEnableStreaming, "0"}, - {ScopeSession, TiDBEnableChunkRPC, "1"}, - {ScopeSession, TxnIsolationOneShot, ""}, - {ScopeGlobal | ScopeSession, TiDBEnableTablePartition, "on"}, - {ScopeGlobal | ScopeSession, TiDBHashJoinConcurrency, strconv.Itoa(DefTiDBHashJoinConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBProjectionConcurrency, strconv.Itoa(DefTiDBProjectionConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBHashAggPartialConcurrency, strconv.Itoa(DefTiDBHashAggPartialConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBHashAggFinalConcurrency, strconv.Itoa(DefTiDBHashAggFinalConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBWindowConcurrency, strconv.Itoa(DefTiDBWindowConcurrency)}, - {ScopeGlobal | ScopeSession, TiDBEnableParallelApply, BoolToIntStr(DefTiDBEnableParallelApply)}, - {ScopeGlobal | ScopeSession, TiDBBackoffLockFast, strconv.Itoa(kv.DefBackoffLockFast)}, - {ScopeGlobal | ScopeSession, TiDBBackOffWeight, strconv.Itoa(kv.DefBackOffWeight)}, - {ScopeGlobal | ScopeSession, TiDBRetryLimit, strconv.Itoa(DefTiDBRetryLimit)}, - {ScopeGlobal | ScopeSession, TiDBDisableTxnAutoRetry, BoolToIntStr(DefTiDBDisableTxnAutoRetry)}, - {ScopeGlobal | ScopeSession, TiDBConstraintCheckInPlace, BoolToIntStr(DefTiDBConstraintCheckInPlace)}, - {ScopeGlobal | ScopeSession, TiDBTxnMode, DefTiDBTxnMode}, - {ScopeGlobal, TiDBRowFormatVersion, strconv.Itoa(DefTiDBRowFormatV1)}, - {ScopeSession, TiDBOptimizerSelectivityLevel, strconv.Itoa(DefTiDBOptimizerSelectivityLevel)}, - {ScopeGlobal | ScopeSession, TiDBEnableWindowFunction, BoolToIntStr(DefEnableWindowFunction)}, - {ScopeGlobal | ScopeSession, TiDBEnableVectorizedExpression, BoolToIntStr(DefEnableVectorizedExpression)}, - {ScopeGlobal | ScopeSession, TiDBEnableFastAnalyze, BoolToIntStr(DefTiDBUseFastAnalyze)}, - {ScopeGlobal | ScopeSession, TiDBSkipIsolationLevelCheck, BoolToIntStr(DefTiDBSkipIsolationLevelCheck)}, + {Scope: ScopeSession, Name: TiDBSnapshot, Value: ""}, + {Scope: ScopeSession, Name: TiDBOptAggPushDown, Value: BoolToIntStr(DefOptAggPushDown)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptBCJ, Value: BoolToIntStr(DefOptBCJ)}, + {Scope: ScopeSession, Name: TiDBOptDistinctAggPushDown, Value: BoolToIntStr(config.GetGlobalConfig().Performance.DistinctAggPushDown)}, + {Scope: ScopeSession, Name: TiDBOptWriteRowID, Value: BoolToIntStr(DefOptWriteRowID)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBBuildStatsConcurrency, Value: strconv.Itoa(DefBuildStatsConcurrency)}, + {Scope: ScopeGlobal, Name: TiDBAutoAnalyzeRatio, Value: strconv.FormatFloat(DefAutoAnalyzeRatio, 'f', -1, 64)}, + {Scope: ScopeGlobal, Name: TiDBAutoAnalyzeStartTime, Value: DefAutoAnalyzeStartTime}, + {Scope: ScopeGlobal, Name: TiDBAutoAnalyzeEndTime, Value: DefAutoAnalyzeEndTime}, + {Scope: ScopeSession, Name: TiDBChecksumTableConcurrency, Value: strconv.Itoa(DefChecksumTableConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, 
Name: TiDBExecutorConcurrency, Value: strconv.Itoa(DefExecutorConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBDistSQLScanConcurrency, Value: strconv.Itoa(DefDistSQLScanConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptInSubqToJoinAndAgg, Value: BoolToIntStr(DefOptInSubqToJoinAndAgg)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationThreshold, Value: strconv.FormatFloat(DefOptCorrelationThreshold, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCorrelationExpFactor, Value: strconv.Itoa(DefOptCorrelationExpFactor)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCPUFactor, Value: strconv.FormatFloat(DefOptCPUFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptTiFlashConcurrencyFactor, Value: strconv.FormatFloat(DefOptTiFlashConcurrencyFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptCopCPUFactor, Value: strconv.FormatFloat(DefOptCopCPUFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptNetworkFactor, Value: strconv.FormatFloat(DefOptNetworkFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptScanFactor, Value: strconv.FormatFloat(DefOptScanFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDescScanFactor, Value: strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptSeekFactor, Value: strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptMemoryFactor, Value: strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptDiskFactor, Value: strconv.FormatFloat(DefOptDiskFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptConcurrencyFactor, Value: strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexJoinBatchSize, Value: strconv.Itoa(DefIndexJoinBatchSize)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupSize, Value: strconv.Itoa(DefIndexLookupSize)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupConcurrency, Value: strconv.Itoa(DefIndexLookupConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexLookupJoinConcurrency, Value: strconv.Itoa(DefIndexLookupJoinConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBIndexSerialScanConcurrency, Value: strconv.Itoa(DefIndexSerialScanConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipUTF8Check, Value: BoolToIntStr(DefSkipUTF8Check)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipASCIICheck, Value: BoolToIntStr(DefSkipASCIICheck)}, + {Scope: ScopeSession, Name: TiDBBatchInsert, Value: BoolToIntStr(DefBatchInsert)}, + {Scope: ScopeSession, Name: TiDBBatchDelete, Value: BoolToIntStr(DefBatchDelete)}, + {Scope: ScopeSession, Name: TiDBBatchCommit, Value: BoolToIntStr(DefBatchCommit)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBDMLBatchSize, Value: strconv.Itoa(DefDMLBatchSize)}, + {Scope: ScopeSession, Name: TiDBCurrentTS, Value: strconv.Itoa(DefCurretTS)}, + {Scope: ScopeSession, Name: TiDBLastTxnInfo, Value: strconv.Itoa(DefCurretTS)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBMaxChunkSize, Value: strconv.Itoa(DefMaxChunkSize)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowBatchCop, Value: strconv.Itoa(DefTiDBAllowBatchCop)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBInitChunkSize, Value: strconv.Itoa(DefInitChunkSize)}, + {Scope: ScopeGlobal | ScopeSession, Name: 
TiDBEnableCascadesPlanner, Value: "0"}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableIndexMerge, Value: "0"}, + {Scope: ScopeSession, Name: TIDBMemQuotaQuery, Value: strconv.FormatInt(config.GetGlobalConfig().MemQuotaQuery, 10)}, + {Scope: ScopeSession, Name: TIDBMemQuotaHashJoin, Value: strconv.FormatInt(DefTiDBMemQuotaHashJoin, 10)}, + {Scope: ScopeSession, Name: TIDBMemQuotaMergeJoin, Value: strconv.FormatInt(DefTiDBMemQuotaMergeJoin, 10)}, + {Scope: ScopeSession, Name: TIDBMemQuotaSort, Value: strconv.FormatInt(DefTiDBMemQuotaSort, 10)}, + {Scope: ScopeSession, Name: TIDBMemQuotaTopn, Value: strconv.FormatInt(DefTiDBMemQuotaTopn, 10)}, + {Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupReader, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupReader, 10)}, + {Scope: ScopeSession, Name: TIDBMemQuotaIndexLookupJoin, Value: strconv.FormatInt(DefTiDBMemQuotaIndexLookupJoin, 10)}, + {Scope: ScopeSession, Name: TIDBMemQuotaNestedLoopApply, Value: strconv.FormatInt(DefTiDBMemQuotaNestedLoopApply, 10)}, + {Scope: ScopeSession, Name: TiDBEnableStreaming, Value: "0"}, + {Scope: ScopeSession, Name: TiDBEnableChunkRPC, Value: "1"}, + {Scope: ScopeSession, Name: TxnIsolationOneShot, Value: ""}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableTablePartition, Value: "on"}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBHashJoinConcurrency, Value: strconv.Itoa(DefTiDBHashJoinConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBProjectionConcurrency, Value: strconv.Itoa(DefTiDBProjectionConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggPartialConcurrency, Value: strconv.Itoa(DefTiDBHashAggPartialConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBHashAggFinalConcurrency, Value: strconv.Itoa(DefTiDBHashAggFinalConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBWindowConcurrency, Value: strconv.Itoa(DefTiDBWindowConcurrency)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableParallelApply, Value: BoolToIntStr(DefTiDBEnableParallelApply)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBBackoffLockFast, Value: strconv.Itoa(kv.DefBackoffLockFast)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBBackOffWeight, Value: strconv.Itoa(kv.DefBackOffWeight)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBRetryLimit, Value: strconv.Itoa(DefTiDBRetryLimit)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBDisableTxnAutoRetry, Value: BoolToIntStr(DefTiDBDisableTxnAutoRetry)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBConstraintCheckInPlace, Value: BoolToIntStr(DefTiDBConstraintCheckInPlace)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBTxnMode, Value: DefTiDBTxnMode}, + {Scope: ScopeGlobal, Name: TiDBRowFormatVersion, Value: strconv.Itoa(DefTiDBRowFormatV1)}, + {Scope: ScopeSession, Name: TiDBOptimizerSelectivityLevel, Value: strconv.Itoa(DefTiDBOptimizerSelectivityLevel)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableWindowFunction, Value: BoolToIntStr(DefEnableWindowFunction)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableVectorizedExpression, Value: BoolToIntStr(DefEnableVectorizedExpression)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableFastAnalyze, Value: BoolToIntStr(DefTiDBUseFastAnalyze)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBSkipIsolationLevelCheck, Value: BoolToIntStr(DefTiDBSkipIsolationLevelCheck)}, /* The following variable is defined as session scope but is actually server scope. 
*/ - {ScopeSession, TiDBGeneralLog, strconv.Itoa(DefTiDBGeneralLog)}, - {ScopeSession, TiDBPProfSQLCPU, strconv.Itoa(DefTiDBPProfSQLCPU)}, - {ScopeSession, TiDBDDLSlowOprThreshold, strconv.Itoa(DefTiDBDDLSlowOprThreshold)}, - {ScopeSession, TiDBConfig, ""}, - {ScopeGlobal, TiDBDDLReorgWorkerCount, strconv.Itoa(DefTiDBDDLReorgWorkerCount)}, - {ScopeGlobal, TiDBDDLReorgBatchSize, strconv.Itoa(DefTiDBDDLReorgBatchSize)}, - {ScopeGlobal, TiDBDDLErrorCountLimit, strconv.Itoa(DefTiDBDDLErrorCountLimit)}, - {ScopeSession, TiDBDDLReorgPriority, "PRIORITY_LOW"}, - {ScopeGlobal, TiDBMaxDeltaSchemaCount, strconv.Itoa(DefTiDBMaxDeltaSchemaCount)}, - {ScopeGlobal, TiDBEnableChangeColumnType, BoolToIntStr(DefTiDBChangeColumnType)}, - {ScopeSession, TiDBForcePriority, mysql.Priority2Str[DefTiDBForcePriority]}, - {ScopeSession, TiDBEnableRadixJoin, BoolToIntStr(DefTiDBUseRadixJoin)}, - {ScopeGlobal | ScopeSession, TiDBOptJoinReorderThreshold, strconv.Itoa(DefTiDBOptJoinReorderThreshold)}, - {ScopeSession, TiDBSlowQueryFile, ""}, - {ScopeGlobal, TiDBScatterRegion, BoolToIntStr(DefTiDBScatterRegion)}, - {ScopeSession, TiDBWaitSplitRegionFinish, BoolToIntStr(DefTiDBWaitSplitRegionFinish)}, - {ScopeSession, TiDBWaitSplitRegionTimeout, strconv.Itoa(DefWaitSplitRegionTimeout)}, - {ScopeSession, TiDBLowResolutionTSO, "0"}, - {ScopeSession, TiDBExpensiveQueryTimeThreshold, strconv.Itoa(DefTiDBExpensiveQueryTimeThreshold)}, - {ScopeGlobal | ScopeSession, TiDBEnableNoopFuncs, BoolToIntStr(DefTiDBEnableNoopFuncs)}, - {ScopeSession, TiDBReplicaRead, "leader"}, - {ScopeSession, TiDBAllowRemoveAutoInc, BoolToIntStr(DefTiDBAllowRemoveAutoInc)}, - {ScopeGlobal | ScopeSession, TiDBEnableStmtSummary, BoolToIntStr(config.GetGlobalConfig().StmtSummary.Enable)}, - {ScopeGlobal | ScopeSession, TiDBStmtSummaryInternalQuery, BoolToIntStr(config.GetGlobalConfig().StmtSummary.EnableInternalQuery)}, - {ScopeGlobal | ScopeSession, TiDBStmtSummaryRefreshInterval, strconv.Itoa(config.GetGlobalConfig().StmtSummary.RefreshInterval)}, - {ScopeGlobal | ScopeSession, TiDBStmtSummaryHistorySize, strconv.Itoa(config.GetGlobalConfig().StmtSummary.HistorySize)}, - {ScopeGlobal | ScopeSession, TiDBStmtSummaryMaxStmtCount, strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxStmtCount), 10)}, - {ScopeGlobal | ScopeSession, TiDBStmtSummaryMaxSQLLength, strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxSQLLength), 10)}, - {ScopeGlobal | ScopeSession, TiDBCapturePlanBaseline, "off"}, - {ScopeGlobal | ScopeSession, TiDBUsePlanBaselines, boolToOnOff(DefTiDBUsePlanBaselines)}, - {ScopeGlobal | ScopeSession, TiDBEvolvePlanBaselines, boolToOnOff(DefTiDBEvolvePlanBaselines)}, - {ScopeGlobal, TiDBEvolvePlanTaskMaxTime, strconv.Itoa(DefTiDBEvolvePlanTaskMaxTime)}, - {ScopeGlobal, TiDBEvolvePlanTaskStartTime, DefTiDBEvolvePlanTaskStartTime}, - {ScopeGlobal, TiDBEvolvePlanTaskEndTime, DefTiDBEvolvePlanTaskEndTime}, - {ScopeSession, TiDBIsolationReadEngines, strings.Join(config.GetGlobalConfig().IsolationRead.Engines, ", ")}, - {ScopeGlobal | ScopeSession, TiDBStoreLimit, strconv.FormatInt(atomic.LoadInt64(&config.GetGlobalConfig().TiKVClient.StoreLimit), 10)}, - {ScopeSession, TiDBMetricSchemaStep, strconv.Itoa(DefTiDBMetricSchemaStep)}, - {ScopeSession, TiDBMetricSchemaRangeDuration, strconv.Itoa(DefTiDBMetricSchemaRangeDuration)}, - {ScopeSession, TiDBSlowLogThreshold, strconv.Itoa(logutil.DefaultSlowThreshold)}, - {ScopeSession, TiDBRecordPlanInSlowLog, strconv.Itoa(logutil.DefaultRecordPlanInSlowLog)}, - {ScopeSession, 
TiDBEnableSlowLog, BoolToIntStr(logutil.DefaultTiDBEnableSlowLog)}, - {ScopeSession, TiDBQueryLogMaxLen, strconv.Itoa(logutil.DefaultQueryLogMaxLen)}, - {ScopeSession, TiDBCheckMb4ValueInUTF8, BoolToIntStr(config.GetGlobalConfig().CheckMb4ValueInUTF8)}, - {ScopeSession, TiDBFoundInPlanCache, BoolToIntStr(DefTiDBFoundInPlanCache)}, - {ScopeSession, TiDBEnableCollectExecutionInfo, BoolToIntStr(DefTiDBEnableCollectExecutionInfo)}, - {ScopeGlobal | ScopeSession, TiDBAllowAutoRandExplicitInsert, boolToOnOff(DefTiDBAllowAutoRandExplicitInsert)}, - {ScopeGlobal | ScopeSession, TiDBEnableClusteredIndex, BoolToIntStr(DefTiDBEnableClusteredIndex)}, - {ScopeGlobal | ScopeSession, TiDBPartitionPruneMode, string(StaticOnly)}, - {ScopeGlobal, TiDBSlowLogMasking, BoolToIntStr(DefTiDBSlowLogMasking)}, - {ScopeGlobal, TiDBRedactLog, strconv.Itoa(config.DefTiDBRedactLog)}, - {ScopeGlobal | ScopeSession, TiDBShardAllocateStep, strconv.Itoa(DefTiDBShardAllocateStep)}, - {ScopeGlobal, TiDBEnableTelemetry, BoolToIntStr(DefTiDBEnableTelemetry)}, - {ScopeGlobal | ScopeSession, TiDBEnableAmendPessimisticTxn, boolToOnOff(DefTiDBEnableAmendPessimisticTxn)}, + {Scope: ScopeSession, Name: TiDBGeneralLog, Value: strconv.Itoa(DefTiDBGeneralLog)}, + {Scope: ScopeSession, Name: TiDBPProfSQLCPU, Value: strconv.Itoa(DefTiDBPProfSQLCPU)}, + {Scope: ScopeSession, Name: TiDBDDLSlowOprThreshold, Value: strconv.Itoa(DefTiDBDDLSlowOprThreshold)}, + {Scope: ScopeSession, Name: TiDBConfig, Value: ""}, + {Scope: ScopeGlobal, Name: TiDBDDLReorgWorkerCount, Value: strconv.Itoa(DefTiDBDDLReorgWorkerCount)}, + {Scope: ScopeGlobal, Name: TiDBDDLReorgBatchSize, Value: strconv.Itoa(DefTiDBDDLReorgBatchSize)}, + {Scope: ScopeGlobal, Name: TiDBDDLErrorCountLimit, Value: strconv.Itoa(DefTiDBDDLErrorCountLimit)}, + {Scope: ScopeSession, Name: TiDBDDLReorgPriority, Value: "PRIORITY_LOW"}, + {Scope: ScopeGlobal, Name: TiDBMaxDeltaSchemaCount, Value: strconv.Itoa(DefTiDBMaxDeltaSchemaCount)}, + {Scope: ScopeGlobal, Name: TiDBEnableChangeColumnType, Value: BoolToIntStr(DefTiDBChangeColumnType)}, + {Scope: ScopeSession, Name: TiDBForcePriority, Value: mysql.Priority2Str[DefTiDBForcePriority]}, + {Scope: ScopeSession, Name: TiDBEnableRadixJoin, Value: BoolToIntStr(DefTiDBUseRadixJoin)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptJoinReorderThreshold, Value: strconv.Itoa(DefTiDBOptJoinReorderThreshold)}, + {Scope: ScopeSession, Name: TiDBSlowQueryFile, Value: ""}, + {Scope: ScopeGlobal, Name: TiDBScatterRegion, Value: BoolToIntStr(DefTiDBScatterRegion)}, + {Scope: ScopeSession, Name: TiDBWaitSplitRegionFinish, Value: BoolToIntStr(DefTiDBWaitSplitRegionFinish)}, + {Scope: ScopeSession, Name: TiDBWaitSplitRegionTimeout, Value: strconv.Itoa(DefWaitSplitRegionTimeout)}, + {Scope: ScopeSession, Name: TiDBLowResolutionTSO, Value: "0"}, + {Scope: ScopeSession, Name: TiDBExpensiveQueryTimeThreshold, Value: strconv.Itoa(DefTiDBExpensiveQueryTimeThreshold)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableNoopFuncs, Value: BoolToIntStr(DefTiDBEnableNoopFuncs)}, + {Scope: ScopeSession, Name: TiDBReplicaRead, Value: "leader"}, + {Scope: ScopeSession, Name: TiDBAllowRemoveAutoInc, Value: BoolToIntStr(DefTiDBAllowRemoveAutoInc)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStmtSummary, Value: BoolToIntStr(config.GetGlobalConfig().StmtSummary.Enable)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryInternalQuery, Value: BoolToIntStr(config.GetGlobalConfig().StmtSummary.EnableInternalQuery)}, + {Scope: ScopeGlobal | ScopeSession, 
Name: TiDBStmtSummaryRefreshInterval, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.RefreshInterval)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryHistorySize, Value: strconv.Itoa(config.GetGlobalConfig().StmtSummary.HistorySize)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxStmtCount, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxStmtCount), 10)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBStmtSummaryMaxSQLLength, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().StmtSummary.MaxSQLLength), 10)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBCapturePlanBaseline, Value: "off"}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBUsePlanBaselines, Value: boolToOnOff(DefTiDBUsePlanBaselines)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEvolvePlanBaselines, Value: boolToOnOff(DefTiDBEvolvePlanBaselines)}, + {Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskMaxTime, Value: strconv.Itoa(DefTiDBEvolvePlanTaskMaxTime)}, + {Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskStartTime, Value: DefTiDBEvolvePlanTaskStartTime}, + {Scope: ScopeGlobal, Name: TiDBEvolvePlanTaskEndTime, Value: DefTiDBEvolvePlanTaskEndTime}, + {Scope: ScopeSession, Name: TiDBIsolationReadEngines, Value: strings.Join(config.GetGlobalConfig().IsolationRead.Engines, ", ")}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBStoreLimit, Value: strconv.FormatInt(atomic.LoadInt64(&config.GetGlobalConfig().TiKVClient.StoreLimit), 10)}, + {Scope: ScopeSession, Name: TiDBMetricSchemaStep, Value: strconv.Itoa(DefTiDBMetricSchemaStep)}, + {Scope: ScopeSession, Name: TiDBMetricSchemaRangeDuration, Value: strconv.Itoa(DefTiDBMetricSchemaRangeDuration)}, + {Scope: ScopeSession, Name: TiDBSlowLogThreshold, Value: strconv.Itoa(logutil.DefaultSlowThreshold)}, + {Scope: ScopeSession, Name: TiDBRecordPlanInSlowLog, Value: strconv.Itoa(logutil.DefaultRecordPlanInSlowLog)}, + {Scope: ScopeSession, Name: TiDBEnableSlowLog, Value: BoolToIntStr(logutil.DefaultTiDBEnableSlowLog)}, + {Scope: ScopeSession, Name: TiDBQueryLogMaxLen, Value: strconv.Itoa(logutil.DefaultQueryLogMaxLen)}, + {Scope: ScopeSession, Name: TiDBCheckMb4ValueInUTF8, Value: BoolToIntStr(config.GetGlobalConfig().CheckMb4ValueInUTF8)}, + {Scope: ScopeSession, Name: TiDBFoundInPlanCache, Value: BoolToIntStr(DefTiDBFoundInPlanCache)}, + {Scope: ScopeSession, Name: TiDBEnableCollectExecutionInfo, Value: BoolToIntStr(DefTiDBEnableCollectExecutionInfo)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowAutoRandExplicitInsert, Value: boolToOnOff(DefTiDBAllowAutoRandExplicitInsert)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableClusteredIndex, Value: BoolToIntStr(DefTiDBEnableClusteredIndex)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBPartitionPruneMode, Value: string(StaticOnly)}, + {Scope: ScopeGlobal, Name: TiDBSlowLogMasking, Value: BoolToIntStr(DefTiDBSlowLogMasking)}, + {Scope: ScopeGlobal, Name: TiDBRedactLog, Value: strconv.Itoa(config.DefTiDBRedactLog)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBShardAllocateStep, Value: strconv.Itoa(DefTiDBShardAllocateStep)}, + {Scope: ScopeGlobal, Name: TiDBEnableTelemetry, Value: BoolToIntStr(DefTiDBEnableTelemetry)}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAmendPessimisticTxn, Value: boolToOnOff(DefTiDBEnableAmendPessimisticTxn)}, // for compatibility purpose, we should leave them alone. // TODO: Follow the Terminology Updates of MySQL after their changes arrived. 
// https://mysqlhighavailability.com/mysql-terminology-updates/ - {ScopeSession, PseudoSlaveMode, ""}, - {ScopeGlobal, "slave_pending_jobs_size_max", "16777216"}, - {ScopeGlobal, "slave_transaction_retries", "10"}, - {ScopeGlobal, "slave_checkpoint_period", "300"}, - {ScopeGlobal, MasterVerifyChecksum, "0"}, - {ScopeGlobal, "rpl_semi_sync_master_trace_level", ""}, - {ScopeGlobal, "master_info_repository", "FILE"}, - {ScopeGlobal, "rpl_stop_slave_timeout", "31536000"}, - {ScopeGlobal, "slave_net_timeout", "3600"}, - {ScopeGlobal, "sync_master_info", "10000"}, - {ScopeGlobal, "init_slave", ""}, - {ScopeGlobal, SlaveCompressedProtocol, "0"}, - {ScopeGlobal, "rpl_semi_sync_slave_trace_level", ""}, - {ScopeGlobal, LogSlowSlaveStatements, "0"}, - {ScopeGlobal, "slave_checkpoint_group", "512"}, - {ScopeNone, "slave_load_tmpdir", "/var/tmp/"}, - {ScopeGlobal, "slave_parallel_type", ""}, - {ScopeGlobal, "slave_parallel_workers", "0"}, - {ScopeGlobal, "rpl_semi_sync_master_timeout", ""}, - {ScopeNone, "slave_skip_errors", "OFF"}, - {ScopeGlobal, "sql_slave_skip_counter", "0"}, - {ScopeGlobal, "rpl_semi_sync_slave_enabled", ""}, - {ScopeGlobal, "rpl_semi_sync_master_enabled", ""}, - {ScopeGlobal, "slave_preserve_commit_order", ""}, - {ScopeGlobal, "slave_exec_mode", "STRICT"}, - {ScopeNone, "log_slave_updates", "0"}, - {ScopeGlobal, "rpl_semi_sync_master_wait_point", ""}, - {ScopeGlobal, "slave_sql_verify_checksum", "1"}, - {ScopeGlobal, "slave_max_allowed_packet", "1073741824"}, - {ScopeGlobal, "rpl_semi_sync_master_wait_for_slave_count", ""}, - {ScopeGlobal, "rpl_semi_sync_master_wait_no_slave", ""}, - {ScopeGlobal, "slave_rows_search_algorithms", "TABLE_SCAN,INDEX_SCAN"}, - {ScopeGlobal, SlaveAllowBatching, "0"}, + {Scope: ScopeSession, Name: PseudoSlaveMode, Value: ""}, + {Scope: ScopeGlobal, Name: "slave_pending_jobs_size_max", Value: "16777216"}, + {Scope: ScopeGlobal, Name: "slave_transaction_retries", Value: "10"}, + {Scope: ScopeGlobal, Name: "slave_checkpoint_period", Value: "300"}, + {Scope: ScopeGlobal, Name: MasterVerifyChecksum, Value: "0"}, + {Scope: ScopeGlobal, Name: "rpl_semi_sync_master_trace_level", Value: ""}, + {Scope: ScopeGlobal, Name: "master_info_repository", Value: "FILE"}, + {Scope: ScopeGlobal, Name: "rpl_stop_slave_timeout", Value: "31536000"}, + {Scope: ScopeGlobal, Name: "slave_net_timeout", Value: "3600"}, + {Scope: ScopeGlobal, Name: "sync_master_info", Value: "10000"}, + {Scope: ScopeGlobal, Name: "init_slave", Value: ""}, + {Scope: ScopeGlobal, Name: SlaveCompressedProtocol, Value: "0"}, + {Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_trace_level", Value: ""}, + {Scope: ScopeGlobal, Name: LogSlowSlaveStatements, Value: "0"}, + {Scope: ScopeGlobal, Name: "slave_checkpoint_group", Value: "512"}, + {Scope: ScopeNone, Name: "slave_load_tmpdir", Value: "/var/tmp/"}, + {Scope: ScopeGlobal, Name: "slave_parallel_type", Value: ""}, + {Scope: ScopeGlobal, Name: "slave_parallel_workers", Value: "0"}, + {Scope: ScopeGlobal, Name: "rpl_semi_sync_master_timeout", Value: ""}, + {Scope: ScopeNone, Name: "slave_skip_errors", Value: "OFF"}, + {Scope: ScopeGlobal, Name: "sql_slave_skip_counter", Value: "0"}, + {Scope: ScopeGlobal, Name: "rpl_semi_sync_slave_enabled", Value: ""}, + {Scope: ScopeGlobal, Name: "rpl_semi_sync_master_enabled", Value: ""}, + {Scope: ScopeGlobal, Name: "slave_preserve_commit_order", Value: ""}, + {Scope: ScopeGlobal, Name: "slave_exec_mode", Value: "STRICT"}, + {Scope: ScopeNone, Name: "log_slave_updates", Value: "0"}, + {Scope: ScopeGlobal, Name: 
"rpl_semi_sync_master_wait_point", Value: ""}, + {Scope: ScopeGlobal, Name: "slave_sql_verify_checksum", Value: "1"}, + {Scope: ScopeGlobal, Name: "slave_max_allowed_packet", Value: "1073741824"}, + {Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_for_slave_count", Value: ""}, + {Scope: ScopeGlobal, Name: "rpl_semi_sync_master_wait_no_slave", Value: ""}, + {Scope: ScopeGlobal, Name: "slave_rows_search_algorithms", Value: "TABLE_SCANINDEX_SCAN"}, + {Scope: ScopeGlobal, Name: SlaveAllowBatching, Value: "0"}, } // SynonymsSysVariables is synonyms of system variables. diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go index 5239a7f89dcc1..cce4606b9c49e 100644 --- a/sessionctx/variable/varsutil.go +++ b/sessionctx/variable/varsutil.go @@ -108,7 +108,7 @@ func GetSessionSystemVar(s *SessionVars, key string) (string, error) { // GetSessionOnlySysVars get the default value defined in code for session only variable. // The return bool value indicates whether it's a session only variable. func GetSessionOnlySysVars(s *SessionVars, key string) (string, bool, error) { - sysVar := SysVars[key] + sysVar := GetSysVar(key) if sysVar == nil { return "", false, ErrUnknownSystemVar.GenWithStackByArgs(key) } @@ -192,7 +192,7 @@ func GetGlobalSystemVar(s *SessionVars, key string) (string, error) { // GetScopeNoneSystemVar checks the validation of `key`, // and return the default value if its scope is `ScopeNone`. func GetScopeNoneSystemVar(key string) (string, bool, error) { - sysVar := SysVars[key] + sysVar := GetSysVar(key) if sysVar == nil { return "", false, ErrUnknownSystemVar.GenWithStackByArgs(key) } @@ -207,8 +207,7 @@ const epochShiftBits = 18 // SetSessionSystemVar sets system variable and updates SessionVars states. func SetSessionSystemVar(vars *SessionVars, name string, value types.Datum) error { - name = strings.ToLower(name) - sysVar := SysVars[name] + sysVar := GetSysVar(name) if sysVar == nil { return ErrUnknownSystemVar } @@ -230,8 +229,8 @@ func SetSessionSystemVar(vars *SessionVars, name string, value types.Datum) erro // ValidateGetSystemVar checks if system variable exists and validates its scope when get system variable. 
@@ -230,8 +229,8 @@ func SetSessionSystemVar(vars *SessionVars, name string, value types.Datum) erro // ValidateGetSystemVar checks if system variable exists and validates its scope when get system variable. func ValidateGetSystemVar(name string, isGlobal bool) error { - sysVar, exists := SysVars[name] - if !exists { + sysVar := GetSysVar(name) + if sysVar == nil { return ErrUnknownSystemVar.GenWithStackByArgs(name) } switch sysVar.Scope { diff --git a/tidb-server/main.go b/tidb-server/main.go index e43834c1b3418..b73988b1c0bf3 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -552,18 +552,17 @@ func setGlobalVars() { priority := mysql.Str2Priority(cfg.Performance.ForcePriority) variable.ForcePriority = int32(priority) - variable.SysVars[variable.TiDBForcePriority].Value = mysql.Priority2Str[priority] - variable.SysVars[variable.TiDBOptDistinctAggPushDown].Value = variable.BoolToIntStr(cfg.Performance.DistinctAggPushDown) - - variable.SysVars[variable.TIDBMemQuotaQuery].Value = strconv.FormatInt(cfg.MemQuotaQuery, 10) - variable.SysVars["lower_case_table_names"].Value = strconv.Itoa(cfg.LowerCaseTableNames) - variable.SysVars[variable.LogBin].Value = variable.BoolToIntStr(config.GetGlobalConfig().Binlog.Enable) - - variable.SysVars[variable.Port].Value = fmt.Sprintf("%d", cfg.Port) - variable.SysVars[variable.Socket].Value = cfg.Socket - variable.SysVars[variable.DataDir].Value = cfg.Path - variable.SysVars[variable.TiDBSlowQueryFile].Value = cfg.Log.SlowQueryFile - variable.SysVars[variable.TiDBIsolationReadEngines].Value = strings.Join(cfg.IsolationRead.Engines, ", ") + + variable.SetSysVar(variable.TiDBForcePriority, mysql.Priority2Str[priority]) + variable.SetSysVar(variable.TiDBOptDistinctAggPushDown, variable.BoolToIntStr(cfg.Performance.DistinctAggPushDown)) + variable.SetSysVar(variable.TIDBMemQuotaQuery, strconv.FormatInt(cfg.MemQuotaQuery, 10)) + variable.SetSysVar("lower_case_table_names", strconv.Itoa(cfg.LowerCaseTableNames)) + variable.SetSysVar(variable.LogBin, variable.BoolToIntStr(config.GetGlobalConfig().Binlog.Enable)) + variable.SetSysVar(variable.Port, fmt.Sprintf("%d", cfg.Port)) + variable.SetSysVar(variable.Socket, cfg.Socket) + variable.SetSysVar(variable.DataDir, cfg.Path) + variable.SetSysVar(variable.TiDBSlowQueryFile, cfg.Log.SlowQueryFile) + variable.SetSysVar(variable.TiDBIsolationReadEngines, strings.Join(cfg.IsolationRead.Engines, ", ")) // For CI environment we enable prepare-plan-cache by default.
plannercore.SetPreparedPlanCache(config.CheckTableBeforeDrop || cfg.PreparedPlanCache.Enabled) diff --git a/tidb-server/main_test.go b/tidb-server/main_test.go index 6cca1689c438d..b36166c6ed0a4 100644 --- a/tidb-server/main_test.go +++ b/tidb-server/main_test.go @@ -40,14 +40,14 @@ var _ = Suite(&testMainSuite{}) type testMainSuite struct{} func (t *testMainSuite) TestSetGlobalVars(c *C) { - c.Assert(variable.SysVars[variable.TiDBIsolationReadEngines].Value, Equals, "tikv, tiflash, tidb") - c.Assert(variable.SysVars[variable.TIDBMemQuotaQuery].Value, Equals, "1073741824") + c.Assert(variable.GetSysVar(variable.TiDBIsolationReadEngines).Value, Equals, "tikv, tiflash, tidb") + c.Assert(variable.GetSysVar(variable.TIDBMemQuotaQuery).Value, Equals, "1073741824") config.UpdateGlobal(func(conf *config.Config) { conf.IsolationRead.Engines = []string{"tikv", "tidb"} conf.MemQuotaQuery = 9999999 }) setGlobalVars() - c.Assert(variable.SysVars[variable.TiDBIsolationReadEngines].Value, Equals, "tikv, tidb") - c.Assert(variable.SysVars[variable.TIDBMemQuotaQuery].Value, Equals, "9999999") + c.Assert(variable.GetSysVar(variable.TiDBIsolationReadEngines).Value, Equals, "tikv, tidb") + c.Assert(variable.GetSysVar(variable.TIDBMemQuotaQuery).Value, Equals, "9999999") } From f2d4e4724778dee01e42ba5d7362fa5a048d7e81 Mon Sep 17 00:00:00 2001 From: "Zhuomin(Charming) Liu" Date: Tue, 29 Sep 2020 14:39:23 +0800 Subject: [PATCH 08/16] distsql: add metrics for coprocessor cache (#19979) --- distsql/select_result.go | 7 +++ executor/inspection_summary.go | 1 + infoschema/metric_table_def.go | 6 +++ metrics/distsql.go | 8 +++ metrics/grafana/tidb.json | 95 ++++++++++++++++++++++++++++++++++ metrics/metrics.go | 1 + store/tikv/coprocessor.go | 10 +++- 7 files changed, 126 insertions(+), 2 deletions(-) diff --git a/distsql/select_result.go b/distsql/select_result.go index 42cc246f4c536..f9840aed95f4c 100644 --- a/distsql/select_result.go +++ b/distsql/select_result.go @@ -43,6 +43,9 @@ import ( var ( errQueryInterrupted = terror.ClassExecutor.NewStd(errno.ErrQueryInterrupted) + + coprCacheHistogramHit = metrics.DistSQLCoprCacheHistogram.WithLabelValues("hit") + coprCacheHistogramMiss = metrics.DistSQLCoprCacheHistogram.WithLabelValues("miss") ) var ( @@ -155,6 +158,10 @@ func (r *selectResult) fetchResp(ctx context.Context) error { break } } + if r.stats != nil { + coprCacheHistogramHit.Observe(float64(r.stats.CoprCacheHitNum)) + coprCacheHistogramMiss.Observe(float64(len(r.stats.copRespTime) - int(r.stats.CoprCacheHitNum))) + } return nil } diff --git a/executor/inspection_summary.go b/executor/inspection_summary.go index 8ff8b7a4c64d2..37aef042a62f9 100644 --- a/executor/inspection_summary.go +++ b/executor/inspection_summary.go @@ -140,6 +140,7 @@ var inspectionSummaryRules = map[string][]string{ "tidb_distsql_partial_scan_key_num", "tidb_distsql_qps", "tidb_distsql_scan_key_num", + "tidb_distsql_copr_cache", "tidb_region_cache_ops", "tidb_batch_client_pending_req_count", "tidb_batch_client_unavailable_duration", diff --git a/infoschema/metric_table_def.go b/infoschema/metric_table_def.go index 8e2be18d3d0a8..f74820d6f1055 100644 --- a/infoschema/metric_table_def.go +++ b/infoschema/metric_table_def.go @@ -2495,6 +2495,12 @@ var MetricTableMap = map[string]MetricTableDef{ Labels: []string{"instance", "type"}, Comment: "The total time of distsql execution(second)", }, + "tidb_distsql_copr_cache": { + Comment: "The quantile of TiDB distsql coprocessor cache", + PromQL: "histogram_quantile($QUANTILE, 
sum(rate(tidb_distsql_copr_cache_buckets{$LABEL_CONDITIONS}[$RANGE_DURATION])) by (type,instance))", + Labels: []string{"instance", "type"}, + Quantile: 0.95, + }, "tidb_execute_total_count": { PromQL: "sum(increase(tidb_session_execute_duration_seconds_count{$LABEL_CONDITIONS}[$RANGE_DURATION])) by (instance,sql_type)", Labels: []string{"instance", "sql_type"}, diff --git a/metrics/distsql.go b/metrics/distsql.go index 6074b63c3e82f..9eef1c62f0310 100644 --- a/metrics/distsql.go +++ b/metrics/distsql.go @@ -52,4 +52,12 @@ var ( Help: "number of partial results for each query.", }, ) + DistSQLCoprCacheHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "tidb", + Subsystem: "distsql", + Name: "copr_cache", + Help: "coprocessor cache hit, evict and miss number", + Buckets: prometheus.ExponentialBuckets(1, 2, 16), + }, []string{LblType}) ) diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index abe270bfcf874..acd1539761e5f 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -6161,6 +6161,101 @@ "alignLevel": null } }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "TiDB coprocessor cache hit, evict and miss number", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 163 + }, + "id": 175, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(1, sum(rate(tidb_distsql_copr_cache_buckets[1m])) by (type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Coprocessor Cache", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, { "aliasColors": {}, "bars": false, diff --git a/metrics/metrics.go b/metrics/metrics.go index ab6c6d84ed1d9..be843ad6f02f0 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -77,6 +77,7 @@ func RegisterMetrics() { prometheus.MustRegister(DDLWorkerHistogram) prometheus.MustRegister(DeploySyncerHistogram) prometheus.MustRegister(DistSQLPartialCountHistogram) + prometheus.MustRegister(DistSQLCoprCacheHistogram) prometheus.MustRegister(DistSQLQueryHistogram) prometheus.MustRegister(DistSQLScanKeysHistogram) prometheus.MustRegister(DistSQLScanKeysPartialHistogram) diff --git a/store/tikv/coprocessor.go b/store/tikv/coprocessor.go index c914d068fca2a..e730cdf43cc85 100644 --- a/store/tikv/coprocessor.go +++ b/store/tikv/coprocessor.go @@ -45,8 +45,11 @@ import ( 
"go.uber.org/zap" ) -var tikvTxnRegionsNumHistogramWithCoprocessor = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("coprocessor") -var tikvTxnRegionsNumHistogramWithBatchCoprocessor = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("batch_coprocessor") +var ( + tikvTxnRegionsNumHistogramWithCoprocessor = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("coprocessor") + tikvTxnRegionsNumHistogramWithBatchCoprocessor = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("batch_coprocessor") + coprCacheHistogramEvict = metrics.DistSQLCoprCacheHistogram.WithLabelValues("evict") +) // CopClient is coprocessor client. type CopClient struct { @@ -777,6 +780,9 @@ func (worker *copIteratorWorker) handleTask(ctx context.Context, task *copTask, remainTasks = remainTasks[1:] } } + if worker.store.coprCache != nil && worker.store.coprCache.cache.Metrics != nil { + coprCacheHistogramEvict.Observe(float64(worker.store.coprCache.cache.Metrics.KeysEvicted())) + } } // handleTaskOnce handles single copTask, successful results are send to channel. From c8221fab813b62c72e137a5d5cce24b4b982b1c2 Mon Sep 17 00:00:00 2001 From: Arenatlx Date: Tue, 29 Sep 2020 15:56:35 +0800 Subject: [PATCH 09/16] ddl: support column type change between decimal && SQL mode warnings (#20012) --- ddl/backfilling.go | 45 +++++++++++++++--- ddl/column.go | 70 +++++++++++++++++++++++++--- ddl/column_test.go | 4 +- ddl/db_test.go | 101 +++++++++++++++++++++++++++++++++++++++++ ddl/ddl.go | 12 +++++ ddl/ddl_api.go | 46 ++++++++++++++----- ddl/index.go | 5 +- ddl/reorg.go | 58 +++++++++++++++++++++++ executor/admin_test.go | 2 +- 9 files changed, 315 insertions(+), 28 deletions(-) diff --git a/ddl/backfilling.go b/ddl/backfilling.go index 7d0f3bf1d71bf..5a8e4ece3e564 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/terror" ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" @@ -71,10 +72,12 @@ type backfillResult struct { // backfillTaskContext is the context of the batch adding indices or updating column values. // After finishing the batch adding indices or updating column values, result in backfillTaskContext will be merged into backfillResult. type backfillTaskContext struct { - nextHandle kv.Handle - done bool - addedCount int - scanCount int + nextHandle kv.Handle + done bool + addedCount int + scanCount int + warnings map[errors.ErrorID]*terror.Error + warningsCount map[errors.ErrorID]int64 } type backfillWorker struct { @@ -150,10 +153,26 @@ func mergeBackfillCtxToResult(taskCtx *backfillTaskContext, result *backfillResu result.scanCount += taskCtx.scanCount } +func mergeWarningsAndWarningsCount(partWarnings, totalWarnings map[errors.ErrorID]*terror.Error, partWarningsCount, totalWarningsCount map[errors.ErrorID]int64) (map[errors.ErrorID]*terror.Error, map[errors.ErrorID]int64) { + for _, warn := range partWarnings { + if _, ok := totalWarningsCount[warn.ID()]; ok { + totalWarningsCount[warn.ID()] += partWarningsCount[warn.ID()] + } else { + totalWarningsCount[warn.ID()] = partWarningsCount[warn.ID()] + totalWarnings[warn.ID()] = warn + } + } + return totalWarnings, totalWarningsCount +} + // handleBackfillTask backfills range [task.startHandle, task.endHandle) handle's index to table. 
From c8221fab813b62c72e137a5d5cce24b4b982b1c2 Mon Sep 17 00:00:00 2001 From: Arenatlx Date: Tue, 29 Sep 2020 15:56:35 +0800 Subject: [PATCH 09/16] ddl: support column type change between decimal && SQL mode warnings (#20012) --- ddl/backfilling.go | 45 +++++++++++++++--- ddl/column.go | 70 +++++++++++++++++++++++++--- ddl/column_test.go | 4 +- ddl/db_test.go | 101 +++++++++++++++++++++++++++++++++++++++++ ddl/ddl.go | 12 +++++ ddl/ddl_api.go | 46 ++++++++++++++----- ddl/index.go | 5 +- ddl/reorg.go | 58 +++++++++++++++++++++++ executor/admin_test.go | 2 +- 9 files changed, 315 insertions(+), 28 deletions(-) diff --git a/ddl/backfilling.go b/ddl/backfilling.go index 7d0f3bf1d71bf..5a8e4ece3e564 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/terror" ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" @@ -71,10 +72,12 @@ type backfillResult struct { // backfillTaskContext is the context of the batch adding indices or updating column values. // After finishing the batch adding indices or updating column values, result in backfillTaskContext will be merged into backfillResult. type backfillTaskContext struct { - nextHandle kv.Handle - done bool - addedCount int - scanCount int + nextHandle kv.Handle + done bool + addedCount int + scanCount int + warnings map[errors.ErrorID]*terror.Error + warningsCount map[errors.ErrorID]int64 } type backfillWorker struct { @@ -150,10 +153,26 @@ func mergeBackfillCtxToResult(taskCtx *backfillTaskContext, result *backfillResu result.scanCount += taskCtx.scanCount } +func mergeWarningsAndWarningsCount(partWarnings, totalWarnings map[errors.ErrorID]*terror.Error, partWarningsCount, totalWarningsCount map[errors.ErrorID]int64) (map[errors.ErrorID]*terror.Error, map[errors.ErrorID]int64) { + for _, warn := range partWarnings { + if _, ok := totalWarningsCount[warn.ID()]; ok { + totalWarningsCount[warn.ID()] += partWarningsCount[warn.ID()] + } else { + totalWarningsCount[warn.ID()] = partWarningsCount[warn.ID()] + totalWarnings[warn.ID()] = warn + } + } + return totalWarnings, totalWarningsCount +} // handleBackfillTask backfills range [task.startHandle, task.endHandle) handle's index to table. func (w *backfillWorker) handleBackfillTask(d *ddlCtx, task *reorgBackfillTask, bf backfiller) *backfillResult { handleRange := *task - result := &backfillResult{addedCount: 0, nextHandle: handleRange.startHandle, err: nil} + result := &backfillResult{ + err: nil, + addedCount: 0, + nextHandle: handleRange.startHandle, + } lastLogCount := 0 lastLogTime := time.Now() startTime := lastLogTime @@ -177,7 +196,17 @@ func (w *backfillWorker) handleBackfillTask(d *ddlCtx, task *reorgBackfillTask, bf.AddMetricInfo(float64(taskCtx.addedCount)) mergeBackfillCtxToResult(&taskCtx, result) + + // Although `handleRange` is for data in one region, the backfill worker still splits it into many + // small reorg-batch-sized slices and reorganizes them in many different kv txns. + // If a task fails, it may contain some committed small kv txns which have already finished the + // small range reorganization. + // In the next round of reorganization, the target handle range may overlap with the last committed + // small ranges. This causes the `redo` action in reorganization. + // So for the added count and warnings collection, it is recommended to collect the statistics from every + // successfully committed small range rather than from the total result. w.ddlWorker.reorgCtx.increaseRowCount(int64(taskCtx.addedCount)) + w.ddlWorker.reorgCtx.mergeWarnings(taskCtx.warnings, taskCtx.warningsCount) if num := result.scanCount - lastLogCount; num >= 30000 { lastLogCount = result.scanCount @@ -386,6 +415,8 @@ var ( TestCheckWorkerNumCh = make(chan struct{}) // TestCheckWorkerNumber use for test adjust backfill worker. TestCheckWorkerNumber = int32(16) + // TestCheckReorgTimeout is used to mock a timeout when reorganizing data. + TestCheckReorgTimeout = int32(0) ) func loadDDLReorgVars(w *worker) error { @@ -482,12 +513,12 @@ func (w *worker) writePhysicalTableRecord(t table.PhysicalTable, bfWorkerType ba sessCtx.GetSessionVars().StmtCtx.IsDDLJobInQueue = true if bfWorkerType == typeAddIndexWorker { - idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap) + idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap, reorgInfo.ReorgMeta.SQLMode) idxWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, idxWorker.backfillWorker) go idxWorker.backfillWorker.run(reorgInfo.d, idxWorker) } else { - updateWorker := newUpdateColumnWorker(sessCtx, w, i, t, oldColInfo, colInfo, decodeColMap) + updateWorker := newUpdateColumnWorker(sessCtx, w, i, t, oldColInfo, colInfo, decodeColMap, reorgInfo.ReorgMeta.SQLMode) updateWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, updateWorker.backfillWorker) go updateWorker.backfillWorker.run(reorgInfo.d, updateWorker)
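mergeWarningsAndWarningsCount above deduplicates warnings by errors.ErrorID: each distinct warning is stored once, and a counter tracks how many times it occurred across reorg batches. A runnable sketch of the same merge semantics, with plain strings standing in for errors.ErrorID and *terror.Error:

package main

import "fmt"

// mergeCounts mirrors mergeWarningsAndWarningsCount with simplified types.
func mergeCounts(partWarn, totalWarn map[string]string, partCount, totalCount map[string]int64) {
	for id, w := range partWarn {
		if _, ok := totalCount[id]; ok {
			totalCount[id] += partCount[id]
		} else {
			totalCount[id] = partCount[id]
			totalWarn[id] = w
		}
	}
}

func main() {
	totalWarn := map[string]string{"1292": "Truncated incorrect DECIMAL value"}
	totalCount := map[string]int64{"1292": 2}
	// One reorg batch produced one more truncation and three overflows.
	partWarn := map[string]string{"1292": "Truncated incorrect DECIMAL value", "1690": "DECIMAL value is out of range"}
	partCount := map[string]int64{"1292": 1, "1690": 3}
	mergeCounts(partWarn, totalWarn, partCount, totalCount)
	fmt.Println(totalCount) // map[1292:3 1690:3]
}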
diff --git a/ddl/column.go b/ddl/column.go index e7e123952c486..07ece53470b23 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" + "github.com/pingcap/parser/terror" ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -645,6 +646,11 @@ func onSetDefaultValue(t *meta.Meta, job *model.Job) (ver int64, _ error) { func needChangeColumnData(oldCol, newCol *model.ColumnInfo) bool { toUnsigned := mysql.HasUnsignedFlag(newCol.Flag) originUnsigned := mysql.HasUnsignedFlag(oldCol.Flag) + if oldCol.Tp == newCol.Tp && oldCol.Tp == mysql.TypeNewDecimal { + // Since type decimal will encode the precision, frac, negative(signed) and wordBuf into storage together, there is no short + // cut to eliminate data reorg change for column type change between decimal. + return oldCol.Flen != newCol.Flen || oldCol.Decimal != newCol.Decimal || toUnsigned != originUnsigned + } if newCol.Flen > 0 && newCol.Flen < oldCol.Flen || toUnsigned != originUnsigned { return true } @@ -1036,9 +1042,12 @@ type updateColumnWorker struct { rowDecoder *decoder.RowDecoder rowMap map[int64]types.Datum + + // For SQL Mode and warnings. + sqlMode mysql.SQLMode } -func newUpdateColumnWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, oldCol, newCol *model.ColumnInfo, decodeColMap map[int64]decoder.Column) *updateColumnWorker { +func newUpdateColumnWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, oldCol, newCol *model.ColumnInfo, decodeColMap map[int64]decoder.Column, sqlMode mysql.SQLMode) *updateColumnWorker { rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) return &updateColumnWorker{ backfillWorker: newBackfillWorker(sessCtx, worker, id, t), @@ -1047,6 +1056,7 @@ func newUpdateColumnWorker(sessCtx sessionctx.Context, worker *worker, id int, t metricCounter: metrics.BackfillTotalCounter.WithLabelValues("update_col_speed"), rowDecoder: rowDecoder, rowMap: make(map[int64]types.Datum, len(decodeColMap)), + sqlMode: sqlMode, } } @@ -1055,8 +1065,9 @@ func (w *updateColumnWorker) AddMetricInfo(cnt float64) { } type rowRecord struct { - key []byte // It's used to lock a record. Record it to reduce the encoding time. - vals []byte // It's the record. + key []byte // It's used to lock a record. Record it to reduce the encoding time. + vals []byte // It's the record. + warning *terror.Error // It's used to record the cast warning of a record. } // getNextHandle gets next handle of entry that we are going to process. @@ -1133,11 +1144,25 @@ func (w *updateColumnWorker) getRowRecord(handle kv.Handle, recordKey []byte, ra return nil } + var recordWarning *terror.Error newColVal, err := table.CastValue(w.sessCtx, w.rowMap[w.oldColInfo.ID], w.newColInfo, false, false) - // TODO: Consider sql_mode and the error msg(encounter this error check whether to rollback). if err != nil { - return errors.Trace(err) + if IsNormalWarning(err) || (!w.sqlMode.HasStrictMode() && IsStrictWarning(err)) { + // Keep the warnings. + recordWarning = errors.Cause(err).(*terror.Error) + } else { + return errors.Trace(err) + } } + + failpoint.Inject("MockReorgTimeoutInOneRegion", func(val failpoint.Value) { + if val.(bool) { + if handle.IntValue() == 3000 && atomic.CompareAndSwapInt32(&TestCheckReorgTimeout, 0, 1) { + failpoint.Return(errors.Trace(errWaitReorgTimeout)) + } + } + }) + w.rowMap[w.newColInfo.ID] = newColVal newColumnIDs := make([]int64, 0, len(w.rowMap)) newRow := make([]types.Datum, 0, len(w.rowMap)) @@ -1151,11 +1176,31 @@ func (w *updateColumnWorker) getRowRecord(handle kv.Handle, recordKey []byte, ra return errors.Trace(err) } - w.rowRecords = append(w.rowRecords, &rowRecord{key: recordKey, vals: newRowVal}) + w.rowRecords = append(w.rowRecords, &rowRecord{key: recordKey, vals: newRowVal, warning: recordWarning}) w.cleanRowMap() return nil } +// IsNormalWarning checks for normal warnings, for example data-truncated warnings. +// This kind of warning will always be thrown out regardless of the SQL mode. +func IsNormalWarning(err error) bool { + // TODO: there are more errors here that can be identified as normal warnings. + if types.ErrTruncatedWrongVal.Equal(err) { + return true + } + return false +} + +// IsStrictWarning is used to check whether the error can be converted to a warning under a +// non-strict SQL mode. +func IsStrictWarning(err error) bool { + // TODO: there are more errors here that can be identified as warnings under non-strict SQL mode. + if types.ErrOverflow.Equal(err) { + return true + } + return false +} + func (w *updateColumnWorker) cleanRowMap() { for id := range w.rowMap { delete(w.rowMap, id) } @@ -1177,6 +1222,8 @@ func (w *updateColumnWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (t taskCtx.nextHandle = nextHandle taskCtx.done = taskDone + warningsMap := make(map[errors.ErrorID]*terror.Error, len(rowRecords)) + warningsCountMap := make(map[errors.ErrorID]int64, len(rowRecords)) for _, rowRecord := range rowRecords { taskCtx.scanCount++ @@ -1185,8 +1232,19 @@ func (w *updateColumnWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (t return errors.Trace(err) } taskCtx.addedCount++ + if rowRecord.warning != nil { + if _, ok := warningsCountMap[rowRecord.warning.ID()]; ok { + warningsCountMap[rowRecord.warning.ID()]++ + } else { + warningsCountMap[rowRecord.warning.ID()] = 1 + warningsMap[rowRecord.warning.ID()] = rowRecord.warning + } + } } + // Collect the warnings. + taskCtx.warnings, taskCtx.warningsCount = warningsMap, warningsCountMap + return nil }) logSlowOperations(time.Since(oprStartTime), "BackfillDataInTxn", 3000) diff --git a/ddl/column_test.go b/ddl/column_test.go index bfd8f04990727..9c7679a12741b 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -1159,8 +1159,8 @@ func (s *testColumnSuite) TestModifyColumn(c *C) { {"varchar(10)", "varchar(8)", errUnsupportedModifyColumn.GenWithStackByArgs("length 8 is less than origin 10")}, {"varchar(10)", "varchar(11)", nil}, {"varchar(10) character set utf8 collate utf8_bin", "varchar(10) character set utf8", nil}, - {"decimal(2,1)", "decimal(3,2)", errUnsupportedModifyColumn.GenWithStackByArgs("can't change decimal column precision")}, - {"decimal(2,1)", "decimal(2,2)", errUnsupportedModifyColumn.GenWithStackByArgs("can't change decimal column precision")}, + {"decimal(2,1)", "decimal(3,2)", errUnsupportedModifyColumn.GenWithStackByArgs("decimal change from decimal(2, 1) to decimal(3, 2), and tidb_enable_change_column_type is false")}, + {"decimal(2,1)", "decimal(2,2)", errUnsupportedModifyColumn.GenWithStackByArgs("decimal change from decimal(2, 1) to decimal(2, 2), and tidb_enable_change_column_type is false")}, {"decimal(2,1)", "decimal(2,1)", nil}, {"decimal(2,1)", "int", errUnsupportedModifyColumn.GenWithStackByArgs("type int(11) not match origin decimal(2,1)")}, {"decimal", "int", errUnsupportedModifyColumn.GenWithStackByArgs("type int(11) not match origin decimal(11,0)")},
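The getRowRecord change above reduces to a small decision rule: truncation (IsNormalWarning) is always downgraded to a warning, overflow (IsStrictWarning) is downgraded only when strict SQL mode is off, and any other cast error aborts the backfill. A compact, runnable restatement of that rule, with booleans standing in for the two error predicates:

package main

import "fmt"

// keepAsWarning mirrors the condition in getRowRecord above:
// IsNormalWarning(err) || (!w.sqlMode.HasStrictMode() && IsStrictWarning(err)).
func keepAsWarning(isTruncate, isOverflow, strictMode bool) bool {
	if isTruncate {
		return true // e.g. types.ErrTruncatedWrongVal: kept in every SQL mode
	}
	if isOverflow && !strictMode {
		return true // e.g. types.ErrOverflow: a warning only outside strict mode
	}
	return false // any other cast error fails the backfill
}

func main() {
	fmt.Println(keepAsWarning(true, false, true))  // true: truncation under strict mode
	fmt.Println(keepAsWarning(false, true, true))  // false: overflow under strict mode
	fmt.Println(keepAsWarning(false, true, false)) // true: overflow under non-strict mode
}

The tests in db_test.go below exercise exactly these three branches.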
diff --git a/ddl/db_test.go b/ddl/db_test.go index aa2dcc26192a0..1e1848b44fcba 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -79,6 +79,7 @@ var _ = Suite(&testDBSuite7{&testDBSuite{}}) var _ = SerialSuites(&testSerialDBSuite{&testDBSuite{}}) const defaultBatchSize = 1024 +const defaultReorgBatchSize = 256 type testDBSuite struct { cluster cluster.Cluster @@ -5767,3 +5768,103 @@ func (s *testSerialDBSuite) TestColumnTypeChangeGenUniqueChangingName(c *C) { tk.MustExec("drop table if exists t") } + +func (s *testSerialDBSuite) TestModifyColumnTypeWithWarnings(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + // Enable column change variable. + tk.Se.GetSessionVars().EnableChangeColumnType = true + + // Test normal warnings. + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a decimal(5,2))") + tk.MustExec("insert into t values(111.22),(111.22),(111.22),(111.22),(333.4)") + // 111.22 will have its fraction .22 truncated to .2, with a truncation warning for each row. + tk.MustExec("alter table t modify column a decimal(4,1)") + // There should be 4 rows of warnings corresponding to the origin rows. + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect DECIMAL value: '111.22'", + "Warning 1292 Truncated incorrect DECIMAL value: '111.22'", + "Warning 1292 Truncated incorrect DECIMAL value: '111.22'", + "Warning 1292 Truncated incorrect DECIMAL value: '111.22'")) + + // Test that strict warnings are treated as errors under strict mode. + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a decimal(5,2))") + tk.MustExec("insert into t values(111.22),(111.22),(111.22),(33.4)") + // Since column a is modified from decimal(5,2) to decimal(3,1), the first three rows with 111.22 will overflow the target type. + _, err := tk.Exec("alter table t modify column a decimal(3,1)") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[types:1690]DECIMAL value is out of range in '(3, 1)'") + + // Test that strict warnings are treated as warnings under non-strict mode. + tk.MustExec("set @@sql_mode=\"\"") + tk.MustExec("alter table t modify column a decimal(3,1)") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1690 DECIMAL value is out of range in '(3, 1)'", + "Warning 1690 DECIMAL value is out of range in '(3, 1)'", + "Warning 1690 DECIMAL value is out of range in '(3, 1)'")) +}
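The interception test that follows pins its mid-reorg expectations on a small computation: 4 regions of 1024 rows are backfilled, and the mocked timeout kills only the last reorg batch of the third region, so the half-finished job should have recorded 4*1024 - 256 warnings and added rows when the hook observes it. Stated as runnable arithmetic:

package main

import "fmt"

func main() {
	const defaultBatchSize = 1024     // rows per region in the test table
	const defaultReorgBatchSize = 256 // matches the constant added to db_test.go
	count := defaultBatchSize * 4     // 4096 rows spread over 4 regions
	// Only the final batch [2816, 3072) of region [2048, 3072) fails to
	// commit, so the partially finished job should report 4096 - 256 rows
	// and warnings.
	fmt.Println(count - defaultReorgBatchSize) // 3840
}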
+ var middleWarningsCount = int64(defaultBatchSize*4 - defaultReorgBatchSize)
+ hook.OnJobUpdatedExported = func(job *model.Job) {
+ if job.SchemaState == model.StateWriteReorganization || job.SnapshotVer != 0 {
+ if len(job.ReorgMeta.WarningsCount) == len(job.ReorgMeta.Warnings) {
+ for _, v := range job.ReorgMeta.WarningsCount {
+ if v == middleWarningsCount {
+ checkMiddleWarningCount = true
+ }
+ }
+ }
+ if job.RowCount == middleWarningsCount {
+ checkMiddleAddedCount = true
+ }
+ }
+ }
+ originHook := d.GetHook()
+ d.(ddl.DDLForTest).SetHook(hook)
+ defer d.(ddl.DDLForTest).SetHook(originHook)
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/MockReorgTimeoutInOneRegion", `return(true)`), IsNil)
+ defer func() {
+ c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/MockReorgTimeoutInOneRegion"), IsNil)
+ }()
+ tk.MustExec("alter table t modify column b decimal(3,1)")
+ c.Assert(checkMiddleWarningCount, Equals, true)
+ c.Assert(checkMiddleAddedCount, Equals, true)
+ res := tk.MustQuery("show warnings")
+ c.Assert(len(res.Rows()), Equals, count)
+}
diff --git a/ddl/ddl.go b/ddl/ddl.go
index 3510c463f99ec..a764d51e0b755 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -523,6 +523,18 @@ func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error {
 // If a job is a history job, the state must be JobStateSynced or JobStateRollbackDone or JobStateCancelled.
 if historyJob.IsSynced() {
+ // Check whether any warnings occurred while executing the DDL under the current SQL mode.
+ if historyJob.ReorgMeta != nil && len(historyJob.ReorgMeta.Warnings) != 0 {
+ if len(historyJob.ReorgMeta.Warnings) != len(historyJob.ReorgMeta.WarningsCount) {
+ logutil.BgLogger().Info("[ddl] DDL warnings don't match the warnings count", zap.Int64("jobID", jobID))
+ } else {
+ for key, warning := range historyJob.ReorgMeta.Warnings {
+ for j := int64(0); j < historyJob.ReorgMeta.WarningsCount[key]; j++ {
+ ctx.GetSessionVars().StmtCtx.AppendWarning(warning)
+ }
+ }
+ }
+ }
 logutil.BgLogger().Info("[ddl] DDL job is finished", zap.Int64("jobID", jobID))
 return nil
 }
diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index cf26fcd44dae4..af6a4ebe6875d 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -36,6 +36,7 @@ import (
 "github.com/pingcap/parser/format"
 "github.com/pingcap/parser/model"
 "github.com/pingcap/parser/mysql"
+ "github.com/pingcap/parser/terror"
 field_types "github.com/pingcap/parser/types"
 "github.com/pingcap/tidb/config"
 "github.com/pingcap/tidb/ddl/placement"
@@ -3308,9 +3309,12 @@ func CheckModifyTypeCompatible(origin *types.FieldType, to *types.FieldType) (al
 if origin.Tp != to.Tp {
 return "", errUnsupportedModifyColumn.GenWithStackByArgs(unsupportedMsg)
 }
- // The root cause is modifying decimal precision needs to rewrite binary representation of that decimal.
- if to.Flen != origin.Flen || to.Decimal != origin.Decimal {
- return "", errUnsupportedModifyColumn.GenWithStackByArgs("can't change decimal column precision")
+ // Floating-point and fixed-point types also can be UNSIGNED. As with integer types, this attribute prevents
+ // negative values from being stored in the column. Unlike the integer types, the upper range of column values
+ // remains the same.
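+ // Hence toggling the unsigned attribute, like changing the precision, requires the stored
+ // decimals to be rewritten, so it is rejected here as well.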
+ if to.Flen != origin.Flen || to.Decimal != origin.Decimal || mysql.HasUnsignedFlag(to.Flag) != mysql.HasUnsignedFlag(origin.Flag) {
+ msg := fmt.Sprintf("decimal change from decimal(%d, %d) to decimal(%d, %d)", origin.Flen, origin.Decimal, to.Flen, to.Decimal)
+ return msg, errUnsupportedModifyColumn.GenWithStackByArgs(msg)
 }
 default:
 if origin.Tp != to.Tp {
@@ -3342,9 +3346,9 @@ func CheckModifyTypeCompatible(origin *types.FieldType, to *types.FieldType) (al
 return "", nil
 }
-// checkModifyTypes checks if the 'origin' type can be modified to 'to' type with out the need to
+// checkModifyTypes checks if the 'origin' type can be modified to 'to' type without the need to
 // change or check existing data in the table.
-// It returns error if the two types has incompatible Charset and Collation, different sign, different
+// It returns an error if the two types have incompatible charset and collation, different sign, different
 // digital/string types, or length of new Flen and Decimal is less than origin.
 func checkModifyTypes(ctx sessionctx.Context, origin *types.FieldType, to *types.FieldType, needRewriteCollationData bool) error {
 changeColumnValueMsg, err := CheckModifyTypeCompatible(origin, to)
@@ -3645,7 +3649,12 @@ func (d *ddl) getModifiableColumnJob(ctx sessionctx.Context, ident ast.Ident, or
 SchemaName: schema.Name.L,
 Type: model.ActionModifyColumn,
 BinlogInfo: &model.HistoryInfo{},
- Args: []interface{}{&newCol, originalColName, spec.Position, modifyColumnTp, newAutoRandBits},
+ ReorgMeta: &model.DDLReorgMeta{
+ SQLMode: ctx.GetSessionVars().SQLMode,
+ Warnings: make(map[errors.ErrorID]*terror.Error),
+ WarningsCount: make(map[errors.ErrorID]int64),
+ },
+ Args: []interface{}{&newCol, originalColName, spec.Position, modifyColumnTp, newAutoRandBits},
 }
 return job, nil
 }
@@ -3818,7 +3827,12 @@ func (d *ddl) RenameColumn(ctx sessionctx.Context, ident ast.Ident, spec *ast.Al
 SchemaName: schema.Name.L,
 Type: model.ActionModifyColumn,
 BinlogInfo: &model.HistoryInfo{},
- Args: []interface{}{&newCol, oldColName, spec.Position, 0},
+ ReorgMeta: &model.DDLReorgMeta{
+ SQLMode: ctx.GetSessionVars().SQLMode,
+ Warnings: make(map[errors.ErrorID]*terror.Error),
+ WarningsCount: make(map[errors.ErrorID]int64),
+ },
+ Args: []interface{}{&newCol, oldColName, spec.Position, 0},
 }
 err = d.doDDLJob(ctx, job)
 err = d.callHookOnChanged(err)
@@ -4456,8 +4470,13 @@ func (d *ddl) CreatePrimaryKey(ctx sessionctx.Context, ti ast.Ident, indexName m
 SchemaName: schema.Name.L,
 Type: model.ActionAddPrimaryKey,
 BinlogInfo: &model.HistoryInfo{},
- Args: []interface{}{unique, indexName, indexPartSpecifications, indexOption, sqlMode, nil, global},
- Priority: ctx.GetSessionVars().DDLReorgPriority,
+ ReorgMeta: &model.DDLReorgMeta{
+ SQLMode: ctx.GetSessionVars().SQLMode,
+ Warnings: make(map[errors.ErrorID]*terror.Error),
+ WarningsCount: make(map[errors.ErrorID]int64),
+ },
+ Args: []interface{}{unique, indexName, indexPartSpecifications, indexOption, sqlMode, nil, global},
+ Priority: ctx.GetSessionVars().DDLReorgPriority,
 }
 err = d.doDDLJob(ctx, job)
@@ -4620,8 +4639,13 @@ func (d *ddl) CreateIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde
 SchemaName: schema.Name.L,
 Type: model.ActionAddIndex,
 BinlogInfo: &model.HistoryInfo{},
- Args: []interface{}{unique, indexName, indexPartSpecifications, indexOption, hiddenCols, global},
- Priority: ctx.GetSessionVars().DDLReorgPriority,
+ ReorgMeta: &model.DDLReorgMeta{
+ SQLMode: ctx.GetSessionVars().SQLMode,
+ Warnings: make(map[errors.ErrorID]*terror.Error),
+ WarningsCount: make(map[errors.ErrorID]int64),
+ },
+ Args: []interface{}{unique, indexName, indexPartSpecifications, indexOption, hiddenCols, global},
+ Priority: ctx.GetSessionVars().DDLReorgPriority,
 }
 err = d.doDDLJob(ctx, job)
diff --git a/ddl/index.go b/ddl/index.go
index 4c8133968fd3f..c94f90e53c679 100644
--- a/ddl/index.go
+++ b/ddl/index.go
@@ -816,9 +816,11 @@ type addIndexWorker struct {
 idxKeyBufs [][]byte
 batchCheckKeys []kv.Key
 distinctCheckFlags []bool
+
+ sqlMode mysql.SQLMode
 }
-func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column) *addIndexWorker {
+func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column, sqlMode mysql.SQLMode) *addIndexWorker {
 index := tables.NewIndex(t.GetPhysicalID(), t.Meta(), indexInfo)
 rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap)
 return &addIndexWorker{
@@ -828,6 +830,7 @@ func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t tab
 rowDecoder: rowDecoder,
 defaultVals: make([]types.Datum, len(t.WritableCols())),
 rowMap: make(map[int64]types.Datum, len(decodeColMap)),
+ sqlMode: sqlMode,
 }
 }
diff --git a/ddl/reorg.go b/ddl/reorg.go
index c65035f98a665..12ed72604ff16 100644
--- a/ddl/reorg.go
+++ b/ddl/reorg.go
@@ -17,6 +17,7 @@ import (
 "context"
 "fmt"
 "strconv"
+ "sync"
 "sync/atomic"
 "time"
@@ -61,6 +62,14 @@ type reorgCtx struct {
 notifyCancelReorgJob int32
 // doneHandle is used to simulate the handle that has been processed.
 doneHandle atomic.Value // nullableHandle
+
+ // warnings is used to store the warnings when doing the reorg job under
+ // a certain SQL mode.
+ mu struct {
+ sync.Mutex
+ warnings map[errors.ErrorID]*terror.Error
+ warningsCount map[errors.ErrorID]int64
+ }
 }
 // nullableHandle can store handle.
@@ -111,6 +120,22 @@ func (rc *reorgCtx) setNextHandle(doneHandle kv.Handle) {
 rc.doneHandle.Store(nullableHandle{handle: doneHandle})
 }
+func (rc *reorgCtx) mergeWarnings(warnings map[errors.ErrorID]*terror.Error, warningsCount map[errors.ErrorID]int64) {
+ if len(warnings) == 0 || len(warningsCount) == 0 {
+ return
+ }
+ rc.mu.Lock()
+ defer rc.mu.Unlock()
+ rc.mu.warnings, rc.mu.warningsCount = mergeWarningsAndWarningsCount(warnings, rc.mu.warnings, warningsCount, rc.mu.warningsCount)
+}
+
+func (rc *reorgCtx) resetWarnings() {
+ rc.mu.Lock()
+ defer rc.mu.Unlock()
+ rc.mu.warnings = make(map[errors.ErrorID]*terror.Error)
+ rc.mu.warningsCount = make(map[errors.ErrorID]int64)
+}
+
 func (rc *reorgCtx) increaseRowCount(count int64) {
 atomic.AddInt64(&rc.rowCount, count)
 }
@@ -124,11 +149,21 @@ func (rc *reorgCtx) getRowCountAndHandle() (int64, kv.Handle) {
 func (rc *reorgCtx) clean() {
 rc.setRowCount(0)
 rc.setNextHandle(nil)
+ rc.resetWarnings()
 rc.doneCh = nil
 }
 func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model.TableInfo, lease time.Duration, f func() error) error {
 job := reorgInfo.Job
+ // This is for test compatibility, because most of the early tests try to build the reorg job manually
+ // without reorg meta info, which would cause a nil pointer dereference here.
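+ // In that case, fall back to ModeNone and empty warning maps so those jobs can still run.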
+ if job.ReorgMeta == nil { + job.ReorgMeta = &model.DDLReorgMeta{ + SQLMode: mysql.ModeNone, + Warnings: make(map[errors.ErrorID]*terror.Error), + WarningsCount: make(map[errors.ErrorID]int64), + } + } if w.reorgCtx.doneCh == nil { // start a reorganization job w.wg.Add(1) @@ -136,6 +171,8 @@ func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model. // initial reorgCtx w.reorgCtx.setRowCount(job.GetRowCount()) w.reorgCtx.setNextHandle(reorgInfo.StartHandle) + w.reorgCtx.mu.warnings = make(map[errors.ErrorID]*terror.Error) + w.reorgCtx.mu.warningsCount = make(map[errors.ErrorID]int64) go func() { defer w.wg.Done() w.reorgCtx.doneCh <- f() @@ -159,6 +196,13 @@ func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model. logutil.BgLogger().Info("[ddl] run reorg job done", zap.Int64("handled rows", rowCount)) // Update a job's RowCount. job.SetRowCount(rowCount) + + // Update a job's warnings. + w.mergeWarningsIntoJob(job) + + if err == nil { + metrics.AddIndexProgress.Set(100) + } w.reorgCtx.clean() if err != nil { return errors.Trace(err) @@ -173,6 +217,7 @@ func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model. logutil.BgLogger().Info("[ddl] run reorg job quit") w.reorgCtx.setNextHandle(nil) w.reorgCtx.setRowCount(0) + w.reorgCtx.resetWarnings() // We return errWaitReorgTimeout here too, so that outer loop will break. return errWaitReorgTimeout case <-time.After(waitTimeout): @@ -180,6 +225,11 @@ func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model. // Update a job's RowCount. job.SetRowCount(rowCount) updateAddIndexProgress(w, tblInfo, rowCount) + + // Update a job's warnings. + w.mergeWarningsIntoJob(job) + + w.reorgCtx.resetWarnings() // Update a reorgInfo's handle. err := t.UpdateDDLReorgStartHandle(job, reorgInfo.currElement, doneHandle) logutil.BgLogger().Info("[ddl] run reorg job wait timeout", zap.Duration("waitTime", waitTimeout), @@ -191,6 +241,14 @@ func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model. 
return nil } +func (w *worker) mergeWarningsIntoJob(job *model.Job) { + w.reorgCtx.mu.Lock() + partWarnings := w.reorgCtx.mu.warnings + partWarningsCount := w.reorgCtx.mu.warningsCount + job.SetWarnings(mergeWarningsAndWarningsCount(partWarnings, job.ReorgMeta.Warnings, partWarningsCount, job.ReorgMeta.WarningsCount)) + w.reorgCtx.mu.Unlock() +} + func updateAddIndexProgress(w *worker, tblInfo *model.TableInfo, addedRowCount int64) { if tblInfo == nil || addedRowCount == 0 { return diff --git a/executor/admin_test.go b/executor/admin_test.go index c4d6bc7f00451..01a4556f46000 100644 --- a/executor/admin_test.go +++ b/executor/admin_test.go @@ -1021,7 +1021,7 @@ func (s *testSuite8) TestAdminCheckTable(c *C) { tk.MustExec(`insert into t1 set a='1.9'`) err = tk.ExecToErr(`alter table t1 modify column a decimal(3,2);`) c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: can't change decimal column precision") + c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported modify column: decimal change from decimal(2, 1) to decimal(3, 2), and tidb_enable_change_column_type is false") tk.MustExec(`delete from t1;`) tk.MustExec(`admin check table t1;`) } From 1d34c333f4e86f5993cace8ef5a640622bd9fe7b Mon Sep 17 00:00:00 2001 From: Shenghui Wu <793703860@qq.com> Date: Tue, 29 Sep 2020 16:11:32 +0800 Subject: [PATCH 10/16] expression: avoid constant propagate for the hybrid types (#20258) --- expression/constant_propagation.go | 7 ++++++- expression/integration_test.go | 19 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/expression/constant_propagation.go b/expression/constant_propagation.go index 264bb0dd5494a..f236ad722b3d0 100644 --- a/expression/constant_propagation.go +++ b/expression/constant_propagation.go @@ -228,7 +228,8 @@ func (s *propConstSolver) propagateColumnEQ() { if fun, ok := s.conditions[i].(*ScalarFunction); ok && fun.FuncName.L == ast.EQ { lCol, lOk := fun.GetArgs()[0].(*Column) rCol, rOk := fun.GetArgs()[1].(*Column) - if lOk && rOk && lCol.GetType().Collate == rCol.GetType().Collate { + // TODO: Enable hybrid types in ConstantPropagate. + if lOk && rOk && lCol.GetType().Collate == rCol.GetType().Collate && !lCol.GetType().Hybrid() && !rCol.GetType().Hybrid() { lID := s.getColID(lCol) rID := s.getColID(rCol) s.unionSet.Union(lID, rID) @@ -301,6 +302,10 @@ func (s *propConstSolver) pickNewEQConds(visited []bool) (retMapper map[int]*Con } continue } + // TODO: Enable hybrid types in ConstantPropagate. 
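+ // Hybrid types such as ENUM, SET and BIT can be compared either as numbers or as strings
+ // depending on context, so substituting a constant for the column here could change the
+ // comparison result (see TestIssue20180 below).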
+ if col.GetType().Hybrid() {
+ continue
+ }
 visited[i] = true
 updated, foreverFalse := s.tryToUpdateEQList(col, con)
 if foreverFalse {
diff --git a/expression/integration_test.go b/expression/integration_test.go
index 2dc2d9c0817a4..bb97eb95a1f46 100755
--- a/expression/integration_test.go
+++ b/expression/integration_test.go
@@ -7410,3 +7410,22 @@ func (s *testIntegrationSuite) TestIssue11645(c *C) {
 tk.MustQuery(`SELECT DATE_ADD('0001-01-02 00:00:00', INTERVAL -25 HOUR);`).Check(testkit.Rows("0000-00-00 23:00:00"))
 tk.MustQuery(`SELECT DATE_ADD('0001-01-02 00:00:00', INTERVAL -8785 HOUR);`).Check(testkit.Rows("0000-00-00 23:00:00"))
 }
+
+func (s *testIntegrationSuite) TestIssue20180(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t;")
+ tk.MustExec("drop table if exists t1;")
+ tk.MustExec("create table t(a enum('a', 'b'), b tinyint);")
+ tk.MustExec("create table t1(c varchar(20));")
+ tk.MustExec("insert into t values('b', 0);")
+ tk.MustExec("insert into t1 values('b');")
+ tk.MustQuery("select * from t, t1 where t.a= t1.c;").Check(testkit.Rows("b 0 b"))
+ tk.MustQuery("select * from t, t1 where t.b= t1.c;").Check(testkit.Rows("b 0 b"))
+ tk.MustQuery("select * from t, t1 where t.a = t1.c and t.b= t1.c;").Check(testkit.Rows("b 0 b"))
+
+ tk.MustExec("drop table if exists t;")
+ tk.MustExec("create table t(a enum('a','b'));")
+ tk.MustExec("insert into t values('b');")
+ tk.MustQuery("select * from t where a > 1 and a = \"b\";").Check(testkit.Rows("b"))
+}
From 891f84b9e9d90b18d04b883bb7e608393b0e4b71 Mon Sep 17 00:00:00 2001
From: tiancaiamao
Date: Tue, 29 Sep 2020 16:21:16 +0800
Subject: [PATCH 11/16] *: remove an unused file (#20294)

---
 profile | Bin 22772 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 profile

diff --git a/profile b/profile
deleted file mode 100644
index 101b4f78def78b59f0758138bde4f712d7d5ba08..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 22772
zI1ZZmOi@WeJU1V?EWfxgQcyd+C|W-^FYk@Kyja$ok*o&Mim>4}%E+&m8_$mvA~v3K z-=){`Ix2{M9e<}N8hyVYJNk8IP33zQ_FLNr$ox)ue7n#Y`4OB54#3@4>CUU1tV~nB z`7&CZmzx!>kglBtW(WZ6=tut{mK>xamfkO@AIp5w?Fbi`6)lJtMe=H=iCD%o*UKjBc$$J=ziW?Qz&5jf`%q_tASza&2(fmlm z$d}QQ!p!eJ7{9R!+ZM#)(f@bbyja$kb#WLR_c4Fmc%&$vpZUy#$1E(02}Y^pHNl|@ zC1#7F#nE_iVQxV|Zb6P@r{b&z(d?4Es2r*=QuJlreA!RFP#F&)@JMmKZ@Hw_YZQ+b zr`JlWQ&PV^3b@}E<;J6R8pWd(&0PRz)Dxh%+!Sx_IkB8rW@Yccx}P>Gjz{w)`R5eH z8VZKae9ECAXT;pRY=6j4g;cLddbuq!gy@ZmabJB9okhPER!?rMApPyH3-cnm1#kOr zKFBSOXFk9Xdzd*S`@~1=6PNzZgQkGW+%s3wYo&eMpiyydRwVD^q8RdjZmgiTH#H%r z{tNp|9YLMa&;P?IVfzZ~Gr50Rv4XEYd-;bC6VJKzBUwVkJ~d)r# z@pq%bwy&N3Mpjm|xcDPSGk^TQM}XRSMj>`xHzyjei?0zCuRmxX5a5B6as`)=4poZd zMaYfm@1U;{e%iB}|;U9gP$@tDv<4`h)9H_;ITXf*vQmL1JZ_4ZB(Ys7av=8$A& zb^OmEOh-|;h<{pHW6<4p46^^Lr|tCO>#GM3=UB-P67UtcE`ixPuFH$=aJYH@sdbJM z^n7gv7oKDEANUlx1-XJA(WOAstRP-2RcBG8p}PZBz0L7o#q$djwydb6KyFzzXUl#S zZRjX>cgK7+ogc~mDz`W)Ulcbg{!MW-`(b8Qel%Y(vs5pkK@=sSu>XaHU6Ie~+V%n# zar}h)290#3z3b^!ulTDp?(!lq}`m;r~T(X~yK| zM+y~Ft5LDjd8*e*LA2o;MbU!Dhml6VDL}(d_FoWf_+g~z%R0I7;!MkBM|7+6BXRMr z0!z?^%E)z@)^uOE%>I5sJTt|6^?p1Sd9bcc;A$s2_5NFO;^=G$S@)tZbiFm@NAvSz zUqv%(msct-2yQxp_)O@xZ4fEQj@7SUk=5q6&B#yP_F_9-mbUJ}Tc(XXNfak=1v zXuL3%mzVjy2P4Y`sr%*MN}?q~lK8GLHpLlARz{z!sHC7EQZG-DzPLOjn)KXaTvRk$ zl6+24w2|vcN*^aHQjqu#J9hP&KfH_EU#ZJ}qqsP?I4;Qi#Z2Ee$x+`6Y`!QRlfXy$ z8u?dDx*oDX1$$js5`QOB9G9XEqqZ2#cu`K#dzD%1!3f4CbqiwkBVP*a`ZOS^Dqhs+ z-KaBIHJ1+M^f1mL*|so}BV_K^AJtdUB20@qk)op9s6q`Garvme+&#}l{W3Q%Ptu6v zt9~dKRag{_<`>34eAuF#Ms-UHa=%8w=e|@L}F!eomHv!D;!VB;Mep zd{Iy@N4%4)Q<5Mcb7I9=4Wila=NINl5^!JE&Ch&P#PjRLvKwW-P=4=o3*ynDf=Hh5 zKqGlM83NiDNPcYZO_3CM_rKG_Innrsk+0vuIK&6hf}D5*Nj++aV~^)~GQ0UmDyW6-J99@mP^CZh3K|f-H2AW3fEpAXenqAWTMKEGEKn0$ByohGN#y zBF8KGRIIGNN>oMBEcB@!W`h()3UY-GaIGZY`*6jb7^;?oPfT3+q#w!pTS;zFH2+~d z<8tbY$TL1|qG|WFH=Lv$A!;PSa_Gz29fNqFt7Kv6Trxg6`&O+8=)6~ zX%4h@GAkE1D#i(=yMHq)xeu;<&n(ZeE?S@oe zQKGGxUMuaLw`#k+q?MmY063$5w%-kpp{M^&ZeG+=Qx&)`vEmHGU4@ODi9b<(NVgHL zG6{8)kmU6<|I2J#BwOU--it=EqeYUy(S9z9#T2_eR@?~Hx}N_gE6a$4(6v>kpkB&D=dkB94~s9u0WZCjSwZi=th&i zo|vPrnekQqqDX!e-6_?^!w*gmPVzR;iTEIw;$g-Gm7Nzx3O=g;n}RG%4o9;ypO!0f zF@s7F{d%{hpS8doO(W?!U=rigtjJf`9mlKVD;zTxtS@Wy^ zV;nInmY*Lh5J5yIaqZ4AJ-aAYn3I|6Sd+-0b-Wm{8`<#%Sgo|&c%^^Kr8kA%=Z#$B9*7-0ETbRuZrOD(+h<%8UdP+FiVB0DFeC`?j*vK)kJ=AOF-d!d*Mr zg?c0_zfi2N0%=`teOa;m!bp)^-Ov6*Zv%J28Sk^0mlb;dt^X$O9t`(nN9FK|D_+6qQlAT>t{8{W%fyzdL8r3D>s$Gf|i+2E&I-Ep+ z&tgs;&hzC(rHeD%k({VsB;wOhJz=bW+90=nTu#qV6UOqg9e`pVj+&e$^4GT`?b=dUpr}}+UcJ*h!tU-+gs7B-26yhMMjF-EF=1LygufhaOSc&Nqas*@MLiT$CT*%{iQ3)2FxI$4HDVXlf7CVJ9r=n^3 zh^*&?=x|!E|A%6@Tvbe79!{+wp6Od3$mVE2BGDDg*M7_l6E>iZTW#mP--~=- zSQ3|{=!fDny->W5Ck$H@Wuq-7!E2?}&;43ZUTuc}ALd5od$5QQg+^UA6Wx4>t8zKf zAK7(Jv1ZUWjq1mW-i~B7_*lx^oGf&N-ij4?K@vrlcpfKTNNcCen?$1v$07Y+VI$^n_MkNFrR+v=~clh@| z)!9-{e-@Ma`{7Fyi;QNbJ#4lP%x5uozJ4C9u%upIZdN3o8!M<2DJsg17XAMN00960 LImXdKM#KOBC_wRW From 21549e46c6612dfb173bc9ee7395ae59b97a43d7 Mon Sep 17 00:00:00 2001 From: Calvin Weng Date: Tue, 29 Sep 2020 19:00:27 +0800 Subject: [PATCH 12/16] README: update contributing guide (#20262) --- CONTRIBUTING.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d3d19f9a12378..24ccc1707deab 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,3 @@ -# Contribution Guide +# Contributing Guide -See the [Contribution Guide](https://github.com/pingcap/community/blob/master/CONTRIBUTING.md) in the -[community](https://github.com/pingcap/community) repo. 
+See the [Contributing Guide](https://github.com/pingcap/community/blob/master/contributors/README.md) in the [community](https://github.com/pingcap/community) repository.
\ No newline at end of file
From 170e2fec92d245330ef5f789f38c8499d2ac9e20 Mon Sep 17 00:00:00 2001
From: lysu
Date: Tue, 29 Sep 2020 21:25:56 +0800
Subject: [PATCH 13/16] *: collect multiple partitions and store in tidb (part
 II) (#19899)

---
 distsql/request_builder.go | 89 +++++--
 domain/domainctx.go | 14 --
 executor/analyze.go | 86 +++----
 executor/analyze_test.go | 113 ++++-----
 executor/builder.go | 10 +-
 executor/distsql.go | 2 +-
 executor/index_merge_reader.go | 2 +-
 executor/show_stats_test.go | 51 ++--
 planner/core/integration_test.go | 41 ++--
 planner/core/planbuilder.go | 86 ++++++-
 planner/core/testdata/analyze_suite_out.json | 2 +-
 session/session.go | 13 +
 sessionctx/context.go | 4 +
 sessionctx/variable/session.go | 7 +-
 statistics/handle/ddl_test.go | 127 +++++-----
 statistics/handle/gc_test.go | 53 +++--
 statistics/handle/handle.go | 35 ++-
 statistics/handle/update.go | 18 +-
 statistics/handle/update_test.go | 235 ++++++++++---------
 util/mock/context.go | 5 +
 util/testkit/testkit.go | 8 +
 21 files changed, 576 insertions(+), 425 deletions(-)

diff --git a/distsql/request_builder.go b/distsql/request_builder.go
index dcae06ea677c0..944c56bd5200d 100644
--- a/distsql/request_builder.go
+++ b/distsql/request_builder.go
@@ -48,6 +48,15 @@ func (builder *RequestBuilder) SetMemTracker(tracker *memory.Tracker) *RequestBu
 return builder
 }
+// SetTableRangesForTables sets "KeyRanges" for "kv.Request" by converting multiple "tableRanges"
+// to "KeyRanges" firstly.
+func (builder *RequestBuilder) SetTableRangesForTables(tids []int64, tableRanges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
+ if builder.err == nil {
+ builder.Request.KeyRanges = TablesRangesToKVRanges(tids, tableRanges, fb)
+ }
+ return builder
+}
+
 // SetTableRanges sets "KeyRanges" for "kv.Request" by converting "tableRanges"
 // to "KeyRanges" firstly.
 func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder {
@@ -66,9 +75,27 @@ func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid,
 return builder
 }
+// SetIndexRangesForTables sets "KeyRanges" for "kv.Request" by converting the index range
+// "ranges" to "KeyRanges" firstly for multiple tables.
+func (builder *RequestBuilder) SetIndexRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) *RequestBuilder {
+ if builder.err == nil {
+ builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil)
+ }
+ return builder
+}
+
 // SetCommonHandleRanges sets "KeyRanges" for "kv.Request" by converting common handle range
 // "ranges" to "KeyRanges" firstly.
 func (builder *RequestBuilder) SetCommonHandleRanges(sc *stmtctx.StatementContext, tid int64, ranges []*ranger.Range) *RequestBuilder {
+ if builder.err == nil {
+ builder.Request.KeyRanges, builder.err = CommonHandleRangesToKVRanges(sc, []int64{tid}, ranges)
+ }
+ return builder
+}
+
+// SetCommonHandleRangesForTables sets "KeyRanges" for "kv.Request" by converting common handle range
+// "ranges" to "KeyRanges" firstly for multiple tables.
+func (builder *RequestBuilder) SetCommonHandleRangesForTables(sc *stmtctx.StatementContext, tid []int64, ranges []*ranger.Range) *RequestBuilder {
 if builder.err == nil {
 builder.Request.KeyRanges, builder.err = CommonHandleRangesToKVRanges(sc, tid, ranges)
 }
@@ -222,8 +249,13 @@ func (builder *RequestBuilder) SetConcurrency(concurrency int) *RequestBuilder {
 // TableRangesToKVRanges converts table ranges to "KeyRange".
 func TableRangesToKVRanges(tid int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
+ return TablesRangesToKVRanges([]int64{tid}, ranges, fb)
+}
+
+// TablesRangesToKVRanges converts the ranges of multiple tables to "KeyRange".
+func TablesRangesToKVRanges(tids []int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange {
 if fb == nil || fb.Hist == nil {
- return tableRangesToKVRangesWithoutSplit(tid, ranges)
+ return tableRangesToKVRangesWithoutSplit(tids, ranges)
 }
 krs := make([]kv.KeyRange, 0, len(ranges))
 feedbackRanges := make([]*ranger.Range, 0, len(ranges))
@@ -243,21 +275,25 @@ func TableRangesToKVRanges(tid int64, ranges []*ranger.Range, fb *statistics.Que
 if !ran.HighExclude {
 high = kv.Key(high).PrefixNext()
 }
- startKey := tablecodec.EncodeRowKey(tid, low)
- endKey := tablecodec.EncodeRowKey(tid, high)
- krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
+ for _, tid := range tids {
+ startKey := tablecodec.EncodeRowKey(tid, low)
+ endKey := tablecodec.EncodeRowKey(tid, high)
+ krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
+ }
 }
 fb.StoreRanges(feedbackRanges)
 return krs
 }
-func tableRangesToKVRangesWithoutSplit(tid int64, ranges []*ranger.Range) []kv.KeyRange {
- krs := make([]kv.KeyRange, 0, len(ranges))
+func tableRangesToKVRangesWithoutSplit(tids []int64, ranges []*ranger.Range) []kv.KeyRange {
+ krs := make([]kv.KeyRange, 0, len(ranges)*len(tids))
 for _, ran := range ranges {
 low, high := encodeHandleKey(ran)
- startKey := tablecodec.EncodeRowKey(tid, low)
- endKey := tablecodec.EncodeRowKey(tid, high)
- krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
+ for _, tid := range tids {
+ startKey := tablecodec.EncodeRowKey(tid, low)
+ endKey := tablecodec.EncodeRowKey(tid, high)
+ krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
+ }
 }
 return krs
 }
@@ -346,8 +382,13 @@ func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange {
 // IndexRangesToKVRanges converts index ranges to "KeyRange".
 func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) {
+ return IndexRangesToKVRangesForTables(sc, []int64{tid}, idxID, ranges, fb)
+}
+
+// IndexRangesToKVRangesForTables converts index ranges to "KeyRange" for multiple tables.
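+// For every range, one KeyRange is produced for each table id in tids.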
+func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { if fb == nil || fb.Hist == nil { - return indexRangesToKVWithoutSplit(sc, tid, idxID, ranges) + return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges) } feedbackRanges := make([]*ranger.Range, 0, len(ranges)) for _, ran := range ranges { @@ -376,16 +417,18 @@ func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, range if !ran.HighExclude { high = kv.Key(high).PrefixNext() } - startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + for _, tid := range tids { + startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } } fb.StoreRanges(feedbackRanges) return krs, nil } // CommonHandleRangesToKVRanges converts common handle ranges to "KeyRange". -func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { +func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tids []int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { rans := make([]*ranger.Range, 0, len(ranges)) for _, ran := range ranges { low, high, err := encodeIndexKey(sc, ran) @@ -402,23 +445,27 @@ func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid int64, range low = kv.Key(low).PrefixNext() } ran.LowVal[0].SetBytes(low) - startKey := tablecodec.EncodeRowKey(tid, low) - endKey := tablecodec.EncodeRowKey(tid, high) - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + for _, tid := range tids { + startKey := tablecodec.EncodeRowKey(tid, low) + endKey := tablecodec.EncodeRowKey(tid, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } } return krs, nil } -func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { +func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { krs := make([]kv.KeyRange, 0, len(ranges)) for _, ran := range ranges { low, high, err := encodeIndexKey(sc, ran) if err != nil { return nil, err } - startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + for _, tid := range tids { + startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } } return krs, nil } diff --git a/domain/domainctx.go b/domain/domainctx.go index 325c7a00b0a1e..ba31bce418455 100644 --- a/domain/domainctx.go +++ b/domain/domainctx.go @@ -14,7 +14,6 @@ package domain import ( - "github.com/pingcap/parser/model" "github.com/pingcap/tidb/sessionctx" ) @@ -41,16 +40,3 @@ func GetDomain(ctx sessionctx.Context) *Domain { } return v } - -// CanRuntimePruneTbl indicates whether tbl support runtime prune. 
-func CanRuntimePruneTbl(ctx sessionctx.Context, tbl *model.TableInfo) bool { - if tbl.Partition == nil { - return false - } - return GetDomain(ctx).StatsHandle().CanRuntimePrune(tbl.ID, tbl.Partition.Definitions[0].ID) -} - -// CanRuntimePrune indicates whether tbl support runtime prune for table and first partition id. -func CanRuntimePrune(ctx sessionctx.Context, tid, p0Id int64) bool { - return GetDomain(ctx).StatsHandle().CanRuntimePrune(tid, p0Id) -} diff --git a/executor/analyze.go b/executor/analyze.go index 0561d81793b9d..a3aab93b09de3 100755 --- a/executor/analyze.go +++ b/executor/analyze.go @@ -269,9 +269,9 @@ func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRang var builder distsql.RequestBuilder var kvReqBuilder *distsql.RequestBuilder if e.isCommonHandle && e.idxInfo.Primary { - kvReqBuilder = builder.SetCommonHandleRanges(e.ctx.GetSessionVars().StmtCtx, e.tableID.CollectIDs[0], ranges) + kvReqBuilder = builder.SetCommonHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, e.tableID.CollectIDs, ranges) } else { - kvReqBuilder = builder.SetIndexRanges(e.ctx.GetSessionVars().StmtCtx, e.tableID.CollectIDs[0], e.idxInfo.ID, ranges) + kvReqBuilder = builder.SetIndexRangesForTables(e.ctx.GetSessionVars().StmtCtx, e.tableID.CollectIDs, e.idxInfo.ID, ranges) } kvReq, err := kvReqBuilder. SetAnalyzeRequest(e.analyzePB). @@ -452,9 +452,9 @@ func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectRe var builder distsql.RequestBuilder var reqBuilder *distsql.RequestBuilder if e.handleCols != nil && !e.handleCols.IsInt() { - reqBuilder = builder.SetCommonHandleRanges(e.ctx.GetSessionVars().StmtCtx, e.tableID.CollectIDs[0], ranges) + reqBuilder = builder.SetCommonHandleRangesForTables(e.ctx.GetSessionVars().StmtCtx, e.tableID.CollectIDs, ranges) } else { - reqBuilder = builder.SetTableRanges(e.tableID.CollectIDs[0], ranges, nil) + reqBuilder = builder.SetTableRangesForTables(e.tableID.CollectIDs, ranges, nil) } // Always set KeepOrder of the request to be true, in order to compute // correct `correlation` of columns. @@ -680,7 +680,7 @@ func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { } }() var partition string - if e.tblInfo.ID != e.tableID.PersistID { + if e.tableID.StoreAsCollectID() && e.tblInfo.ID != e.tableID.PersistID { for _, definition := range e.tblInfo.Partition.Definitions { if definition.ID == e.tableID.PersistID { partition = fmt.Sprintf(" partition(%s)", definition.Name.L) @@ -694,7 +694,7 @@ func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { } var recordSets []sqlexec.RecordSet recordSets, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) - if err != nil || len(recordSets) == 0 { + if err != nil { return } if len(recordSets) == 0 { @@ -746,36 +746,38 @@ func (e *AnalyzeFastExec) buildSampTask() (err error) { bo := tikv.NewBackofferWithVars(context.Background(), 500, nil) store, _ := e.ctx.GetStore().(tikv.Storage) e.cache = store.GetRegionCache() - startKey, endKey := tablecodec.GetTableHandleKeyRange(e.tableID.CollectIDs[0]) - targetKey := startKey accessRegionsCounter := 0 - for { - // Search for the region which contains the targetKey. 
- loc, err := e.cache.LocateKey(bo, targetKey)
- if err != nil {
- return err
- }
- if bytes.Compare(endKey, loc.StartKey) < 0 {
- break
- }
- accessRegionsCounter++
+ for _, pid := range e.tableID.CollectIDs {
+ startKey, endKey := tablecodec.GetTableHandleKeyRange(pid)
+ targetKey := startKey
+ for {
+ // Search for the region which contains the targetKey.
+ loc, err := e.cache.LocateKey(bo, targetKey)
+ if err != nil {
+ return err
+ }
+ if bytes.Compare(endKey, loc.StartKey) < 0 {
+ break
+ }
+ accessRegionsCounter++

- // Set the next search key.
- targetKey = loc.EndKey
+ // Set the next search key.
+ targetKey = loc.EndKey

- // If the KV pairs in the region all belonging to the table, add it to the sample task.
- if bytes.Compare(startKey, loc.StartKey) <= 0 && len(loc.EndKey) != 0 && bytes.Compare(loc.EndKey, endKey) <= 0 {
- e.sampTasks = append(e.sampTasks, loc)
- continue
- }
+ // If the KV pairs in the region all belong to the table, add it to the sample task.
+ if bytes.Compare(startKey, loc.StartKey) <= 0 && len(loc.EndKey) != 0 && bytes.Compare(loc.EndKey, endKey) <= 0 {
+ e.sampTasks = append(e.sampTasks, loc)
+ continue
+ }

- e.scanTasks = append(e.scanTasks, loc)
- if bytes.Compare(loc.StartKey, startKey) < 0 {
- loc.StartKey = startKey
- }
- if bytes.Compare(endKey, loc.EndKey) < 0 || len(loc.EndKey) == 0 {
- loc.EndKey = endKey
- break
+ e.scanTasks = append(e.scanTasks, loc)
+ if bytes.Compare(loc.StartKey, startKey) < 0 {
+ loc.StartKey = startKey
+ }
+ if bytes.Compare(endKey, loc.EndKey) < 0 || len(loc.EndKey) == 0 {
+ loc.EndKey = endKey
+ break
+ }
 }
 }
 fastAnalyzeHistogramAccessRegions.Observe(float64(accessRegionsCounter))
@@ -1140,15 +1142,15 @@ func (e *AnalyzeFastExec) buildStats() (hists []*statistics.Histogram, cms []*st
 // AnalyzeTestFastExec is for fast sample in unit test.
 type AnalyzeTestFastExec struct {
 AnalyzeFastExec
- Ctx sessionctx.Context
- PhysicalTableID int64
- HandleCols core.HandleCols
- ColsInfo []*model.ColumnInfo
- IdxsInfo []*model.IndexInfo
- Concurrency int
- Collectors []*statistics.SampleCollector
- TblInfo *model.TableInfo
- Opts map[ast.AnalyzeOptionType]uint64
+ Ctx sessionctx.Context
+ TableID core.AnalyzeTableID
+ HandleCols core.HandleCols
+ ColsInfo []*model.ColumnInfo
+ IdxsInfo []*model.IndexInfo
+ Concurrency int
+ Collectors []*statistics.SampleCollector
+ TblInfo *model.TableInfo
+ Opts map[ast.AnalyzeOptionType]uint64
 }
 // TestFastSample only test the fast sample in unit test.
@@ -1158,7 +1160,7 @@ func (e *AnalyzeTestFastExec) TestFastSample() error { e.colsInfo = e.ColsInfo e.idxsInfo = e.IdxsInfo e.concurrency = e.Concurrency - e.tableID = core.AnalyzeTableID{PersistID: e.PhysicalTableID, CollectIDs: []int64{e.PhysicalTableID}} + e.tableID = e.TableID e.wg = &sync.WaitGroup{} e.job = &statistics.AnalyzeJob{} e.tblInfo = e.TblInfo diff --git a/executor/analyze_test.go b/executor/analyze_test.go index 7642122a03c48..b3880ed9b434f 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -51,64 +51,66 @@ var _ = Suite(&testFastAnalyze{}) func (s *testSuite1) TestAnalyzePartition(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - createTable := `CREATE TABLE t (a int, b int, c varchar(10), primary key(a), index idx(b)) + testkit.WithPruneMode(tk, variable.StaticOnly, func() { + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + createTable := `CREATE TABLE t (a int, b int, c varchar(10), primary key(a), index idx(b)) PARTITION BY RANGE ( a ) ( PARTITION p0 VALUES LESS THAN (6), PARTITION p1 VALUES LESS THAN (11), PARTITION p2 VALUES LESS THAN (16), PARTITION p3 VALUES LESS THAN (21) )` - tk.MustExec(createTable) - for i := 1; i < 21; i++ { - tk.MustExec(fmt.Sprintf(`insert into t values (%d, %d, "hello")`, i, i)) - } - tk.MustExec("analyze table t") - - is := infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) - table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - pi := table.Meta().GetPartitionInfo() - c.Assert(pi, NotNil) - do, err := session.GetDomain(s.store) - c.Assert(err, IsNil) - handle := do.StatsHandle() - for _, def := range pi.Definitions { - statsTbl := handle.GetPartitionStats(table.Meta(), def.ID) - c.Assert(statsTbl.Pseudo, IsFalse) - c.Assert(len(statsTbl.Columns), Equals, 3) - c.Assert(len(statsTbl.Indices), Equals, 1) - for _, col := range statsTbl.Columns { - c.Assert(col.Len(), Greater, 0) - } - for _, idx := range statsTbl.Indices { - c.Assert(idx.Len(), Greater, 0) + tk.MustExec(createTable) + for i := 1; i < 21; i++ { + tk.MustExec(fmt.Sprintf(`insert into t values (%d, %d, "hello")`, i, i)) } - } + tk.MustExec("analyze table t") - tk.MustExec("drop table t") - tk.MustExec(createTable) - for i := 1; i < 21; i++ { - tk.MustExec(fmt.Sprintf(`insert into t values (%d, %d, "hello")`, i, i)) - } - tk.MustExec("alter table t analyze partition p0") - is = infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) - table, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - pi = table.Meta().GetPartitionInfo() - c.Assert(pi, NotNil) - - for i, def := range pi.Definitions { - statsTbl := handle.GetPartitionStats(table.Meta(), def.ID) - if i == 0 { + is := infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) + table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + pi := table.Meta().GetPartitionInfo() + c.Assert(pi, NotNil) + do, err := session.GetDomain(s.store) + c.Assert(err, IsNil) + handle := do.StatsHandle() + for _, def := range pi.Definitions { + statsTbl := handle.GetPartitionStats(table.Meta(), def.ID) c.Assert(statsTbl.Pseudo, IsFalse) c.Assert(len(statsTbl.Columns), Equals, 3) c.Assert(len(statsTbl.Indices), Equals, 1) - } else { - c.Assert(statsTbl.Pseudo, IsTrue) + for _, col := range statsTbl.Columns { + c.Assert(col.Len(), Greater, 0) + } + for _, idx := range statsTbl.Indices { + c.Assert(idx.Len(), Greater, 0) + } } - } 
+ + tk.MustExec("drop table t") + tk.MustExec(createTable) + for i := 1; i < 21; i++ { + tk.MustExec(fmt.Sprintf(`insert into t values (%d, %d, "hello")`, i, i)) + } + tk.MustExec("alter table t analyze partition p0") + is = infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) + table, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + pi = table.Meta().GetPartitionInfo() + c.Assert(pi, NotNil) + + for i, def := range pi.Definitions { + statsTbl := handle.GetPartitionStats(table.Meta(), def.ID) + if i == 0 { + c.Assert(statsTbl.Pseudo, IsFalse) + c.Assert(len(statsTbl.Columns), Equals, 3) + c.Assert(len(statsTbl.Indices), Equals, 1) + } else { + c.Assert(statsTbl.Pseudo, IsTrue) + } + } + }) } func (s *testSuite1) TestAnalyzeReplicaReadFollower(c *C) { @@ -264,14 +266,17 @@ func (s *testFastAnalyze) TestAnalyzeFastSample(c *C) { opts := make(map[ast.AnalyzeOptionType]uint64) opts[ast.AnalyzeOptNumSamples] = 20 mockExec := &executor.AnalyzeTestFastExec{ - Ctx: tk.Se.(sessionctx.Context), - HandleCols: handleCols, - ColsInfo: colsInfo, - IdxsInfo: indicesInfo, - Concurrency: 1, - PhysicalTableID: tbl.(table.PhysicalTable).GetPhysicalID(), - TblInfo: tblInfo, - Opts: opts, + Ctx: tk.Se.(sessionctx.Context), + HandleCols: handleCols, + ColsInfo: colsInfo, + IdxsInfo: indicesInfo, + Concurrency: 1, + TableID: core.AnalyzeTableID{ + CollectIDs: []int64{tbl.(table.PhysicalTable).GetPhysicalID()}, + PersistID: tbl.(table.PhysicalTable).GetPhysicalID(), + }, + TblInfo: tblInfo, + Opts: opts, } err = mockExec.TestFastSample() c.Assert(err, IsNil) diff --git a/executor/builder.go b/executor/builder.go index 7e2b01ba4358f..cbedfc175399b 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -2141,7 +2141,11 @@ func (b *executorBuilder) buildAnalyze(v *plannercore.Analyze) Executor { if enableFastAnalyze { b.buildAnalyzeFastIndex(e, task, v.Opts) } else { - e.tasks = append(e.tasks, b.buildAnalyzeIndexPushdown(task, v.Opts, autoAnalyze)) + if task.TableID.StoreAsCollectID() && len(task.TableID.CollectIDs) > 1 && !task.IndexInfo.Global && !task.IndexInfo.Unique { + b.buildAnalyzeFastIndex(e, task, v.Opts) + } else { + e.tasks = append(e.tasks, b.buildAnalyzeIndexPushdown(task, v.Opts, autoAnalyze)) + } } } if b.err != nil { @@ -3470,7 +3474,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l var tmpKvRanges []kv.KeyRange var err error if indexID == -1 { - tmpKvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, tableID, ranges) + tmpKvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{tableID}, ranges) } else { tmpKvRanges, err = distsql.IndexRangesToKVRanges(sc, tableID, indexID, ranges, nil) } @@ -3508,7 +3512,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l } // Index id is -1 means it's a common handle. 
if indexID == -1 { - return distsql.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, tableID, tmpDatumRanges) + return distsql.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tableID}, tmpDatumRanges) } return distsql.IndexRangesToKVRanges(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil) } diff --git a/executor/distsql.go b/executor/distsql.go index 347735c49594e..85184cc9f2c01 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -391,7 +391,7 @@ func (e *IndexLookUpExecutor) Open(ctx context.Context) error { sc := e.ctx.GetSessionVars().StmtCtx physicalID := getPhysicalTableID(e.table) if e.index.ID == -1 { - e.kvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, physicalID, e.ranges) + e.kvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, e.ranges) } else { e.kvRanges, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, e.ranges, e.feedback) } diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index db2eeb54536f3..689e466e3659c 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -111,7 +111,7 @@ func (e *IndexMergeReaderExecutor) Open(ctx context.Context) error { _, ok := plan[0].(*plannercore.PhysicalIndexScan) if !ok { if e.table.Meta().IsCommonHandle { - keyRanges, err := distsql.CommonHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(e.table), e.ranges[i]) + keyRanges, err := distsql.CommonHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, []int64{getPhysicalTableID(e.table)}, e.ranges[i]) if err != nil { return err } diff --git a/executor/show_stats_test.go b/executor/show_stats_test.go index 87d667a886a37..270c35f5abf2d 100644 --- a/executor/show_stats_test.go +++ b/executor/show_stats_test.go @@ -18,6 +18,7 @@ import ( "time" . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/util/testkit" ) @@ -174,35 +175,37 @@ func (s *testShowStatsSuite) TestShowStatsHasNullValue(c *C) { func (s *testShowStatsSuite) TestShowPartitionStats(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("set @@session.tidb_enable_table_partition=1") - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - createTable := `CREATE TABLE t (a int, b int, primary key(a), index idx(b)) + testkit.WithPruneMode(tk, variable.StaticOnly, func() { + tk.MustExec("set @@session.tidb_enable_table_partition=1") + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + createTable := `CREATE TABLE t (a int, b int, primary key(a), index idx(b)) PARTITION BY RANGE ( a ) (PARTITION p0 VALUES LESS THAN (6))` - tk.MustExec(createTable) - tk.MustExec(`insert into t values (1, 1)`) - tk.MustExec("analyze table t") + tk.MustExec(createTable) + tk.MustExec(`insert into t values (1, 1)`) + tk.MustExec("analyze table t") - result := tk.MustQuery("show stats_meta") - c.Assert(len(result.Rows()), Equals, 1) - c.Assert(result.Rows()[0][0], Equals, "test") - c.Assert(result.Rows()[0][1], Equals, "t") - c.Assert(result.Rows()[0][2], Equals, "p0") + result := tk.MustQuery("show stats_meta") + c.Assert(len(result.Rows()), Equals, 1) + c.Assert(result.Rows()[0][0], Equals, "test") + c.Assert(result.Rows()[0][1], Equals, "t") + c.Assert(result.Rows()[0][2], Equals, "p0") - result = tk.MustQuery("show stats_histograms").Sort() - c.Assert(len(result.Rows()), Equals, 3) - c.Assert(result.Rows()[0][2], Equals, "p0") - c.Assert(result.Rows()[0][3], Equals, "a") - c.Assert(result.Rows()[1][2], Equals, "p0") - c.Assert(result.Rows()[1][3], Equals, "b") - c.Assert(result.Rows()[2][2], Equals, "p0") - c.Assert(result.Rows()[2][3], Equals, "idx") + result = tk.MustQuery("show stats_histograms").Sort() + c.Assert(len(result.Rows()), Equals, 3) + c.Assert(result.Rows()[0][2], Equals, "p0") + c.Assert(result.Rows()[0][3], Equals, "a") + c.Assert(result.Rows()[1][2], Equals, "p0") + c.Assert(result.Rows()[1][3], Equals, "b") + c.Assert(result.Rows()[2][2], Equals, "p0") + c.Assert(result.Rows()[2][3], Equals, "idx") - result = tk.MustQuery("show stats_buckets").Sort() - result.Check(testkit.Rows("test t p0 a 0 0 1 1 1 1", "test t p0 b 0 0 1 1 1 1", "test t p0 idx 1 0 1 1 1 1")) + result = tk.MustQuery("show stats_buckets").Sort() + result.Check(testkit.Rows("test t p0 a 0 0 1 1 1 1", "test t p0 b 0 0 1 1 1 1", "test t p0 idx 1 0 1 1 1 1")) - result = tk.MustQuery("show stats_healthy") - result.Check(testkit.Rows("test t p0 100")) + result = tk.MustQuery("show stats_healthy") + result.Check(testkit.Rows("test t p0 100")) + }) } func (s *testShowStatsSuite) TestShowAnalyzeStatus(c *C) { diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 1a8590704770c..36b23dc105dd3 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -671,26 +671,27 @@ func (s *testIntegrationSerialSuite) TestIsolationReadDoNotFilterSystemDB(c *C) func (s *testIntegrationSuite) TestPartitionTableStats(c *C) { tk := testkit.NewTestKit(c, s.store) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int)partition by range columns(a)(partition p0 values less than (10), partition p1 values less than(20), partition p2 values less than(30));") - tk.MustExec("insert into t 
values(21, 1), (22, 2), (23, 3), (24, 4), (15, 5)")
- tk.MustExec("analyze table t")
- tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
-
- var input []string
- var output []struct {
- SQL string
- Result []string
- }
- s.testData.GetTestCases(c, &input, &output)
- for i, tt := range input {
- s.testData.OnRecord(func() {
- output[i].SQL = tt
- output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
- })
- tk.MustQuery(tt).Check(testkit.Rows(output[i].Result...))
+ {
+ tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("create table t(a int, b int)partition by range columns(a)(partition p0 values less than (10), partition p1 values less than(20), partition p2 values less than(30));")
+ tk.MustExec("insert into t values(21, 1), (22, 2), (23, 3), (24, 4), (15, 5)")
+ tk.MustExec("analyze table t")
+
+ var input []string
+ var output []struct {
+ SQL string
+ Result []string
+ }
+ s.testData.GetTestCases(c, &input, &output)
+ for i, tt := range input {
+ s.testData.OnRecord(func() {
+ output[i].SQL = tt
+ output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
+ })
+ tk.MustQuery(tt).Check(testkit.Rows(output[i].Result...))
+ }
 }
}
diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go
index ab23f70f96de3..3b77a3daaa02f 100644
--- a/planner/core/planbuilder.go
+++ b/planner/core/planbuilder.go
@@ -1533,6 +1533,11 @@ func getPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []
 func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.AnalyzeOptionType]uint64) (Plan, error) {
 p := &Analyze{Opts: opts}
+ pruneMode := variable.PartitionPruneMode(b.ctx.GetSessionVars().PartitionPruneMode.Load())
+ if len(as.PartitionNames) > 0 && pruneMode == variable.DynamicOnly {
+ logutil.BgLogger().Info("analyze partition didn't affect in dynamic-prune-mode", zap.String("partitions", as.PartitionNames[0].L))
+ return p, nil
+ }
 for _, tbl := range as.TableNames {
 if tbl.TableInfo.IsView() {
 return nil, errors.Errorf("analyze view %s is not supported now.", tbl.Name.O)
@@ -1546,8 +1551,20 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A
 return nil, err
 }
 for _, idx := range idxInfo {
- for i, id := range physicalIDs {
- info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental}
+ if pruneMode == variable.StaticOnly || (pruneMode == variable.StaticButPrepareDynamic && !b.ctx.GetSessionVars().InRestrictedSQL) {
+ // In static mode, or in static-but-prepare-dynamic mode outside auto analyze, each partition must be analyzed separately.
+ // In static-but-prepare-dynamic mode under auto analyze, each partition is checked before it is analyzed.
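+ // One analyze task is built per physical partition ID below; each task both collects from and persists under that partition's own ID.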
+ for i, id := range physicalIDs { + info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{ + IndexInfo: idx, + analyzeInfo: info, + TblInfo: tbl.TableInfo, + }) + } + } + if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic { + info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, TableID: AnalyzeTableID{PersistID: tbl.TableInfo.ID, CollectIDs: physicalIDs}, Incremental: as.Incremental} p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{ IndexInfo: idx, analyzeInfo: info, @@ -1557,8 +1574,19 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A } handleCols := BuildHandleColsForAnalyze(b.ctx, tbl.TableInfo) if len(colInfo) > 0 || handleCols != nil { - for i, id := range physicalIDs { - info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic { + for i, id := range physicalIDs { + info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + p.ColTasks = append(p.ColTasks, AnalyzeColumnsTask{ + HandleCols: handleCols, + ColsInfo: colInfo, + analyzeInfo: info, + TblInfo: tbl.TableInfo, + }) + } + } + if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic { + info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, TableID: AnalyzeTableID{PersistID: tbl.TableInfo.ID, CollectIDs: physicalIDs}, Incremental: as.Incremental} p.ColTasks = append(p.ColTasks, AnalyzeColumnsTask{ HandleCols: handleCols, ColsInfo: colInfo, @@ -1574,6 +1602,11 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.AnalyzeOptionType]uint64) (Plan, error) { p := &Analyze{Opts: opts} tblInfo := as.TableNames[0].TableInfo + pruneMode := variable.PartitionPruneMode(b.ctx.GetSessionVars().PartitionPruneMode.Load()) + if len(as.PartitionNames) > 0 && pruneMode == variable.DynamicOnly { + logutil.BgLogger().Info("analyze partition didn't affect in dynamic-prune-mode", zap.String("table", tblInfo.Name.L), zap.String("partitions", as.PartitionNames[0].L)) + return p, nil + } physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames) if err != nil { return nil, err @@ -1582,8 +1615,14 @@ func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.A if isPrimaryIndex(idxName) { handleCols := BuildHandleColsForAnalyze(b.ctx, tblInfo) if handleCols != nil { - for i, id := range physicalIDs { - info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic { + for i, id := range physicalIDs { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + p.ColTasks = append(p.ColTasks, 
AnalyzeColumnsTask{HandleCols: handleCols, analyzeInfo: info, TblInfo: tblInfo}) + } + } + if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, TableID: AnalyzeTableID{PersistID: tblInfo.ID, CollectIDs: physicalIDs}, Incremental: as.Incremental} p.ColTasks = append(p.ColTasks, AnalyzeColumnsTask{HandleCols: handleCols, analyzeInfo: info, TblInfo: tblInfo}) } continue @@ -1593,8 +1632,14 @@ func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.A if idx == nil || idx.State != model.StatePublic { return nil, ErrAnalyzeMissIndex.GenWithStackByArgs(idxName.O, tblInfo.Name.O) } - for i, id := range physicalIDs { - info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic { + for i, id := range physicalIDs { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{IndexInfo: idx, analyzeInfo: info, TblInfo: tblInfo}) + } + } + if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, TableID: AnalyzeTableID{PersistID: tblInfo.ID, CollectIDs: physicalIDs}, Incremental: as.Incremental} p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{IndexInfo: idx, analyzeInfo: info, TblInfo: tblInfo}) } } @@ -1604,22 +1649,39 @@ func (b *PlanBuilder) buildAnalyzeIndex(as *ast.AnalyzeTableStmt, opts map[ast.A func (b *PlanBuilder) buildAnalyzeAllIndex(as *ast.AnalyzeTableStmt, opts map[ast.AnalyzeOptionType]uint64) (Plan, error) { p := &Analyze{Opts: opts} tblInfo := as.TableNames[0].TableInfo + pruneMode := variable.PartitionPruneMode(b.ctx.GetSessionVars().PartitionPruneMode.Load()) + if len(as.PartitionNames) > 0 && pruneMode == variable.DynamicOnly { + logutil.BgLogger().Info("analyze partition didn't affect in dynamic-prune-mode", zap.String("table", tblInfo.Name.L), zap.String("partitions", as.PartitionNames[0].L)) + return p, nil + } physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames) if err != nil { return nil, err } for _, idx := range tblInfo.Indices { if idx.State == model.StatePublic { - for i, id := range physicalIDs { - info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic { + for i, id := range physicalIDs { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{IndexInfo: idx, analyzeInfo: info, TblInfo: tblInfo}) + } + } + if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, TableID: 
AnalyzeTableID{PersistID: tblInfo.ID, CollectIDs: physicalIDs}, Incremental: as.Incremental} p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{IndexInfo: idx, analyzeInfo: info, TblInfo: tblInfo}) } } } handleCols := BuildHandleColsForAnalyze(b.ctx, tblInfo) if handleCols != nil { - for i, id := range physicalIDs { - info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + if pruneMode == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic { + for i, id := range physicalIDs { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, PartitionName: names[i], TableID: AnalyzeTableID{PersistID: id, CollectIDs: []int64{id}}, Incremental: as.Incremental} + p.ColTasks = append(p.ColTasks, AnalyzeColumnsTask{HandleCols: handleCols, analyzeInfo: info, TblInfo: tblInfo}) + } + } + if pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic { + info := analyzeInfo{DBName: as.TableNames[0].Schema.O, TableName: as.TableNames[0].Name.O, TableID: AnalyzeTableID{PersistID: tblInfo.ID, CollectIDs: physicalIDs}, Incremental: as.Incremental} p.ColTasks = append(p.ColTasks, AnalyzeColumnsTask{HandleCols: handleCols, analyzeInfo: info, TblInfo: tblInfo}) } } diff --git a/planner/core/testdata/analyze_suite_out.json b/planner/core/testdata/analyze_suite_out.json index 86fa54e7b92e7..8d42349bb20fc 100644 --- a/planner/core/testdata/analyze_suite_out.json +++ b/planner/core/testdata/analyze_suite_out.json @@ -354,7 +354,7 @@ "Analyze{Index(a),Index(b)}", "TableReader(Table(t4)->Sel([le(test.t4.a, 2)]))", "IndexReader(Index(t4.b)[[-inf,2)])", - "IndexLookUp(Index(t4.a)[[1,1]], Table(t4)->Sel([le(test.t4.b, 2)]))" + "TableReader(Table(t4)->Sel([eq(test.t4.a, 1) le(test.t4.b, 2)]))" ] }, { diff --git a/session/session.go b/session/session.go index 38370ec38afc1..677e19a540704 100644 --- a/session/session.go +++ b/session/session.go @@ -777,13 +777,16 @@ func (s *session) ExecRestrictedSQLWithContext(ctx context.Context, sql string) se.sessionVars.OptimizerUseInvisibleIndexes = true defer func() { se.sessionVars.OptimizerUseInvisibleIndexes = false }() } + prePruneMode := se.sessionVars.PartitionPruneMode.Load() defer func() { if se != nil && se.GetSessionVars().StmtCtx.WarningCount() > 0 { warnings := se.GetSessionVars().StmtCtx.GetWarnings() s.GetSessionVars().StmtCtx.AppendWarnings(warnings) } + se.sessionVars.PartitionPruneMode.Store(prePruneMode) s.sysSessionPool().Put(tmp) }() + se.sessionVars.PartitionPruneMode.Store(s.sessionVars.PartitionPruneMode.Load()) metrics.SessionRestrictedSQLCounter.Inc() return execRestrictedSQL(ctx, se, sql) @@ -1666,6 +1669,16 @@ func getHostByIP(ip string) []string { return addrs } +// RefreshVars implements the sessionctx.Context interface. +func (s *session) RefreshVars(ctx context.Context) error { + pruneMode, err := s.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.TiDBPartitionPruneMode) + if err != nil { + return err + } + s.sessionVars.PartitionPruneMode.Store(pruneMode) + return nil +} + // CreateSession4Test creates a new session environment for test. 
func CreateSession4Test(store kv.Storage) (Session, error) {
 return CreateSession4TestWithOpt(store, nil)
diff --git a/sessionctx/context.go b/sessionctx/context.go
index 86a0f9b662296..c7804b53a3bb3 100644
--- a/sessionctx/context.go
+++ b/sessionctx/context.go
@@ -60,6 +60,10 @@ type Context interface {
 // now just for load data and batch insert.
 RefreshTxnCtx(context.Context) error
+ // RefreshVars refreshes modified global variables into the current session.
+ // It is only used by daemon sessions such as `statsHandle` to detect global variable changes.
+ RefreshVars(context.Context) error
+
 // InitTxnWithStartTS initializes a transaction with startTS.
 // It should be called right before we builds an executor.
 InitTxnWithStartTS(startTS uint64) error
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index bbd8c023c331d..3bb947b2a9bfa 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -51,6 +51,7 @@ import (
 "github.com/pingcap/tidb/util/stringutil"
 "github.com/pingcap/tidb/util/timeutil"
 "github.com/twmb/murmur3"
+ atomic2 "go.uber.org/atomic"
)
var preparedStmtCount int64
@@ -712,12 +713,12 @@ type SessionVars struct {
 LastTxnInfo kv.TxnInfo
 // PartitionPruneMode indicates how and when to prune partitions.
- PartitionPruneMode PartitionPruneMode
+ PartitionPruneMode atomic2.String
}
// UseDynamicPartitionPrune indicates whether use new dynamic partition prune.
func (s *SessionVars) UseDynamicPartitionPrune() bool {
- return s.PartitionPruneMode == DynamicOnly
+ return PartitionPruneMode(s.PartitionPruneMode.Load()) == DynamicOnly
}
// PartitionPruneMode presents the prune mode used.
@@ -1452,7 +1453,7 @@ func (s *SessionVars) SetSystemVar(name string, val string) error {
 case TiDBEnableClusteredIndex:
 s.EnableClusteredIndex = TiDBOptOn(val)
 case TiDBPartitionPruneMode:
- s.PartitionPruneMode = PartitionPruneMode(strings.ToLower(strings.TrimSpace(val)))
+ s.PartitionPruneMode.Store(strings.ToLower(strings.TrimSpace(val)))
 case TiDBEnableParallelApply:
 s.EnableParallelApply = TiDBOptOn(val)
 case TiDBSlowLogMasking, TiDBRedactLog:
diff --git a/statistics/handle/ddl_test.go b/statistics/handle/ddl_test.go
index c385e850e3df7..348f65f160ca1 100644
--- a/statistics/handle/ddl_test.go
+++ b/statistics/handle/ddl_test.go
@@ -17,6 +17,7 @@ import (
 .
"github.com/pingcap/check" "github.com/pingcap/parser/model" "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/testkit" ) @@ -184,75 +185,77 @@ func (s *testStatsSuite) TestDDLHistogram(c *C) { func (s *testStatsSuite) TestDDLPartition(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) - testKit.MustExec("use test") - testKit.MustExec("drop table if exists t") - createTable := `CREATE TABLE t (a int, b int, primary key(a), index idx(b)) + testkit.WithPruneMode(testKit, variable.StaticOnly, func() { + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + createTable := `CREATE TABLE t (a int, b int, primary key(a), index idx(b)) PARTITION BY RANGE ( a ) ( PARTITION p0 VALUES LESS THAN (6), PARTITION p1 VALUES LESS THAN (11), PARTITION p2 VALUES LESS THAN (16), PARTITION p3 VALUES LESS THAN (21) )` - testKit.MustExec(createTable) - do := s.do - is := do.InfoSchema() - tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - tableInfo := tbl.Meta() - h := do.StatsHandle() - err = h.HandleDDLEvent(<-h.DDLEventCh()) - c.Assert(err, IsNil) - c.Assert(h.Update(is), IsNil) - pi := tableInfo.GetPartitionInfo() - for _, def := range pi.Definitions { - statsTbl := h.GetPartitionStats(tableInfo, def.ID) - c.Assert(statsTbl.Pseudo, IsFalse) - } + testKit.MustExec(createTable) + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + h := do.StatsHandle() + err = h.HandleDDLEvent(<-h.DDLEventCh()) + c.Assert(err, IsNil) + c.Assert(h.Update(is), IsNil) + pi := tableInfo.GetPartitionInfo() + for _, def := range pi.Definitions { + statsTbl := h.GetPartitionStats(tableInfo, def.ID) + c.Assert(statsTbl.Pseudo, IsFalse) + } - testKit.MustExec("insert into t values (1,2),(6,2),(11,2),(16,2)") - testKit.MustExec("analyze table t") - testKit.MustExec("alter table t add column c varchar(15) DEFAULT '123'") - err = h.HandleDDLEvent(<-h.DDLEventCh()) - c.Assert(err, IsNil) - is = do.InfoSchema() - c.Assert(h.Update(is), IsNil) - tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - tableInfo = tbl.Meta() - pi = tableInfo.GetPartitionInfo() - for _, def := range pi.Definitions { - statsTbl := h.GetPartitionStats(tableInfo, def.ID) - c.Assert(statsTbl.Pseudo, IsFalse) - c.Check(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSize(statsTbl.Count, false), Equals, 3.0) - } + testKit.MustExec("insert into t values (1,2),(6,2),(11,2),(16,2)") + testKit.MustExec("analyze table t") + testKit.MustExec("alter table t add column c varchar(15) DEFAULT '123'") + err = h.HandleDDLEvent(<-h.DDLEventCh()) + c.Assert(err, IsNil) + is = do.InfoSchema() + c.Assert(h.Update(is), IsNil) + tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo = tbl.Meta() + pi = tableInfo.GetPartitionInfo() + for _, def := range pi.Definitions { + statsTbl := h.GetPartitionStats(tableInfo, def.ID) + c.Assert(statsTbl.Pseudo, IsFalse) + c.Check(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSize(statsTbl.Count, false), Equals, 3.0) + } - addPartition := "alter table t add partition (partition p4 values less than (26))" - testKit.MustExec(addPartition) - is = s.do.InfoSchema() - tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) 
- c.Assert(err, IsNil) - tableInfo = tbl.Meta() - err = h.HandleDDLEvent(<-h.DDLEventCh()) - c.Assert(err, IsNil) - c.Assert(h.Update(is), IsNil) - pi = tableInfo.GetPartitionInfo() - for _, def := range pi.Definitions { - statsTbl := h.GetPartitionStats(tableInfo, def.ID) - c.Assert(statsTbl.Pseudo, IsFalse) - } + addPartition := "alter table t add partition (partition p4 values less than (26))" + testKit.MustExec(addPartition) + is = s.do.InfoSchema() + tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo = tbl.Meta() + err = h.HandleDDLEvent(<-h.DDLEventCh()) + c.Assert(err, IsNil) + c.Assert(h.Update(is), IsNil) + pi = tableInfo.GetPartitionInfo() + for _, def := range pi.Definitions { + statsTbl := h.GetPartitionStats(tableInfo, def.ID) + c.Assert(statsTbl.Pseudo, IsFalse) + } - truncatePartition := "alter table t truncate partition p4" - testKit.MustExec(truncatePartition) - is = s.do.InfoSchema() - tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - tableInfo = tbl.Meta() - err = h.HandleDDLEvent(<-h.DDLEventCh()) - c.Assert(err, IsNil) - c.Assert(h.Update(is), IsNil) - pi = tableInfo.GetPartitionInfo() - for _, def := range pi.Definitions { - statsTbl := h.GetPartitionStats(tableInfo, def.ID) - c.Assert(statsTbl.Pseudo, IsFalse) - } + truncatePartition := "alter table t truncate partition p4" + testKit.MustExec(truncatePartition) + is = s.do.InfoSchema() + tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo = tbl.Meta() + err = h.HandleDDLEvent(<-h.DDLEventCh()) + c.Assert(err, IsNil) + c.Assert(h.Update(is), IsNil) + pi = tableInfo.GetPartitionInfo() + for _, def := range pi.Definitions { + statsTbl := h.GetPartitionStats(tableInfo, def.ID) + c.Assert(statsTbl.Pseudo, IsFalse) + } + }) } diff --git a/statistics/handle/gc_test.go b/statistics/handle/gc_test.go index 733fbfebe31ca..63b9013561b69 100644 --- a/statistics/handle/gc_test.go +++ b/statistics/handle/gc_test.go @@ -18,6 +18,7 @@ import ( "time" . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/testkit" ) @@ -57,37 +58,39 @@ func (s *testStatsSuite) TestGCStats(c *C) { func (s *testStatsSuite) TestGCPartition(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) - testKit.MustExec("use test") - testKit.MustExec("set @@session.tidb_enable_table_partition=1") - testKit.MustExec(`create table t (a bigint(64), b bigint(64), index idx(a, b)) + testkit.WithPruneMode(testKit, variable.StaticOnly, func() { + testKit.MustExec("use test") + testKit.MustExec("set @@session.tidb_enable_table_partition=1") + testKit.MustExec(`create table t (a bigint(64), b bigint(64), index idx(a, b)) partition by range (a) ( partition p0 values less than (3), partition p1 values less than (6))`) - testKit.MustExec("insert into t values (1,2),(2,3),(3,4),(4,5),(5,6)") - testKit.MustExec("analyze table t") + testKit.MustExec("insert into t values (1,2),(2,3),(3,4),(4,5),(5,6)") + testKit.MustExec("analyze table t") - testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("6")) - testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("15")) - h := s.do.StatsHandle() - h.SetLastUpdateVersion(math.MaxUint64) - ddlLease := time.Duration(0) - testKit.MustExec("alter table t drop index idx") - c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil) - testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("4")) - testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("10")) + testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("6")) + testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("15")) + h := s.do.StatsHandle() + h.SetLastUpdateVersion(math.MaxUint64) + ddlLease := time.Duration(0) + testKit.MustExec("alter table t drop index idx") + c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil) + testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("4")) + testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("10")) - testKit.MustExec("alter table t drop column b") - c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil) - testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("2")) - testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("5")) + testKit.MustExec("alter table t drop column b") + c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil) + testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("2")) + testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("5")) - testKit.MustExec("drop table t") - c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil) - testKit.MustQuery("select count(*) from mysql.stats_meta").Check(testkit.Rows("2")) - testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("0")) - testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("0")) - c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil) - testKit.MustQuery("select count(*) from mysql.stats_meta").Check(testkit.Rows("0")) + testKit.MustExec("drop table t") + c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil) + testKit.MustQuery("select count(*) from mysql.stats_meta").Check(testkit.Rows("2")) + testKit.MustQuery("select count(*) from mysql.stats_histograms").Check(testkit.Rows("0")) + 
testKit.MustQuery("select count(*) from mysql.stats_buckets").Check(testkit.Rows("0"))
+ c.Assert(h.GCStats(s.do.InfoSchema(), ddlLease), IsNil)
+ testKit.MustQuery("select count(*) from mysql.stats_meta").Check(testkit.Rows("0"))
+ })
}
func (s *testStatsSuite) TestGCExtendedStats(c *C) {
diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go
index 451392b2421ec..2eaa839037e17 100644
--- a/statistics/handle/handle.go
+++ b/statistics/handle/handle.go
@@ -33,6 +33,7 @@ import (
 "github.com/pingcap/tidb/kv"
 "github.com/pingcap/tidb/sessionctx"
 "github.com/pingcap/tidb/sessionctx/stmtctx"
+ "github.com/pingcap/tidb/sessionctx/variable"
 "github.com/pingcap/tidb/statistics"
 "github.com/pingcap/tidb/store/tikv/oracle"
 "github.com/pingcap/tidb/table"
@@ -275,26 +276,6 @@ func (h *Handle) GetPartitionStats(tblInfo *model.TableInfo, pid int64) *statist
 return tbl
}
-// CanRuntimePrune indicates whether tbl support runtime prune for table and first partition id.
-func (h *Handle) CanRuntimePrune(tid, p0Id int64) bool {
- if h == nil {
- return false
- }
- if tid == p0Id {
- return false
- }
- statsCache := h.statsCache.Load().(statsCache)
- _, tblExists := statsCache.tables[tid]
- if tblExists {
- return true
- }
- _, partExists := statsCache.tables[p0Id]
- if !partExists {
- return true
- }
- return false
-}
-
 func (h *Handle) updateStatsCache(newCache statsCache) {
 h.statsCache.Lock()
 oldCache := h.statsCache.Load().(statsCache)
@@ -1124,3 +1105,17 @@ func (h *Handle) SaveExtendedStatsToStorage(tableID int64, extStats *statistics.
 }
 return execSQLs(ctx, exec, sqls)
}
+
+// CurrentPruneMode returns the partition prune mode of the handle's internal session.
+func (h *Handle) CurrentPruneMode() variable.PartitionPruneMode {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ return variable.PartitionPruneMode(h.mu.ctx.GetSessionVars().PartitionPruneMode.Load())
+}
+
+// RefreshVars is used to pull the latest TiDBPartitionPruneMode value from kv storage.
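+// It locks h.mu because it reads the handle's shared internal session context.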
+func (h *Handle) RefreshVars() error { + h.mu.Lock() + defer h.mu.Unlock() + return h.mu.ctx.RefreshVars(context.Background()) +} diff --git a/statistics/handle/update.go b/statistics/handle/update.go index d786f670291d5..28f331761f9fa 100644 --- a/statistics/handle/update.go +++ b/statistics/handle/update.go @@ -741,7 +741,8 @@ func (h *Handle) HandleAutoAnalyze(is infoschema.InfoSchema) { for _, tbl := range tbls { tblInfo := tbl.Meta() pi := tblInfo.GetPartitionInfo() - if pi == nil { + pruneMode := h.CurrentPruneMode() + if pi == nil || pruneMode == variable.DynamicOnly || pruneMode == variable.StaticButPrepareDynamic { statsTbl := h.GetTableStats(tblInfo) sql := "analyze table `" + db + "`.`" + tblInfo.Name.O + "`" analyzed := h.autoAnalyzeTable(tblInfo, statsTbl, start, end, autoAnalyzeRatio, sql) @@ -750,12 +751,15 @@ func (h *Handle) HandleAutoAnalyze(is infoschema.InfoSchema) { } continue } - for _, def := range pi.Definitions { - sql := "analyze table `" + db + "`.`" + tblInfo.Name.O + "`" + " partition `" + def.Name.O + "`" - statsTbl := h.GetPartitionStats(tblInfo, def.ID) - analyzed := h.autoAnalyzeTable(tblInfo, statsTbl, start, end, autoAnalyzeRatio, sql) - if analyzed { - return + if h.CurrentPruneMode() == variable.StaticOnly || pruneMode == variable.StaticButPrepareDynamic { + for _, def := range pi.Definitions { + sql := "analyze table `" + db + "`.`" + tblInfo.Name.O + "`" + " partition `" + def.Name.O + "`" + statsTbl := h.GetPartitionStats(tblInfo, def.ID) + analyzed := h.autoAnalyzeTable(tblInfo, statsTbl, start, end, autoAnalyzeRatio, sql) + if analyzed { + return + } + continue } continue } diff --git a/statistics/handle/update_test.go b/statistics/handle/update_test.go index 5c9777c03e40d..25c9b933c7854 100644 --- a/statistics/handle/update_test.go +++ b/statistics/handle/update_test.go @@ -371,134 +371,139 @@ func (s *testStatsSuite) TestUpdatePartition(c *C) { func (s *testStatsSuite) TestAutoUpdate(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) - testKit.MustExec("use test") - testKit.MustExec("create table t (a varchar(20))") - - handle.AutoAnalyzeMinCnt = 0 - testKit.MustExec("set global tidb_auto_analyze_ratio = 0.2") - defer func() { - handle.AutoAnalyzeMinCnt = 1000 - testKit.MustExec("set global tidb_auto_analyze_ratio = 0.0") - }() - - do := s.do - is := do.InfoSchema() - tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - tableInfo := tbl.Meta() - h := do.StatsHandle() + testkit.WithPruneMode(testKit, variable.StaticOnly, func() { + testKit.MustExec("use test") + testKit.MustExec("create table t (a varchar(20))") + + handle.AutoAnalyzeMinCnt = 0 + testKit.MustExec("set global tidb_auto_analyze_ratio = 0.2") + defer func() { + handle.AutoAnalyzeMinCnt = 1000 + testKit.MustExec("set global tidb_auto_analyze_ratio = 0.0") + }() + + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + h := do.StatsHandle() - h.HandleDDLEvent(<-h.DDLEventCh()) - c.Assert(h.Update(is), IsNil) - stats := h.GetTableStats(tableInfo) - c.Assert(stats.Count, Equals, int64(0)) + h.HandleDDLEvent(<-h.DDLEventCh()) + c.Assert(h.Update(is), IsNil) + stats := h.GetTableStats(tableInfo) + c.Assert(stats.Count, Equals, int64(0)) - _, err = testKit.Exec("insert into t values ('ss'), ('ss'), ('ss'), ('ss'), ('ss')") - c.Assert(err, IsNil) - c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) - 
c.Assert(h.Update(is), IsNil) - h.HandleAutoAnalyze(is) - c.Assert(h.Update(is), IsNil) - stats = h.GetTableStats(tableInfo) - c.Assert(stats.Count, Equals, int64(5)) - c.Assert(stats.ModifyCount, Equals, int64(0)) - for _, item := range stats.Columns { - // TotColSize = 5*(2(length of 'ss') + 1(size of len byte)). - c.Assert(item.TotColSize, Equals, int64(15)) - break - } + _, err = testKit.Exec("insert into t values ('ss'), ('ss'), ('ss'), ('ss'), ('ss')") + c.Assert(err, IsNil) + c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) + c.Assert(h.Update(is), IsNil) + h.HandleAutoAnalyze(is) + c.Assert(h.Update(is), IsNil) + stats = h.GetTableStats(tableInfo) + c.Assert(stats.Count, Equals, int64(5)) + c.Assert(stats.ModifyCount, Equals, int64(0)) + for _, item := range stats.Columns { + // TotColSize = 5*(2(length of 'ss') + 1(size of len byte)). + c.Assert(item.TotColSize, Equals, int64(15)) + break + } - // Test that even if the table is recently modified, we can still analyze the table. - h.SetLease(time.Second) - defer func() { h.SetLease(0) }() - _, err = testKit.Exec("insert into t values ('fff')") - c.Assert(err, IsNil) - c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) - c.Assert(h.Update(is), IsNil) - h.HandleAutoAnalyze(is) - c.Assert(h.Update(is), IsNil) - stats = h.GetTableStats(tableInfo) - c.Assert(stats.Count, Equals, int64(6)) - c.Assert(stats.ModifyCount, Equals, int64(1)) + // Test that even if the table is recently modified, we can still analyze the table. + h.SetLease(time.Second) + defer func() { h.SetLease(0) }() + _, err = testKit.Exec("insert into t values ('fff')") + c.Assert(err, IsNil) + c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) + c.Assert(h.Update(is), IsNil) + h.HandleAutoAnalyze(is) + c.Assert(h.Update(is), IsNil) + stats = h.GetTableStats(tableInfo) + c.Assert(stats.Count, Equals, int64(6)) + c.Assert(stats.ModifyCount, Equals, int64(1)) - _, err = testKit.Exec("insert into t values ('fff')") - c.Assert(err, IsNil) - c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) - c.Assert(h.Update(is), IsNil) - h.HandleAutoAnalyze(is) - c.Assert(h.Update(is), IsNil) - stats = h.GetTableStats(tableInfo) - c.Assert(stats.Count, Equals, int64(7)) - c.Assert(stats.ModifyCount, Equals, int64(0)) + _, err = testKit.Exec("insert into t values ('fff')") + c.Assert(err, IsNil) + c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) + c.Assert(h.Update(is), IsNil) + h.HandleAutoAnalyze(is) + c.Assert(h.Update(is), IsNil) + stats = h.GetTableStats(tableInfo) + c.Assert(stats.Count, Equals, int64(7)) + c.Assert(stats.ModifyCount, Equals, int64(0)) - _, err = testKit.Exec("insert into t values ('eee')") - c.Assert(err, IsNil) - c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) - c.Assert(h.Update(is), IsNil) - h.HandleAutoAnalyze(is) - c.Assert(h.Update(is), IsNil) - stats = h.GetTableStats(tableInfo) - c.Assert(stats.Count, Equals, int64(8)) - // Modify count is non-zero means that we do not analyze the table. - c.Assert(stats.ModifyCount, Equals, int64(1)) - for _, item := range stats.Columns { - // TotColSize = 27, because the table has not been analyzed, and insert statement will add 3(length of 'eee') to TotColSize. 
- c.Assert(item.TotColSize, Equals, int64(27)) - break - } + _, err = testKit.Exec("insert into t values ('eee')") + c.Assert(err, IsNil) + c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) + c.Assert(h.Update(is), IsNil) + h.HandleAutoAnalyze(is) + c.Assert(h.Update(is), IsNil) + stats = h.GetTableStats(tableInfo) + c.Assert(stats.Count, Equals, int64(8)) + // Modify count is non-zero means that we do not analyze the table. + c.Assert(stats.ModifyCount, Equals, int64(1)) + for _, item := range stats.Columns { + // TotColSize = 27, because the table has not been analyzed, and insert statement will add 3(length of 'eee') to TotColSize. + c.Assert(item.TotColSize, Equals, int64(27)) + break + } - testKit.MustExec("analyze table t") - _, err = testKit.Exec("create index idx on t(a)") - c.Assert(err, IsNil) - is = do.InfoSchema() - tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - tableInfo = tbl.Meta() - h.HandleAutoAnalyze(is) - c.Assert(h.Update(is), IsNil) - stats = h.GetTableStats(tableInfo) - c.Assert(stats.Count, Equals, int64(8)) - c.Assert(stats.ModifyCount, Equals, int64(0)) - hg, ok := stats.Indices[tableInfo.Indices[0].ID] - c.Assert(ok, IsTrue) - c.Assert(hg.NDV, Equals, int64(3)) - c.Assert(hg.Len(), Equals, 3) + testKit.MustExec("analyze table t") + _, err = testKit.Exec("create index idx on t(a)") + c.Assert(err, IsNil) + is = do.InfoSchema() + tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo = tbl.Meta() + h.HandleAutoAnalyze(is) + c.Assert(h.Update(is), IsNil) + stats = h.GetTableStats(tableInfo) + c.Assert(stats.Count, Equals, int64(8)) + c.Assert(stats.ModifyCount, Equals, int64(0)) + hg, ok := stats.Indices[tableInfo.Indices[0].ID] + c.Assert(ok, IsTrue) + c.Assert(hg.NDV, Equals, int64(3)) + c.Assert(hg.Len(), Equals, 3) + }) } func (s *testStatsSuite) TestAutoUpdatePartition(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) - testKit.MustExec("use test") - testKit.MustExec("drop table if exists t") - testKit.MustExec("create table t (a int) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (6))") - testKit.MustExec("analyze table t") - - handle.AutoAnalyzeMinCnt = 0 - testKit.MustExec("set global tidb_auto_analyze_ratio = 0.6") - defer func() { - handle.AutoAnalyzeMinCnt = 1000 - testKit.MustExec("set global tidb_auto_analyze_ratio = 0.0") - }() - - do := s.do - is := do.InfoSchema() - tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - c.Assert(err, IsNil) - tableInfo := tbl.Meta() - pi := tableInfo.GetPartitionInfo() - h := do.StatsHandle() + testkit.WithPruneMode(testKit, variable.StaticOnly, func() { + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t (a int) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (6))") + testKit.MustExec("analyze table t") + + handle.AutoAnalyzeMinCnt = 0 + testKit.MustExec("set global tidb_auto_analyze_ratio = 0.6") + defer func() { + handle.AutoAnalyzeMinCnt = 1000 + testKit.MustExec("set global tidb_auto_analyze_ratio = 0.0") + }() + + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + pi := tableInfo.GetPartitionInfo() + h := do.StatsHandle() + c.Assert(h.RefreshVars(), IsNil) - c.Assert(h.Update(is), IsNil) - stats := h.GetPartitionStats(tableInfo, pi.Definitions[0].ID) - c.Assert(stats.Count, Equals, 
int64(0))
+ c.Assert(h.Update(is), IsNil)
+ stats := h.GetPartitionStats(tableInfo, pi.Definitions[0].ID)
+ c.Assert(stats.Count, Equals, int64(0))
- testKit.MustExec("insert into t values (1)")
- c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
- c.Assert(h.Update(is), IsNil)
- h.HandleAutoAnalyze(is)
- stats = h.GetPartitionStats(tableInfo, pi.Definitions[0].ID)
- c.Assert(stats.Count, Equals, int64(1))
- c.Assert(stats.ModifyCount, Equals, int64(0))
+ testKit.MustExec("insert into t values (1)")
+ c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
+ c.Assert(h.Update(is), IsNil)
+ h.HandleAutoAnalyze(is)
+ stats = h.GetPartitionStats(tableInfo, pi.Definitions[0].ID)
+ c.Assert(stats.Count, Equals, int64(1))
+ c.Assert(stats.ModifyCount, Equals, int64(0))
+ })
}
func (s *testStatsSuite) TestTableAnalyzed(c *C) {
diff --git a/util/mock/context.go b/util/mock/context.go
index 79d307156b055..d0ec81c7e4d6c 100644
--- a/util/mock/context.go
+++ b/util/mock/context.go
@@ -163,6 +163,11 @@ func (c *Context) RefreshTxnCtx(ctx context.Context) error {
 return errors.Trace(c.NewTxn(ctx))
}
+// RefreshVars implements the sessionctx.Context interface.
+func (c *Context) RefreshVars(ctx context.Context) error {
+ return nil
+}
+
 // InitTxnWithStartTS implements the sessionctx.Context interface with startTS.
 func (c *Context) InitTxnWithStartTS(startTS uint64) error {
 if c.txn.Valid() {
diff --git a/util/testkit/testkit.go b/util/testkit/testkit.go
index e5f76ec4d64dd..f586e2a18d1d3 100644
--- a/util/testkit/testkit.go
+++ b/util/testkit/testkit.go
@@ -30,6 +30,7 @@ import (
 "github.com/pingcap/tidb/domain"
 "github.com/pingcap/tidb/kv"
 "github.com/pingcap/tidb/session"
+ "github.com/pingcap/tidb/sessionctx/variable"
 "github.com/pingcap/tidb/types"
 "github.com/pingcap/tidb/util/sqlexec"
 "github.com/pingcap/tidb/util/testutil"
@@ -326,3 +327,10 @@ func (tk *TestKit) GetTableID(tableName string) int64 {
 tk.c.Assert(err, check.IsNil)
 return tbl.Meta().ID
}
+
+// WithPruneMode runs the given test function under the specified partition prune mode.
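+// It sets the mode at both session and global scope before running f, so daemon sessions that refresh from the global value observe it as well.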
+func WithPruneMode(tk *TestKit, mode variable.PartitionPruneMode, f func()) { + tk.MustExec("set @@tidb_partition_prune_mode=`" + string(mode) + "`") + tk.MustExec("set global tidb_partition_prune_mode=`" + string(mode) + "`") + f() +} From eb81a67b33e763b8ba94f8176b77ce70d931f7a5 Mon Sep 17 00:00:00 2001 From: "Zhuomin(Charming) Liu" Date: Wed, 30 Sep 2020 13:00:09 +0800 Subject: [PATCH 14/16] expression: fix overflow error when convert bit to int64 (#20266) --- planner/core/expression_rewriter_test.go | 9 +++++++++ types/datum.go | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/planner/core/expression_rewriter_test.go b/planner/core/expression_rewriter_test.go index 6179d912cf0a2..54497936cb7df 100644 --- a/planner/core/expression_rewriter_test.go +++ b/planner/core/expression_rewriter_test.go @@ -239,6 +239,15 @@ func (s *testExpressionRewriterSuite) TestCompareSubquery(c *C) { " 2", )) tk.MustQuery("select * from t t1 where b = all (select a from t t2)").Check(testkit.Rows()) + + // for issue 20059 + tk.MustExec("DROP TABLE IF EXISTS `t`") + tk.MustExec("CREATE TABLE `t` ( `a` int(11) DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;") + tk.MustExec("INSERT INTO `t` VALUES (1);") + tk.MustExec("DROP TABLE IF EXISTS `table_40_utf8_4`;") + tk.MustExec("CREATE TABLE `table_40_utf8_4` (`col_tinyint_key_unsigned` tinyint(4) DEFAULT NULL, `col_bit64_key_signed` bit(64) DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;") + tk.MustExec("INSERT INTO `table_40_utf8_4` VALUES (31, -18);") + tk.MustQuery("select count(1) from table_40_utf8_4 where ( select count(1) from t where table_40_utf8_4.col_bit64_key_signed!=table_40_utf8_4.col_tinyint_key_unsigned)").Check(testkit.Rows("1")) } func (s *testExpressionRewriterSuite) TestCheckFullGroupBy(c *C) { diff --git a/types/datum.go b/types/datum.go index fbccd3bdfedaa..03982a0b171e3 100644 --- a/types/datum.go +++ b/types/datum.go @@ -1577,6 +1577,11 @@ func (d *Datum) ToDecimal(sc *stmtctx.StatementContext) (*MyDecimal, error) { // ToInt64 converts to a int64. 
func (d *Datum) ToInt64(sc *stmtctx.StatementContext) (int64, error) { + switch d.Kind() { + case KindMysqlBit: + uintVal, err := d.GetBinaryLiteral().ToInt(sc) + return int64(uintVal), err + } return d.toSignedInteger(sc, mysql.TypeLonglong) } From 8232546f9c4f3d8fe48e251896e007452a46b4e8 Mon Sep 17 00:00:00 2001 From: YangKian <1207783292@qq.com> Date: Mon, 28 Sep 2020 14:54:47 +0800 Subject: [PATCH 15/16] update pingcap/parser --- go.mod | 5 +++-- go.sum | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index d8baecbe2f4d2..0ade29c631ffb 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200828054126-d677e6fd224a github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 - github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e + github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34 github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a github.com/pingcap/tidb-tools v4.0.5-0.20200820092506-34ea90c93237+incompatible github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 @@ -52,8 +52,9 @@ require ( github.com/uber-go/atomic v1.3.2 github.com/uber/jaeger-client-go v2.22.1+incompatible go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 - go.uber.org/atomic v1.6.0 + go.uber.org/atomic v1.7.0 go.uber.org/automaxprocs v1.2.0 + go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.16.0 golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 diff --git a/go.sum b/go.sum index 1a13096c1705e..5616bf4124853 100644 --- a/go.sum +++ b/go.sum @@ -509,6 +509,8 @@ github.com/pingcap/parser v0.0.0-20200813083329-a4bff035d3e2/go.mod h1:vQdbJqobJ github.com/pingcap/parser v0.0.0-20200821073936-cf85e80665c4/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e h1:IFD2pEbIcN+EzG/RGMLrv/Tt6U9KzJGT6hSbGkQ1v7c= github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= +github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34 h1:Ubcf5qesK50AVNCeW5EDOtVDSzpecEDxpzYeVU/CLwA= +github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200714122454-1a64f969cb3c/go.mod h1:v/dY4mVkt3dh/Liphhk0E4ScOkZpIk0m0GvWJ9FapDs= @@ -700,6 +702,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.2.0 h1:+RUihKM+nmYUoB9w0D0Ov5TJ2PpFO2FgenTxMJiZBZA= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= @@ -711,6 +715,8 @@ 
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= From 3f5c2fe892074de3686d5b670f67e0d0fbc58d1a Mon Sep 17 00:00:00 2001 From: YangKian <1207783292@qq.com> Date: Wed, 30 Sep 2020 13:07:24 +0800 Subject: [PATCH 16/16] redo go.mod update and go mod tidy --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 0ade29c631ffb..74c870fed9007 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200828054126-d677e6fd224a github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 - github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34 + github.com/pingcap/parser v0.0.0-20200929032957-9678b2b7cefb github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a github.com/pingcap/tidb-tools v4.0.5-0.20200820092506-34ea90c93237+incompatible github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 diff --git a/go.sum b/go.sum index 5616bf4124853..64d2e1dcd7886 100644 --- a/go.sum +++ b/go.sum @@ -507,10 +507,8 @@ github.com/pingcap/parser v0.0.0-20200730092557-34a468e9b774/go.mod h1:vQdbJqobJ github.com/pingcap/parser v0.0.0-20200731033026-84f62115187c/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200813083329-a4bff035d3e2/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200821073936-cf85e80665c4/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e h1:IFD2pEbIcN+EzG/RGMLrv/Tt6U9KzJGT6hSbGkQ1v7c= -github.com/pingcap/parser v0.0.0-20200924053142-5d7e8ebf605e/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= -github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34 h1:Ubcf5qesK50AVNCeW5EDOtVDSzpecEDxpzYeVU/CLwA= -github.com/pingcap/parser v0.0.0-20200928060942-187c4002fd34/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= +github.com/pingcap/parser v0.0.0-20200929032957-9678b2b7cefb h1:Nlswd41UZDaedHNysE/lb8dc3EpmWAApf480qU2N3nU= +github.com/pingcap/parser v0.0.0-20200929032957-9678b2b7cefb/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200714122454-1a64f969cb3c/go.mod h1:v/dY4mVkt3dh/Liphhk0E4ScOkZpIk0m0GvWJ9FapDs=
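
A standalone sketch (not part of the patches above) of the conversion behind the bit-to-int64 fix in patch 14: a BIT(64) column stores raw bits in an unsigned 64-bit container, so a negative value such as the -18 inserted by the regression test survives only if the bits are reinterpreted rather than run through a signed, range-checked conversion. The constant below is simply the two's-complement encoding of -18, chosen here to match the test data.

package main

import "fmt"

func main() {
	// BIT(64) delivers -18 as its two's-complement bit pattern in a uint64.
	bits := uint64(0xFFFFFFFFFFFFFFEE)

	// A range-checked signed conversion would report overflow for any value
	// above 1<<63 - 1; reinterpreting the bits recovers the original -18.
	fmt.Println(int64(bits)) // prints -18
}

Likewise, a minimal sketch of the concurrency pattern behind switching SessionVars.PartitionPruneMode to a go.uber.org/atomic String in the prune-mode patch above: the field is now written by sessions executing SET and read concurrently by the stats daemon, so a plain string field would race. The mode value stored below is illustrative.

package main

import (
	"fmt"

	atomic2 "go.uber.org/atomic"
)

func main() {
	var mode atomic2.String
	mode.Store("static-only") // writer: a session handling SET @@tidb_partition_prune_mode

	// reader: a background goroutine such as the stats handle's auto-analyze loop
	fmt.Println(mode.Load())
}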