diff --git a/DEPS.bzl b/DEPS.bzl index 0bcdc6e81964a..83c417bd6627a 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -1941,8 +1941,8 @@ def go_deps(): name = "com_github_pingcap_kvproto", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/kvproto", - sum = "h1:dsMpneacHyuVslSVndgUfJKrXFNG7VPdXip2ulG6glo=", - version = "v0.0.0-20220517085838-12e2f5a9d167", + sum = "h1:TZ0teMZoKHnZDlJxNkWrp5Sgv3w+ruNbrqtBYKsfaNw=", + version = "v0.0.0-20220525022339-6aaebf466305", ) go_repository( name = "com_github_pingcap_log", @@ -2292,8 +2292,8 @@ def go_deps(): name = "com_github_tikv_client_go_v2", build_file_proto_mode = "disable_global", importpath = "github.com/tikv/client-go/v2", - sum = "h1:N5ivsNkDQDgimY0ZVqMnWqXjEnxy5uFChoB4wPIKpPI=", - version = "v2.0.1-0.20220613112734-be31f33ba03b", + sum = "h1:VAyYcN1Nw7RupQszUYqOkueEVapWSxKFU7uBaYY5Dv8=", + version = "v2.0.1-0.20220627063500-947d923945fd", ) go_repository( name = "com_github_tikv_pd_client", diff --git a/bindinfo/BUILD.bazel b/bindinfo/BUILD.bazel index 22cb9e355d99e..f642a950ed2a3 100644 --- a/bindinfo/BUILD.bazel +++ b/bindinfo/BUILD.bazel @@ -12,6 +12,7 @@ go_library( importpath = "github.com/pingcap/tidb/bindinfo", visibility = ["//visibility:public"], deps = [ + "//kv", "//metrics", "//parser", "//parser/ast", diff --git a/bindinfo/handle.go b/bindinfo/handle.go index 48781082b6a3c..c6c5fe8677359 100644 --- a/bindinfo/handle.go +++ b/bindinfo/handle.go @@ -24,6 +24,7 @@ import ( "sync/atomic" "time" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" @@ -134,9 +135,10 @@ func (h *BindHandle) Update(fullLoad bool) (err error) { exec := h.sctx.Context.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) // No need to acquire the session context lock for ExecRestrictedSQL, it // uses another background session. - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation, source + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation, source FROM mysql.bind_info WHERE update_time > %? ORDER BY update_time, create_time`, updateTime) if err != nil { @@ -209,20 +211,21 @@ func (h *BindHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecor h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -239,7 +242,7 @@ func (h *BindHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecor now := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 3) updateTs := now.String() - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? 
AND update_time < %?`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %?`, deleted, updateTs, record.OriginalSQL, updateTs) if err != nil { return err @@ -250,7 +253,7 @@ func (h *BindHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecor record.Bindings[i].UpdateTime = now // Insert the BindRecord to the storage. - _, err = exec.ExecuteInternal(context.TODO(), `INSERT INTO mysql.bind_info VALUES (%?,%?, %?, %?, %?, %?, %?, %?, %?)`, + _, err = exec.ExecuteInternal(ctx, `INSERT INTO mysql.bind_info VALUES (%?,%?, %?, %?, %?, %?, %?, %?, %?)`, record.OriginalSQL, record.Bindings[i].BindSQL, record.Db, @@ -296,20 +299,21 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord) h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -322,7 +326,7 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord) return err } if duplicateBinding != nil { - _, err = exec.ExecuteInternal(context.TODO(), `DELETE FROM mysql.bind_info WHERE original_sql = %? AND bind_sql = %?`, record.OriginalSQL, duplicateBinding.BindSQL) + _, err = exec.ExecuteInternal(ctx, `DELETE FROM mysql.bind_info WHERE original_sql = %? AND bind_sql = %?`, record.OriginalSQL, duplicateBinding.BindSQL) if err != nil { return err } @@ -338,7 +342,7 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord) record.Bindings[i].UpdateTime = now // Insert the BindRecord to the storage. - _, err = exec.ExecuteInternal(context.TODO(), `INSERT INTO mysql.bind_info VALUES (%?, %?, %?, %?, %?, %?, %?, %?, %?)`, + _, err = exec.ExecuteInternal(ctx, `INSERT INTO mysql.bind_info VALUES (%?, %?, %?, %?, %?, %?, %?, %?, %?)`, record.OriginalSQL, record.Bindings[i].BindSQL, record.Db, @@ -365,20 +369,21 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return err } var deleteRows int defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil || deleteRows == 0 { return } @@ -398,10 +403,10 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e updateTs := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 3).String() if binding == nil { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? 
AND status != %?`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND status != %?`, deleted, updateTs, originalSQL, updateTs, deleted) } else { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? and status != %?`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? and status != %?`, deleted, updateTs, originalSQL, updateTs, binding.BindSQL, deleted) } @@ -417,8 +422,9 @@ func (h *BindHandle) SetBindRecordStatus(originalSQL string, binding *Binding, n h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return } @@ -439,12 +445,12 @@ func (h *BindHandle) SetBindRecordStatus(originalSQL string, binding *Binding, n } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -485,10 +491,10 @@ func (h *BindHandle) SetBindRecordStatus(originalSQL string, binding *Binding, n updateTsStr := updateTs.String() if binding == nil { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND status IN (%?, %?)`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND status IN (%?, %?)`, newStatus, updateTsStr, originalSQL, updateTsStr, oldStatus0, oldStatus1) } else { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? AND status IN (%?, %?)`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? AND status IN (%?, %?)`, newStatus, updateTsStr, originalSQL, updateTsStr, binding.BindSQL, oldStatus0, oldStatus1) } affectRows = int(h.sctx.Context.GetSessionVars().StmtCtx.AffectedRows()) @@ -504,18 +510,19 @@ func (h *BindHandle) GCBindRecord() (err error) { h.bindInfo.Unlock() }() exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return err } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -530,7 +537,7 @@ func (h *BindHandle) GCBindRecord() (err error) { // we only garbage collect those records with update_time before 10 leases. 
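Every bind-info hunk above makes the same substitution: context.TODO() is replaced by a context tagged with kv.WithInternalSourceType(..., kv.InternalTxnBindInfo) before the internal BEGIN/UPDATE/INSERT/COMMIT statements run. A minimal sketch of that pattern, assuming the kv and util/sqlexec import paths used elsewhere in this patch (execTagged is a hypothetical helper, not part of the change):

package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/util/sqlexec"
)

// execTagged runs one internal statement with the bind-info request source
// attached, so the kv layer can attribute the traffic to this component.
// (Statements that return rows would also need the returned record set closed.)
func execTagged(exec sqlexec.SQLExecutor, sql string, args ...interface{}) error {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo)
	_, err := exec.ExecuteInternal(ctx, sql, args...)
	return err
}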
updateTime := time.Now().Add(-(10 * Lease)) updateTimeStr := types.NewTime(types.FromGoTime(updateTime), mysql.TypeTimestamp, 3).String() - _, err = exec.ExecuteInternal(context.TODO(), `DELETE FROM mysql.bind_info WHERE status = 'deleted' and update_time < %?`, updateTimeStr) + _, err = exec.ExecuteInternal(ctx, `DELETE FROM mysql.bind_info WHERE status = 'deleted' and update_time < %?`, updateTimeStr) return err } @@ -542,8 +549,9 @@ func (h *BindHandle) GCBindRecord() (err error) { // even if they come from different tidb instances. func (h *BindHandle) lockBindInfoTable() error { // h.sctx already locked. + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err := exec.ExecuteInternal(context.TODO(), h.LockBindInfoSQL()) + _, err := exec.ExecuteInternal(ctx, h.LockBindInfoSQL()) return err } @@ -790,9 +798,10 @@ func (h *BindHandle) extractCaptureFilterFromStorage() (filter *captureFilter) { users: make(map[string]struct{}), } exec := h.sctx.Context.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) // No need to acquire the session context lock for ExecRestrictedSQL, it // uses another background session. - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT filter_type, filter_value FROM mysql.capture_plan_baselines_blacklist order by filter_type`) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT filter_type, filter_value FROM mysql.capture_plan_baselines_blacklist order by filter_type`) if err != nil { logutil.BgLogger().Warn("[sql-bind] failed to load mysql.capture_plan_baselines_blacklist", zap.Error(err)) return @@ -898,7 +907,8 @@ func getHintsForSQL(sctx sessionctx.Context, sql string) (string, error) { // Usually passing a sprintf to ExecuteInternal is not recommended, but in this case // it is safe because ExecuteInternal does not permit MultiStatement execution. Thus, // the statement won't be able to "break out" from EXPLAIN. 
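The read paths (Update, extractCaptureFilterFromStorage, getEvolveParameters) use the restricted executor instead; the shape is the same, only with a tagged context and a query that returns rows. A rough sketch, again with a hypothetical helper and import paths assumed from this patch:

package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/util/sqlexec"
)

// loadBindSources reads one column from mysql.bind_info with the bind-info
// source type attached to the context, mirroring the ExecRestrictedSQL calls above.
func loadBindSources(exec sqlexec.RestrictedSQLExecutor) ([]string, error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo)
	rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT source FROM mysql.bind_info`)
	if err != nil {
		return nil, err
	}
	srcs := make([]string, 0, len(rows))
	for _, row := range rows {
		srcs = append(srcs, row.GetString(0))
	}
	return srcs, nil
}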
- rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), fmt.Sprintf("EXPLAIN FORMAT='hint' %s", sql)) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, fmt.Sprintf("EXPLAIN FORMAT='hint' %s", sql)) sctx.GetSessionVars().UsePlanBaselines = origVals if rs != nil { defer func() { @@ -1018,9 +1028,10 @@ func (h *BindHandle) SaveEvolveTasksToStore() { h.pendingVerifyBindRecordMap.flushToStore() } -func getEvolveParameters(ctx sessionctx.Context) (time.Duration, time.Time, time.Time, error) { - rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL( - context.TODO(), +func getEvolveParameters(sctx sessionctx.Context) (time.Duration, time.Time, time.Time, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) + rows, _, err := sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL( + ctx, nil, "SELECT variable_name, variable_value FROM mysql.global_variables WHERE variable_name IN (%?, %?, %?)", variable.TiDBEvolvePlanTaskMaxTime, @@ -1093,7 +1104,7 @@ func (h *BindHandle) getOnePendingVerifyJob() (string, string, Binding) { } func (h *BindHandle) getRunningDuration(sctx sessionctx.Context, db, sql string, maxTime time.Duration) (time.Duration, error) { - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) if db != "" { _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "use %n", db) if err != nil { diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go index 150be18171d2b..bba6726a6b5d6 100644 --- a/br/pkg/gluetidb/glue.go +++ b/br/pkg/gluetidb/glue.go @@ -120,6 +120,7 @@ func (gs *tidbSession) Execute(ctx context.Context, sql string) error { } func (gs *tidbSession) ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) rs, err := gs.se.ExecuteInternal(ctx, sql, args...) 
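The BR hunks tag the caller's context with kv.InternalTxnBR before it reaches the session (glue.go) or kv.RunInNewTxn (restore client). A compact sketch of the transaction side, under the same assumptions about import paths (the helper name is made up):

package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta"
)

// genIDsForRestore allocates n global IDs in a fresh transaction whose
// requests are labelled as BR-internal traffic.
func genIDsForRestore(ctx context.Context, store kv.Storage, n int) ([]int64, error) {
	var ids []int64
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR)
	err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error {
		var err error
		ids, err = meta.NewMeta(txn).GenGlobalIDs(n)
		return err
	})
	return ids, err
}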
if err != nil { return errors.Trace(err) diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index f2cd2c49b9983..77a1e0510b66b 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -1923,6 +1923,7 @@ func (rc *Client) GenGlobalID(ctx context.Context) (int64, error) { var id int64 storage := rc.GetDomain().Store() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) err := kv.RunInNewTxn( ctx, storage, @@ -1942,6 +1943,7 @@ func (rc *Client) GenGlobalIDs(ctx context.Context, n int) ([]int64, error) { ids := make([]int64, 0) storage := rc.GetDomain().Store() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) err := kv.RunInNewTxn( ctx, storage, @@ -1961,6 +1963,7 @@ func (rc *Client) UpdateSchemaVersion(ctx context.Context) error { storage := rc.GetDomain().Store() var schemaVersion int64 + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) if err := kv.RunInNewTxn( ctx, storage, diff --git a/ddl/backfilling.go b/ddl/backfilling.go index ab39a1cff1b65..0b4ff4b6d554e 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -607,6 +607,7 @@ func (w *worker) writePhysicalTableRecord(t table.PhysicalTable, bfWorkerType ba defer func() { closeBackfillWorkers(backfillWorkers) }() + jc := w.jobContext(job) for { kvRanges, err := splitTableRanges(t, reorgInfo.d.store, startKey, endKey) @@ -647,19 +648,19 @@ func (w *worker) writePhysicalTableRecord(t table.PhysicalTable, bfWorkerType ba switch bfWorkerType { case typeAddIndexWorker: - idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap, reorgInfo) + idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap, reorgInfo, jc) idxWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, idxWorker.backfillWorker) go idxWorker.backfillWorker.run(reorgInfo.d, idxWorker, job) case typeUpdateColumnWorker: // Setting InCreateOrAlterStmt tells the difference between SELECT casting and ALTER COLUMN casting. 
sessCtx.GetSessionVars().StmtCtx.InCreateOrAlterStmt = true - updateWorker := newUpdateColumnWorker(sessCtx, i, t, oldColInfo, colInfo, decodeColMap, reorgInfo) + updateWorker := newUpdateColumnWorker(sessCtx, i, t, oldColInfo, colInfo, decodeColMap, reorgInfo, jc) updateWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, updateWorker.backfillWorker) go updateWorker.backfillWorker.run(reorgInfo.d, updateWorker, job) case typeCleanUpIndexWorker: - idxWorker := newCleanUpIndexWorker(sessCtx, w, i, t, decodeColMap, reorgInfo) + idxWorker := newCleanUpIndexWorker(sessCtx, w, i, t, decodeColMap, reorgInfo, jc) idxWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, idxWorker.backfillWorker) go idxWorker.backfillWorker.run(reorgInfo.d, idxWorker, job) @@ -733,6 +734,8 @@ func iterateSnapshotRows(ctx *JobContext, store kv.Storage, priority int, t tabl ver := kv.Version{Ver: version} snap := store.GetSnapshot(ver) snap.SetOption(kv.Priority, priority) + snap.SetOption(kv.RequestSourceInternal, true) + snap.SetOption(kv.RequestSourceType, ctx.ddlJobSourceType()) if tagger := ctx.getResourceGroupTaggerForTopSQL(); tagger != nil { snap.SetOption(kv.ResourceGroupTagger, tagger) } @@ -778,6 +781,8 @@ func getRangeEndKey(ctx *JobContext, store kv.Storage, priority int, t table.Tab if tagger := ctx.getResourceGroupTaggerForTopSQL(); tagger != nil { snap.SetOption(kv.ResourceGroupTagger, tagger) } + snap.SetOption(kv.RequestSourceInternal, true) + snap.SetOption(kv.RequestSourceType, ctx.ddlJobSourceType()) it, err := snap.IterReverse(endKey.Next()) if err != nil { return nil, errors.Trace(err) diff --git a/ddl/column.go b/ddl/column.go index 200b165ef5beb..295c64f98862e 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -714,14 +714,15 @@ func (w *worker) doModifyColumnTypeWithData( updateChangingObjState(changingCol, changingIdxs, model.StateDeleteOnly) failpoint.Inject("mockInsertValueAfterCheckNull", func(val failpoint.Value) { if valStr, ok := val.(string); ok { - var ctx sessionctx.Context - ctx, err := w.sessPool.get() + var sctx sessionctx.Context + sctx, err := w.sessPool.get() if err != nil { failpoint.Return(ver, err) } - defer w.sessPool.put(ctx) + defer w.sessPool.put(sctx) - _, _, err = ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(context.Background(), nil, valStr) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, _, err = sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, valStr) if err != nil { job.State = model.JobStateCancelled failpoint.Return(ver, err) @@ -1086,10 +1087,11 @@ type updateColumnWorker struct { rowMap map[int64]types.Datum // For SQL Mode and warnings. 
- sqlMode mysql.SQLMode + sqlMode mysql.SQLMode + jobContext *JobContext } -func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, oldCol, newCol *model.ColumnInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo) *updateColumnWorker { +func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, oldCol, newCol *model.ColumnInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *updateColumnWorker { rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) return &updateColumnWorker{ backfillWorker: newBackfillWorker(sessCtx, id, t, reorgInfo), @@ -1099,6 +1101,7 @@ func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalT rowDecoder: rowDecoder, rowMap: make(map[int64]types.Datum, len(decodeColMap)), sqlMode: reorgInfo.ReorgMeta.SQLMode, + jobContext: jc, } } @@ -1264,7 +1267,8 @@ func (w *updateColumnWorker) cleanRowMap() { // BackfillDataInTxn will backfill the table record in a transaction. A lock corresponds to a rowKey if the value of rowKey is changed. func (w *updateColumnWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (taskCtx backfillTaskContext, errInTxn error) { oprStartTime := time.Now() - errInTxn = kv.RunInNewTxn(context.Background(), w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), w.jobContext.ddlJobSourceType()) + errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { taskCtx.addedCount = 0 taskCtx.scanCount = 0 txn.SetOption(kv.Priority, w.priority) diff --git a/ddl/column_change_test.go b/ddl/column_change_test.go index a6de4bc964d2f..0ed158cd740a0 100644 --- a/ddl/column_change_test.go +++ b/ddl/column_change_test.go @@ -161,7 +161,8 @@ func TestModifyAutoRandColumnWithMetaKeyChanged(t *testing.T) { tID = job.TableID if atomic.LoadInt32(&errCount) > 0 && job.Type == model.ActionModifyColumn { atomic.AddInt32(&errCount, -1) - genAutoRandErr = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBackfillDDLPrefix+ddl.DDLBackfillers[model.ActionModifyColumn]) + genAutoRandErr = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) _, err1 := t.GetAutoIDAccessors(dbID, tID).RandomID().Inc(1) return err1 @@ -176,7 +177,8 @@ func TestModifyAutoRandColumnWithMetaKeyChanged(t *testing.T) { const newAutoRandomBits uint64 = 10 testCheckJobDone(t, store, jobID, true) var newTbInfo *model.TableInfo - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) var err error newTbInfo, err = t.GetTable(dbID, tID) diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go index 4c3bdfb66faf9..41a3ddc3f3a7d 100644 --- a/ddl/db_partition_test.go +++ b/ddl/db_partition_test.go @@ -2391,7 +2391,8 @@ func checkPartitionDelRangeDone(t *testing.T, tk *testkit.TestKit, store kv.Stor } hasOldPartitionData := true - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), 
kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { it, err := txn.Iter(partitionPrefix, nil) if err != nil { return err diff --git a/ddl/ddl.go b/ddl/ddl.go index ff8bd8ada47ba..47456e5f81d17 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -269,6 +269,17 @@ func (dc *ddlCtx) setDDLLabelForTopSQL(job *model.Job) { ctx.setDDLLabelForTopSQL(job) } +func (dc *ddlCtx) setDDLSourceForDiagnosis(job *model.Job) { + dc.jobCtx.Lock() + defer dc.jobCtx.Unlock() + ctx, exists := dc.jobCtx.jobCtxMap[job.ID] + if !exists { + ctx = NewJobContext() + dc.jobCtx.jobCtxMap[job.ID] = ctx + } + ctx.setDDLLabelForDiagnosis(job) +} + func (dc *ddlCtx) getResourceGroupTaggerForTopSQL(job *model.Job) tikvrpc.ResourceGroupTagger { dc.jobCtx.Lock() defer dc.jobCtx.Unlock() @@ -431,6 +442,7 @@ func newDDL(ctx context.Context, options ...Option) *ddl { ddlCtx.jobCtx.jobCtxMap = make(map[int64]*JobContext) ddlCtx.mu.hook = opt.Hook ddlCtx.mu.interceptor = &BaseInterceptor{} + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) ddlCtx.ctx, ddlCtx.cancel = context.WithCancel(ctx) d := &ddl{ ddlCtx: ddlCtx, @@ -524,7 +536,8 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // GetNextDDLSeqNum return the next ddl seq num. func (d *ddl) GetNextDDLSeqNum() (uint64, error) { var count uint64 - err := kv.RunInNewTxn(d.ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(d.ctx, kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) var err error count, err = t.GetHistoryDDLCount() @@ -580,7 +593,8 @@ func (d *ddl) GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.In func (d *ddl) genGlobalIDs(count int) ([]int64, error) { var ret []int64 - err := kv.RunInNewTxn(context.Background(), d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { failpoint.Inject("mockGenGlobalIDFail", func(val failpoint.Value) { if val.(bool) { failpoint.Return(errors.New("gofail genGlobalIDs error")) @@ -598,7 +612,8 @@ func (d *ddl) genGlobalIDs(count int) ([]int64, error) { func (d *ddl) genPlacementPolicyID() (int64, error) { var ret int64 - err := kv.RunInNewTxn(context.Background(), d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) var err error ret, err = m.GenPlacementPolicyID() diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index c2a456ce6eda8..d1c92e48eedb1 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -3088,7 +3088,6 @@ func allSupported(specs []*ast.AlterTableSpec) bool { func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt *ast.AlterTableStmt) (err error) { ident := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} - validSpecs, err := resolveAlterTableSpec(sctx, stmt.Specs) if err != nil { return errors.Trace(err) @@ -5292,7 +5291,8 @@ func (d *ddl) dropTableObject( zap.String("table", fullti.Name.O), ) exec := ctx.(sqlexec.RestrictedSQLExecutor) - _, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "admin check table %n.%n", fullti.Schema.O, fullti.Name.O) + internalCtx := 
kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, _, err := exec.ExecRestrictedSQL(internalCtx, nil, "admin check table %n.%n", fullti.Schema.O, fullti.Name.O) if err != nil { return err } @@ -7078,8 +7078,8 @@ func (d *ddl) AlterPlacementPolicy(ctx sessionctx.Context, stmt *ast.AlterPlacem return errors.Trace(err) } -func (d *ddl) AlterTableCache(ctx sessionctx.Context, ti ast.Ident) (err error) { - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) +func (d *ddl) AlterTableCache(sctx sessionctx.Context, ti ast.Ident) (err error) { + schema, t, err := d.getSchemaAndTableByIdent(sctx, ti) if err != nil { return err } @@ -7107,17 +7107,18 @@ func (d *ddl) AlterTableCache(ctx sessionctx.Context, ti ast.Ident) (err error) return dbterror.ErrOptOnCacheTable.GenWithStackByArgs("table too large") } - ddlQuery, _ := ctx.Value(sessionctx.QueryString).(string) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + ddlQuery, _ := sctx.Value(sessionctx.QueryString).(string) // Initialize the cached table meta lock info in `mysql.table_cache_meta`. // The operation shouldn't fail in most cases, and if it does, return the error directly. // This DML and the following DDL is not atomic, that's not a problem. - _, _, err = ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(context.Background(), nil, + _, _, err = sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, "replace into mysql.table_cache_meta values (%?, 'NONE', 0, 0)", t.Meta().ID) if err != nil { return errors.Trace(err) } - ctx.SetValue(sessionctx.QueryString, ddlQuery) + sctx.SetValue(sessionctx.QueryString, ddlQuery) job := &model.Job{ SchemaID: schema.ID, @@ -7129,14 +7130,16 @@ func (d *ddl) AlterTableCache(ctx sessionctx.Context, ti ast.Ident) (err error) Args: []interface{}{}, } - err = d.DoDDLJob(ctx, job) + err = d.DoDDLJob(sctx, job) return d.callHookOnChanged(job, err) } func checkCacheTableSize(store kv.Storage, tableID int64) (bool, error) { const cacheTableSizeLimit = 64 * (1 << 20) // 64M succ := true - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnCacheTable) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { + txn.SetOption(kv.RequestSourceType, kv.InternalTxnCacheTable) prefix := tablecodec.GenTablePrefix(tableID) it, err := txn.Iter(prefix, prefix.PrefixNext()) if err != nil { diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 0dff4c769064e..5ebbb3a4c574a 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -262,6 +262,7 @@ func TestBuildJobDependence(t *testing.T) { defer func() { require.NoError(t, store.Close()) }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) // Add some non-add-index jobs. 
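Most of the remaining hunks, especially in the ddl tests, repeat one idiom: build a kv.InternalTxnDDL-tagged context once and feed it to kv.RunInNewTxn. A sketch of that idiom as a small read helper (names are illustrative only; import paths assumed from this patch):

package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/parser/model"
)

// getTableInfo reads a table's metadata inside a DDL-tagged transaction,
// the same way the updated tests fetch meta via kv.RunInNewTxn.
func getTableInfo(store kv.Storage, dbID, tblID int64) (*model.TableInfo, error) {
	var tblInfo *model.TableInfo
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
	err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error {
		var err error
		tblInfo, err = meta.NewMeta(txn).GetTable(dbID, tblID)
		return err
	})
	return tblInfo, err
}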
job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn} job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable} @@ -270,7 +271,7 @@ func TestBuildJobDependence(t *testing.T) { job7 := &model.Job{ID: 7, TableID: 2, Type: model.ActionModifyColumn} job9 := &model.Job{ID: 9, SchemaID: 111, Type: model.ActionDropSchema} job11 := &model.Job{ID: 11, TableID: 2, Type: model.ActionRenameTable, Args: []interface{}{int64(111), "old db name"}} - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.NoError(t, m.EnQueueDDLJob(job1)) require.NoError(t, m.EnQueueDDLJob(job2)) @@ -283,7 +284,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job4 := &model.Job{ID: 4, TableID: 1, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job4) require.NoError(t, err) @@ -292,7 +293,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job5 := &model.Job{ID: 5, TableID: 2, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job5) require.NoError(t, err) @@ -301,7 +302,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job8 := &model.Job{ID: 8, TableID: 3, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job8) require.NoError(t, err) @@ -310,7 +311,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job10 := &model.Job{ID: 10, SchemaID: 111, TableID: 3, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job10) require.NoError(t, err) @@ -319,7 +320,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job12 := &model.Job{ID: 12, SchemaID: 112, TableID: 2, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job12) require.NoError(t, err) @@ -480,8 +481,9 @@ func isDDLJobDone(test *testing.T, t *meta.Meta) bool { func testCheckSchemaState(test *testing.T, d *ddl, dbInfo *model.DBInfo, state model.SchemaState) { isDropped := true + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) for { - err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { t := 
meta.NewMeta(txn) info, err := t.GetDatabase(dbInfo.ID) require.NoError(test, err) @@ -556,24 +558,24 @@ func TestReorg(t *testing.T) { time.Sleep(testLease) - ctx := testNewContext(d) + sctx := testNewContext(d) - ctx.SetValue(testCtxKey, 1) - require.Equal(t, ctx.Value(testCtxKey), 1) - ctx.ClearValue(testCtxKey) + sctx.SetValue(testCtxKey, 1) + require.Equal(t, sctx.Value(testCtxKey), 1) + sctx.ClearValue(testCtxKey) - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) - txn, err := ctx.Txn(true) + txn, err := sctx.Txn(true) require.NoError(t, err) err = txn.Set([]byte("a"), []byte("b")) require.NoError(t, err) err = txn.Rollback() require.NoError(t, err) - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) - txn, err = ctx.Txn(true) + txn, err = sctx.Txn(true) require.NoError(t, err) err = txn.Set([]byte("a"), []byte("b")) require.NoError(t, err) @@ -586,9 +588,9 @@ func TestReorg(t *testing.T) { ID: 1, SnapshotVer: 1, // Make sure it is not zero. So the reorgInfo's first is false. } - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) - txn, err = ctx.Txn(true) + txn, err = sctx.Txn(true) require.NoError(t, err) m := meta.NewMeta(txn) e := &meta.Element{ID: 333, TypeKey: meta.IndexElementKey} @@ -617,7 +619,7 @@ func TestReorg(t *testing.T) { // Test whether reorgInfo's Handle is update. err = txn.Commit(context.Background()) require.NoError(t, err) - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) m = meta.NewMeta(txn) @@ -647,7 +649,8 @@ func TestReorg(t *testing.T) { EndKey: test.endKey.Encoded(), PhysicalTableID: 456, } - err = kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) var err1 error _, err1 = getReorgInfo(NewJobContext(), d.ddlCtx, newReorgHandler(m), job, mockTbl, []*meta.Element{element}) @@ -659,7 +662,7 @@ func TestReorg(t *testing.T) { job.SnapshotVer = uint64(1) err = info.UpdateReorgMeta(info.StartKey) require.NoError(t, err) - err = kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) info1, err1 := getReorgInfo(NewJobContext(), d.ddlCtx, newReorgHandler(m), job, mockTbl, []*meta.Element{element}) require.NoError(t, err1) @@ -678,7 +681,7 @@ func TestReorg(t *testing.T) { return nil }) require.Error(t, err) - txn, err = ctx.Txn(true) + txn, err = sctx.Txn(true) require.NoError(t, err) err = txn.Commit(context.Background()) require.NoError(t, err) diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 07f234cd1f982..b22d11e754609 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -105,6 +105,7 @@ type JobContext struct { cacheSQL string cacheNormalizedSQL string cacheDigest *parser.Digest + tp string } // NewJobContext returns a new ddl job context. 
@@ -114,6 +115,7 @@ func NewJobContext() *JobContext { cacheSQL: "", cacheNormalizedSQL: "", cacheDigest: nil, + tp: "unknown", } } @@ -284,7 +286,8 @@ func (d *ddl) limitDDLJobs() { // addBatchDDLJobs gets global job IDs and puts the DDL jobs in the DDL queue. func (d *ddl) addBatchDDLJobs(tasks []*limitJobTask) { startTime := time.Now() - err := kv.RunInNewTxn(context.Background(), d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) ids, err := t.GenGlobalIDs(len(tasks)) if err != nil { @@ -584,6 +587,25 @@ func (w *worker) unlockSeqNum(err error) { } } +// DDLBackfillers contains the DDL need backfill step. +var DDLBackfillers = map[model.ActionType]string{ + model.ActionAddIndex: "add_index", + model.ActionModifyColumn: "modify_column", + model.ActionDropIndex: "drop_index", +} + +func getDDLRequestSource(job *model.Job) string { + if tp, ok := DDLBackfillers[job.Type]; ok { + return kv.InternalTxnBackfillDDLPrefix + tp + } + return kv.InternalTxnDDL +} + +func (w *JobContext) setDDLLabelForDiagnosis(job *model.Job) { + w.tp = getDDLRequestSource(job) + w.ddlJobCtx = kv.WithInternalSourceType(w.ddlJobCtx, w.ddlJobSourceType()) +} + func (w *JobContext) getResourceGroupTaggerForTopSQL() tikvrpc.ResourceGroupTagger { if !topsqlstate.TopSQLEnabled() || w.cacheDigest == nil { return nil @@ -597,6 +619,10 @@ func (w *JobContext) getResourceGroupTaggerForTopSQL() tikvrpc.ResourceGroupTagg return tagger } +func (w *JobContext) ddlJobSourceType() string { + return w.tp +} + // handleDDLJobQueue handles DDL jobs in DDL Job queue. func (w *worker) handleDDLJobQueue(d *ddlCtx) error { once := true @@ -612,7 +638,8 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx) error { runJobErr error ) waitTime := 2 * d.lease - err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { // We are not owner, return and retry checking later. if !d.isOwner() { return nil @@ -632,6 +659,8 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx) error { } w.setDDLLabelForTopSQL(job) + w.setDDLSourceForDiagnosis(job) + jobContext := w.jobContext(job) if tagger := w.getResourceGroupTaggerForTopSQL(job); tagger != nil { txn.SetOption(kv.ResourceGroupTagger, tagger) } @@ -657,6 +686,8 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx) error { d.mu.hook.OnJobRunBefore(job) d.mu.RUnlock() + // set request source type to DDL type + txn.SetOption(kv.RequestSourceType, jobContext.ddlJobSourceType()) // If running job meets error, we will save this error in job Error // and retry later if the job is not cancelled. 
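DDLBackfillers and getDDLRequestSource above decide which source string a job gets: backfill-heavy jobs get the backfill prefix plus the backfiller name, everything else falls back to kv.InternalTxnDDL, and iterateSnapshotRows/getRangeEndKey then stamp that string onto the snapshots they read from. A sketch restating that flow with exported names only (both helpers are hypothetical):

package example

import (
	"github.com/pingcap/tidb/ddl"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/parser/model"
)

// sourceForJob mirrors getDDLRequestSource: backfill job types get the
// backfill DDL prefix plus the backfiller name, everything else is plain DDL.
func sourceForJob(tp model.ActionType) string {
	if name, ok := ddl.DDLBackfillers[tp]; ok {
		return kv.InternalTxnBackfillDDLPrefix + name
	}
	return kv.InternalTxnDDL
}

// tagSnapshot marks a snapshot as internal traffic of the given source,
// as the backfill read paths above do before iterating rows.
func tagSnapshot(snap kv.Snapshot, source string) {
	snap.SetOption(kv.RequestSourceInternal, true)
	snap.SetOption(kv.RequestSourceType, source)
}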
schemaVer, runJobErr = w.runDDLJob(d, t, job) diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index a6d3f8ad9fb53..379c2b34df24a 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -113,7 +113,8 @@ func TestParallelDDL(t *testing.T) { qLen2 := int64(0) var err error for { - checkErr = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + checkErr = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) qLen1, err = m.DDLJobQueueLen() if err != nil { diff --git a/ddl/delete_range.go b/ddl/delete_range.go index 04549df17feb5..636c5d54edb2e 100644 --- a/ddl/delete_range.go +++ b/ddl/delete_range.go @@ -170,21 +170,22 @@ func (dr *delRange) startEmulator() { } func (dr *delRange) doDelRangeWork() error { - ctx, err := dr.sessPool.get() + sctx, err := dr.sessPool.get() if err != nil { logutil.BgLogger().Error("[ddl] delRange emulator get session failed", zap.Error(err)) return errors.Trace(err) } - defer dr.sessPool.put(ctx) + defer dr.sessPool.put(sctx) - ranges, err := util.LoadDeleteRanges(ctx, math.MaxInt64) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + ranges, err := util.LoadDeleteRanges(ctx, sctx, math.MaxInt64) if err != nil { logutil.BgLogger().Error("[ddl] delRange emulator load tasks failed", zap.Error(err)) return errors.Trace(err) } for _, r := range ranges { - if err := dr.doTask(ctx, r); err != nil { + if err := dr.doTask(sctx, r); err != nil { logutil.BgLogger().Error("[ddl] delRange emulator do task failed", zap.Error(err)) return errors.Trace(err) } @@ -192,13 +193,14 @@ func (dr *delRange) doDelRangeWork() error { return nil } -func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { +func (dr *delRange) doTask(sctx sessionctx.Context, r util.DelRangeTask) error { var oldStartKey, newStartKey kv.Key oldStartKey = r.StartKey for { finish := true dr.keys = dr.keys[:0] - err := kv.RunInNewTxn(context.Background(), dr.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, dr.store, false, func(ctx context.Context, txn kv.Transaction) error { if topsqlstate.TopSQLEnabled() { // Only when TiDB run without PD(use unistore as storage for test) will run into here, so just set a mock internal resource tagger. 
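delete_range.go also illustrates the naming convention this patch moves toward: sctx stays the sessionctx.Context that runs SQL, while ctx is a plain context.Context that only carries the request source, and util.LoadDeleteRanges now takes both. A brief sketch of calling the updated loader (assuming the ddl/util and sessionctx import paths):

package example

import (
	"context"
	"math"

	"github.com/pingcap/tidb/ddl/util"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
)

// pendingDeleteRanges lists outstanding gc_delete_range tasks, passing a
// DDL-tagged context alongside the session that executes the query.
func pendingDeleteRanges(sctx sessionctx.Context) ([]util.DelRangeTask, error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
	return util.LoadDeleteRanges(ctx, sctx, math.MaxInt64)
}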
txn.SetOption(kv.ResourceGroupTagger, util.GetInternalResourceGroupTaggerForTopSQL()) @@ -235,7 +237,7 @@ func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { return errors.Trace(err) } if finish { - if err := util.CompleteDeleteRange(ctx, r); err != nil { + if err := util.CompleteDeleteRange(sctx, r); err != nil { logutil.BgLogger().Error("[ddl] delRange emulator complete task failed", zap.Error(err)) return errors.Trace(err) } @@ -247,7 +249,7 @@ func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { zap.Stringer("endKey", endKey)) break } - if err := util.UpdateDeleteRange(ctx, r, newStartKey, oldStartKey); err != nil { + if err := util.UpdateDeleteRange(sctx, r, newStartKey, oldStartKey); err != nil { logutil.BgLogger().Error("[ddl] delRange emulator update task failed", zap.Error(err)) } oldStartKey = newStartKey @@ -264,6 +266,7 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, return errors.Trace(err) } + ctx = kv.WithInternalSourceType(ctx, getDDLRequestSource(job)) s := sctx.(sqlexec.SQLExecutor) switch job.Type { case model.ActionDropSchema: diff --git a/ddl/index.go b/ddl/index.go index 9296d2507275c..6701a1e905d32 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -1039,7 +1039,8 @@ type baseIndexWorker struct { rowMap map[int64]types.Datum rowDecoder *decoder.RowDecoder - sqlMode mysql.SQLMode + sqlMode mysql.SQLMode + jobContext *JobContext } type addIndexWorker struct { @@ -1052,7 +1053,7 @@ type addIndexWorker struct { distinctCheckFlags []bool } -func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo) *addIndexWorker { +func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *addIndexWorker { index := tables.NewIndex(t.GetPhysicalID(), t.Meta(), indexInfo) rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) return &addIndexWorker{ @@ -1064,6 +1065,7 @@ func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t tab rowMap: make(map[int64]types.Datum, len(decodeColMap)), metricCounter: metrics.BackfillTotalCounter.WithLabelValues("add_idx_rate"), sqlMode: reorgInfo.ReorgMeta.SQLMode, + jobContext: jc, }, index: index, } @@ -1312,7 +1314,8 @@ func (w *addIndexWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (taskC }) oprStartTime := time.Now() - errInTxn = kv.RunInNewTxn(context.Background(), w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), w.jobContext.ddlJobSourceType()) + errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { taskCtx.addedCount = 0 taskCtx.scanCount = 0 txn.SetOption(kv.Priority, w.priority) @@ -1497,7 +1500,7 @@ type cleanUpIndexWorker struct { baseIndexWorker } -func newCleanUpIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo) *cleanUpIndexWorker { +func newCleanUpIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *cleanUpIndexWorker { indexes := make([]table.Index, 0, len(t.Indices())) rowDecoder := 
decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) for _, index := range t.Indices() { @@ -1514,6 +1517,7 @@ func newCleanUpIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t rowMap: make(map[int64]types.Datum, len(decodeColMap)), metricCounter: metrics.BackfillTotalCounter.WithLabelValues("cleanup_idx_rate"), sqlMode: reorgInfo.ReorgMeta.SQLMode, + jobContext: jc, }, } } @@ -1526,7 +1530,8 @@ func (w *cleanUpIndexWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (t }) oprStartTime := time.Now() - errInTxn = kv.RunInNewTxn(context.Background(), w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), w.jobContext.ddlJobSourceType()) + errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { taskCtx.addedCount = 0 taskCtx.scanCount = 0 txn.SetOption(kv.Priority, w.priority) diff --git a/ddl/placement/meta_bundle_test.go b/ddl/placement/meta_bundle_test.go index f53599bd2e14c..4a2d6f645b136 100644 --- a/ddl/placement/meta_bundle_test.go +++ b/ddl/placement/meta_bundle_test.go @@ -128,7 +128,8 @@ func createMetaBundleSuite() *metaBundleSuite { } func (s *metaBundleSuite) prepareMeta(t *testing.T, store kv.Storage) { - err := kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.NoError(t, m.CreatePolicy(s.policy1)) require.NoError(t, m.CreatePolicy(s.policy2)) @@ -145,7 +146,8 @@ func TestNewTableBundle(t *testing.T) { s := createMetaBundleSuite() s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) // tbl1 @@ -180,7 +182,8 @@ func TestNewPartitionBundle(t *testing.T) { s := createMetaBundleSuite() s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) // tbl1.par0 @@ -205,7 +208,8 @@ func TestNewPartitionListBundles(t *testing.T) { s := createMetaBundleSuite() s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) bundles, err := placement.NewPartitionListBundles(m, s.tbl1.Partition.Definitions) @@ -236,7 +240,8 @@ func TestNewFullTableBundles(t *testing.T) { s := createMetaBundleSuite() s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error 
{ m := meta.NewMeta(txn) bundles, err := placement.NewFullTableBundles(m, s.tbl1) diff --git a/ddl/placement_policy_ddl_test.go b/ddl/placement_policy_ddl_test.go index 6525ad6a2b092..48203b9c317f5 100644 --- a/ddl/placement_policy_ddl_test.go +++ b/ddl/placement_policy_ddl_test.go @@ -127,9 +127,10 @@ func TestPlacementPolicyInUse(t *testing.T) { require.NoError(t, err) is := builder.Build() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) for _, policy := range []*model.PolicyInfo{p1, p2, p4, p5} { require.True(t, dbterror.ErrPlacementPolicyInUse.Equal(ddl.CheckPlacementPolicyNotInUseFromInfoSchema(is, policy))) - require.NoError(t, kv.RunInNewTxn(context.Background(), sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { + require.NoError(t, kv.RunInNewTxn(ctx, sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.True(t, dbterror.ErrPlacementPolicyInUse.Equal(ddl.CheckPlacementPolicyNotInUseFromMeta(m, policy))) return nil @@ -137,7 +138,7 @@ func TestPlacementPolicyInUse(t *testing.T) { } require.NoError(t, ddl.CheckPlacementPolicyNotInUseFromInfoSchema(is, p3)) - require.NoError(t, kv.RunInNewTxn(context.Background(), sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { + require.NoError(t, kv.RunInNewTxn(ctx, sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.NoError(t, ddl.CheckPlacementPolicyNotInUseFromMeta(m, p3)) return nil diff --git a/ddl/placement_policy_test.go b/ddl/placement_policy_test.go index b4b71e76cd422..3c4c1b4cd3109 100644 --- a/ddl/placement_policy_test.go +++ b/ddl/placement_policy_test.go @@ -44,7 +44,8 @@ func checkExistTableBundlesInPD(t *testing.T, do *domain.Domain, dbName string, tblInfo, err := do.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) require.NoError(t, err) - require.NoError(t, kv.RunInNewTxn(context.TODO(), do.Store(), false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, do.Store(), false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) checkTableBundlesInPD(t, do, tt, tblInfo.Meta()) return nil @@ -321,7 +322,8 @@ func testGetPolicyByIDFromMeta(t *testing.T, store kv.Storage, policyID int64) * policyInfo *model.PolicyInfo err error ) - err1 := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err1 := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) policyInfo, err = t.GetPolicy(policyID) if err != nil { @@ -870,7 +872,8 @@ func testGetPolicyByName(t *testing.T, ctx sessionctx.Context, name string, must func testGetPolicyDependency(storage kv.Storage, name string) []int64 { ids := make([]int64, 0, 32) - err1 := kv.RunInNewTxn(context.Background(), storage, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err1 := kv.RunInNewTxn(ctx, storage, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) dbs, err := t.ListDatabases() if err != nil { diff --git a/ddl/reorg.go b/ddl/reorg.go index b1437972976ed..f3c87213c684d 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -458,6 
+458,8 @@ func (dc *ddlCtx) buildDescTableScan(ctx *JobContext, startTS uint64, tbl table. builder.Request.ResourceGroupTagger = ctx.getResourceGroupTaggerForTopSQL() builder.Request.NotFillCache = true builder.Request.Priority = kv.PriorityLow + builder.RequestSource.RequestSourceInternal = true + builder.RequestSource.RequestSourceType = ctx.ddlJobSourceType() kvReq, err := builder.Build() if err != nil { @@ -730,7 +732,8 @@ func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key) error { return nil } - err := kv.RunInNewTxn(context.Background(), r.d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, r.d.store, true, func(ctx context.Context, txn kv.Transaction) error { rh := newReorgHandler(meta.NewMeta(txn)) return errors.Trace(rh.UpdateDDLReorgHandle(r.Job, startKey, r.EndKey, r.PhysicalTableID, r.currElement)) }) diff --git a/ddl/reorg_test.go b/ddl/reorg_test.go index e365a0e2d1a7e..e6006f53a6f2d 100644 --- a/ddl/reorg_test.go +++ b/ddl/reorg_test.go @@ -35,7 +35,7 @@ func TestReorgOwner(t *testing.T) { d1 := domain.DDL() - ctx := testkit.NewTestKit(t, store).Session() + sctx := testkit.NewTestKit(t, store).Session() require.True(t, d1.OwnerManager().IsOwner()) @@ -60,23 +60,23 @@ func TestReorgOwner(t *testing.T) { dbInfo, err := testSchemaInfo(store, "test_reorg") require.NoError(t, err) - testCreateSchema(t, ctx, d1, dbInfo) + testCreateSchema(t, sctx, d1, dbInfo) tblInfo, err := testTableInfo(store, "t", 3) require.NoError(t, err) - testCreateTable(t, ctx, d1, dbInfo, tblInfo) + testCreateTable(t, sctx, d1, dbInfo, tblInfo) tbl, err := testGetTableWithError(store, dbInfo.ID, tblInfo.ID) require.NoError(t, err) num := 10 - ctx = testkit.NewTestKit(t, store).Session() - err = sessiontxn.NewTxn(context.Background(), ctx) + sctx = testkit.NewTestKit(t, store).Session() + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) for i := 0; i < num; i++ { - _, err := tbl.AddRecord(ctx, types.MakeDatums(i, i, i)) + _, err := tbl.AddRecord(sctx, types.MakeDatums(i, i, i)) require.NoError(t, err) } - require.NoError(t, ctx.CommitTxn(context.Background())) + require.NoError(t, sctx.CommitTxn(context.Background())) tc := &ddl.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { @@ -88,9 +88,10 @@ func TestReorgOwner(t *testing.T) { d1.SetHook(tc) - testDropSchema(t, ctx, d1, dbInfo) + testDropSchema(t, sctx, d1, dbInfo) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) db, err1 := m.GetDatabase(dbInfo.ID) require.NoError(t, err1) diff --git a/ddl/restart_test.go b/ddl/restart_test.go index fc4334a2e1b85..bbec12b40ca6f 100644 --- a/ddl/restart_test.go +++ b/ddl/restart_test.go @@ -287,7 +287,8 @@ func testTableInfo(d *ddl, name string, num int) (*model.TableInfo, error) { } func testCheckTableState(t *testing.T, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, state model.SchemaState) { - require.NoError(t, kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn 
kv.Transaction) error { m := meta.NewMeta(txn) info, err := m.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) diff --git a/ddl/sanity_check.go b/ddl/sanity_check.go index 3a127c6d2739f..e81b9f31b5c5a 100644 --- a/ddl/sanity_check.go +++ b/ddl/sanity_check.go @@ -58,10 +58,11 @@ func queryDeleteRangeCnt(sessPool *sessionPool, jobID int64) (int, error) { sessPool.put(sctx) }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) query := `select sum(cnt) from (select count(1) cnt from mysql.gc_delete_range where job_id = %? union all select count(1) cnt from mysql.gc_delete_range_done where job_id = %?) as gdr;` - rs, err := s.ExecuteInternal(context.TODO(), query, jobID, jobID) + rs, err := s.ExecuteInternal(ctx, query, jobID, jobID) if err != nil { return 0, errors.Trace(err) } diff --git a/ddl/schema_test.go b/ddl/schema_test.go index 58e7996401f0b..cdb071d5e0daf 100644 --- a/ddl/schema_test.go +++ b/ddl/schema_test.go @@ -55,7 +55,8 @@ func testCreateTable(t *testing.T, ctx sessionctx.Context, d ddl.DDL, dbInfo *mo } func testCheckTableState(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo, state model.SchemaState) { - require.NoError(t, kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) info, err := m.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) @@ -105,7 +106,8 @@ func testTableInfo(store kv.Storage, name string, num int) (*model.TableInfo, er func genGlobalIDs(store kv.Storage, count int) ([]int64, error) { var ret []int64 - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) var err error ret, err = m.GenGlobalIDs(count) @@ -175,8 +177,9 @@ func isDDLJobDone(test *testing.T, t *meta.Meta) bool { func testCheckSchemaState(test *testing.T, store kv.Storage, dbInfo *model.DBInfo, state model.SchemaState) { isDropped := true + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) for { - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) info, err := t.GetDatabase(dbInfo.ID) require.NoError(test, err) diff --git a/ddl/serial_test.go b/ddl/serial_test.go index f1359854f7b6e..a588b8a16169a 100644 --- a/ddl/serial_test.go +++ b/ddl/serial_test.go @@ -767,7 +767,8 @@ func TestCanceledJobTakeTime(t *testing.T) { once := sync.Once{} hook.OnJobUpdatedExported = func(job *model.Job) { once.Do(func() { - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := m.GetAutoIDAccessors(job.SchemaID, job.TableID).Del() if err != nil { diff --git a/ddl/split_region.go b/ddl/split_region.go index 87b408df2c3ce..e1ae191812fd1 100644 --- a/ddl/split_region.go +++ 
b/ddl/split_region.go @@ -32,6 +32,7 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, regionIDs := make([]uint64, 0, len(pi.Definitions)) ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() + ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { for _, def := range pi.Definitions { regionIDs = append(regionIDs, preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, def.ID, scatter)...) @@ -49,6 +50,7 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) { ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() + ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) var regionIDs []uint64 if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatter) diff --git a/ddl/stat.go b/ddl/stat.go index 3105ec7d66c17..561a9be90ebb3 100644 --- a/ddl/stat.go +++ b/ddl/stat.go @@ -51,7 +51,8 @@ func (d *ddl) Stats(vars *variable.SessionVars) (map[string]interface{}, error) m[serverID] = d.uuid var ddlInfo *Info - err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { var err1 error ddlInfo, err1 = GetDDLInfo(txn) if err1 != nil { diff --git a/ddl/table_test.go b/ddl/table_test.go index d3c9480a92470..0a1dff690f5b2 100644 --- a/ddl/table_test.go +++ b/ddl/table_test.go @@ -97,7 +97,8 @@ func testLockTable(t *testing.T, ctx sessionctx.Context, d ddl.DDL, newSchemaID } func checkTableLockedTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo, serverID string, sessionID uint64, lockTp model.TableLockType) { - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) info, err := tt.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) @@ -138,7 +139,8 @@ func testTruncateTable(t *testing.T, ctx sessionctx.Context, store kv.Storage, d func testGetTableWithError(store kv.Storage, schemaID, tableID int64) (table.Table, error) { var tblInfo *model.TableInfo - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) var err1 error tblInfo, err1 = t.GetTable(schemaID, tableID) @@ -233,7 +235,8 @@ func TestTable(t *testing.T) { } func checkTableCacheTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo) { - require.NoError(t, kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), 
kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) info, err := tt.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) @@ -245,7 +248,8 @@ func checkTableCacheTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, t } func checkTableNoCacheTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo) { - require.NoError(t, kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) info, err := tt.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) diff --git a/ddl/tiflash_replica_test.go b/ddl/tiflash_replica_test.go index 5aee99eac0792..10727aed4abac 100644 --- a/ddl/tiflash_replica_test.go +++ b/ddl/tiflash_replica_test.go @@ -367,7 +367,8 @@ func TestTruncateTable2(t *testing.T) { tablePrefix := tablecodec.EncodeTablePrefix(oldTblID) hasOldTableData := true for i := 0; i < waitForCleanDataRound; i++ { - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { it, err1 := txn.Iter(tablePrefix, nil) if err1 != nil { return err1 diff --git a/ddl/util/util.go b/ddl/util/util.go index d4836a187f832..386b8d71f7e78 100644 --- a/ddl/util/util.go +++ b/ddl/util/util.go @@ -56,17 +56,17 @@ func (t DelRangeTask) Range() (kv.Key, kv.Key) { } // LoadDeleteRanges loads delete range tasks from gc_delete_range table. -func LoadDeleteRanges(ctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { - return loadDeleteRangesFromTable(ctx, deleteRangesTable, safePoint) +func LoadDeleteRanges(ctx context.Context, sctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { + return loadDeleteRangesFromTable(ctx, sctx, deleteRangesTable, safePoint) } // LoadDoneDeleteRanges loads deleted ranges from gc_delete_range_done table. -func LoadDoneDeleteRanges(ctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { - return loadDeleteRangesFromTable(ctx, doneDeleteRangesTable, safePoint) +func LoadDoneDeleteRanges(ctx context.Context, sctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { + return loadDeleteRangesFromTable(ctx, sctx, doneDeleteRangesTable, safePoint) } -func loadDeleteRangesFromTable(ctx sessionctx.Context, table string, safePoint uint64) (ranges []DelRangeTask, _ error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), loadDeleteRangeSQL, table, safePoint) +func loadDeleteRangesFromTable(ctx context.Context, sctx sessionctx.Context, table string, safePoint uint64) (ranges []DelRangeTask, _ error) { + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, loadDeleteRangeSQL, table, safePoint) if rs != nil { defer terror.Call(rs.Close) } @@ -106,28 +106,30 @@ func loadDeleteRangesFromTable(ctx sessionctx.Context, table string, safePoint u } // CompleteDeleteRange moves a record from gc_delete_range table to gc_delete_range_done table. 
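The hunks above all apply the same mechanical change: a context tagged with kv.InternalTxnDDL is built once and handed to kv.RunInNewTxn in place of a bare context.Background(). A minimal, self-contained sketch of that pattern follows; the helper name and the meta lookup are illustrative, and the meta/model import paths are assumed from the file layout rather than taken from this patch.

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/parser/model"
)

// getDBInfo reads a schema's metadata inside a fresh transaction whose
// requests are labelled as internal DDL traffic.
func getDBInfo(store kv.Storage, schemaID int64) (*model.DBInfo, error) {
	var dbInfo *model.DBInfo
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
	err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error {
		var err error
		dbInfo, err = meta.NewMeta(txn).GetDatabase(schemaID)
		return err
	})
	return dbInfo, err
}
```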
-func CompleteDeleteRange(ctx sessionctx.Context, dr DelRangeTask) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "BEGIN") +func CompleteDeleteRange(sctx sessionctx.Context, dr DelRangeTask) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "BEGIN") if err != nil { return errors.Trace(err) } - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), recordDoneDeletedRangeSQL, dr.JobID, dr.ElementID) + _, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, recordDoneDeletedRangeSQL, dr.JobID, dr.ElementID) if err != nil { return errors.Trace(err) } - err = RemoveFromGCDeleteRange(ctx, dr.JobID, dr.ElementID) + err = RemoveFromGCDeleteRange(sctx, dr.JobID, dr.ElementID) if err != nil { return errors.Trace(err) } - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "COMMIT") + _, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "COMMIT") return errors.Trace(err) } // RemoveFromGCDeleteRange is exported for ddl pkg to use. -func RemoveFromGCDeleteRange(ctx sessionctx.Context, jobID, elementID int64) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), completeDeleteRangeSQL, jobID, elementID) +func RemoveFromGCDeleteRange(sctx sessionctx.Context, jobID, elementID int64) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, completeDeleteRangeSQL, jobID, elementID) return errors.Trace(err) } @@ -150,16 +152,18 @@ func RemoveMultiFromGCDeleteRange(ctx context.Context, sctx sessionctx.Context, } // DeleteDoneRecord removes a record from gc_delete_range_done table. -func DeleteDoneRecord(ctx sessionctx.Context, dr DelRangeTask) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), deleteDoneRecordSQL, dr.JobID, dr.ElementID) +func DeleteDoneRecord(sctx sessionctx.Context, dr DelRangeTask) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, deleteDoneRecordSQL, dr.JobID, dr.ElementID) return errors.Trace(err) } // UpdateDeleteRange is only for emulator. -func UpdateDeleteRange(ctx sessionctx.Context, dr DelRangeTask, newStartKey, oldStartKey kv.Key) error { +func UpdateDeleteRange(sctx sessionctx.Context, dr DelRangeTask, newStartKey, oldStartKey kv.Key) error { newStartKeyHex := hex.EncodeToString(newStartKey) oldStartKeyHex := hex.EncodeToString(oldStartKey) - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), updateDeleteRangeSQL, newStartKeyHex, dr.JobID, dr.ElementID, oldStartKeyHex) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, updateDeleteRangeSQL, newStartKeyHex, dr.JobID, dr.ElementID, oldStartKeyHex) return errors.Trace(err) } @@ -177,6 +181,7 @@ func LoadDDLVars(ctx sessionctx.Context) error { // LoadGlobalVars loads global variable from mysql.global_variables. 
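Helpers such as CompleteDeleteRange, RemoveFromGCDeleteRange, DeleteDoneRecord, and UpdateDeleteRange above keep their sessionctx-only signatures and instead derive the tagged context just before issuing internal SQL. A condensed sketch of that variant; the function name and SQL text are illustrative, and the util/sqlexec import path is assumed.

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/sqlexec"
)

// purgeDoneRange deletes a finished delete-range record; the DDL source tag
// is attached inside the helper rather than threaded through its signature.
func purgeDoneRange(sctx sessionctx.Context, jobID, elementID int64) error {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
	_, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx,
		"DELETE FROM mysql.gc_delete_range_done WHERE job_id = %? AND element_id = %?",
		jobID, elementID)
	return err
}
```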
func LoadGlobalVars(ctx context.Context, sctx sessionctx.Context, varNames []string) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) if e, ok := sctx.(sqlexec.RestrictedSQLExecutor); ok { var buf strings.Builder buf.WriteString(loadGlobalVars) diff --git a/distsql/distsql.go b/distsql/distsql.go index 6a502b58478b6..9bc9b9cc323d5 100644 --- a/distsql/distsql.go +++ b/distsql/distsql.go @@ -154,6 +154,8 @@ func SelectWithRuntimeStats(ctx context.Context, sctx sessionctx.Context, kvReq func Analyze(ctx context.Context, client kv.Client, kvReq *kv.Request, vars interface{}, isRestrict bool, stmtCtx *stmtctx.StatementContext) (SelectResult, error) { ctx = WithSQLKvExecCounterInterceptor(ctx, stmtCtx) + kvReq.RequestSource.RequestSourceInternal = true + kvReq.RequestSource.RequestSourceType = kv.InternalTxnStats resp := client.Send(ctx, kvReq, vars, &kv.ClientSendOption{}) if resp == nil { return nil, errors.New("client returns nil response") diff --git a/distsql/request_builder.go b/distsql/request_builder.go index c4840ca8741a3..f1a0309fd1308 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -267,6 +267,8 @@ func (builder *RequestBuilder) SetFromSessionVars(sv *variable.SessionVars) *Req if sv.EnablePaging { builder.SetPaging(true) } + builder.RequestSource.RequestSourceInternal = sv.InRestrictedSQL + builder.RequestSource.RequestSourceType = sv.RequestSourceType return builder } diff --git a/domain/domain.go b/domain/domain.go index 257ab2a71ef2f..9a75b0a3a5491 100644 --- a/domain/domain.go +++ b/domain/domain.go @@ -975,14 +975,15 @@ func (do *Domain) GetEtcdClient() *clientv3.Client { // LoadPrivilegeLoop create a goroutine loads privilege tables in a loop, it // should be called only once in BootstrapSession. 
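On the coprocessor path the label is carried on the request itself: buildDescTableScan and distsql.Analyze above set RequestSourceInternal and RequestSourceType directly, and SetFromSessionVars now copies the session's values into the builder. A small sketch of that field-level tagging; the helper is an assumption, the fields are the ones shown in the hunks, and the source-type constants are assumed to be plain strings as their usage suggests.

```go
package example

import (
	"github.com/pingcap/tidb/distsql"
	"github.com/pingcap/tidb/kv"
)

// markInternalStats labels a coprocessor request builder as internal
// statistics traffic before Build() is called, mirroring distsql.Analyze.
func markInternalStats(builder *distsql.RequestBuilder) *distsql.RequestBuilder {
	builder.RequestSource.RequestSourceInternal = true
	builder.RequestSource.RequestSourceType = kv.InternalTxnStats
	return builder
}
```

For user-triggered restricted SQL, SetFromSessionVars achieves the same effect by copying sv.InRestrictedSQL and sv.RequestSourceType from the session variables.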
-func (do *Domain) LoadPrivilegeLoop(ctx sessionctx.Context) error { - ctx.GetSessionVars().InRestrictedSQL = true - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "set @@autocommit = 1") +func (do *Domain) LoadPrivilegeLoop(sctx sessionctx.Context) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + sctx.GetSessionVars().InRestrictedSQL = true + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "set @@autocommit = 1") if err != nil { return err } do.privHandle = privileges.NewHandle() - err = do.privHandle.Update(ctx) + err = do.privHandle.Update(sctx) if err != nil { return err } @@ -1021,7 +1022,7 @@ func (do *Domain) LoadPrivilegeLoop(ctx sessionctx.Context) error { } count = 0 - err := do.privHandle.Update(ctx) + err := do.privHandle.Update(sctx) metrics.LoadPrivilegeCounter.WithLabelValues(metrics.RetLabel(err)).Inc() if err != nil { logutil.BgLogger().Error("load privilege failed", zap.Error(err)) diff --git a/domain/sysvar_cache.go b/domain/sysvar_cache.go index 931e28cec5e15..e235e95b9dec5 100644 --- a/domain/sysvar_cache.go +++ b/domain/sysvar_cache.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/logutil" @@ -87,11 +88,12 @@ func (do *Domain) GetGlobalVar(name string) (string, error) { return "", variable.ErrUnknownSystemVar.GenWithStackByArgs(name) } -func (do *Domain) fetchTableValues(ctx sessionctx.Context) (map[string]string, error) { +func (do *Domain) fetchTableValues(sctx sessionctx.Context) (map[string]string, error) { tableContents := make(map[string]string) // Copy all variables from the table to tableContents - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT variable_name, variable_value FROM mysql.global_variables`) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnSysVar) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT variable_name, variable_value FROM mysql.global_variables`) if err != nil { return nil, err } diff --git a/executor/admin.go b/executor/admin.go index 974f9d1b177d9..ba219b70b6db3 100644 --- a/executor/admin.go +++ b/executor/admin.go @@ -317,7 +317,8 @@ func (e *RecoverIndexExec) backfillIndex(ctx context.Context) (int64, int64, err result backfillResult ) for { - errInTxn := kv.RunInNewTxn(context.Background(), e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin) + errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn) var err error result, err = e.backfillIndexInTxn(ctx, txn, currentHandle) @@ -694,7 +695,8 @@ func (e *CleanupIndexExec) Next(ctx context.Context, req *chunk.Chunk) error { func (e *CleanupIndexExec) cleanTableIndex(ctx context.Context) error { for { - errInTxn := kv.RunInNewTxn(context.Background(), e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin) + errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { txn.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn) err := 
e.fetchIndex(ctx, txn) diff --git a/executor/analyze.go b/executor/analyze.go index c9559ccff3cd4..927c5c834e756 100644 --- a/executor/analyze.go +++ b/executor/analyze.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/planner/core" @@ -175,8 +176,9 @@ func (e *AnalyzeExec) saveV2AnalyzeOpts() error { } idx += 1 } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - _, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, sql.String()) + _, _, err := exec.ExecRestrictedSQL(ctx, nil, sql.String()) if err != nil { return err } @@ -342,22 +344,23 @@ func AddNewAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob) { } // StartAnalyzeJob marks the state of the analyze job as running and sets the start time. -func StartAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob) { +func StartAnalyzeJob(sctx sessionctx.Context, job *statistics.AnalyzeJob) { if job == nil || job.ID == nil { return } job.StartTime = time.Now() job.Progress.SetLastDumpTime(job.StartTime) - exec := ctx.(sqlexec.RestrictedSQLExecutor) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "UPDATE mysql.analyze_jobs SET start_time = CONVERT_TZ(%?, '+00:00', @@TIME_ZONE), state = %? WHERE id = %?" - _, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, job.StartTime.UTC().Format(types.TimeFormat), statistics.AnalyzeRunning, *job.ID) + _, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, job.StartTime.UTC().Format(types.TimeFormat), statistics.AnalyzeRunning, *job.ID) if err != nil { logutil.BgLogger().Warn("failed to update analyze job", zap.String("update", fmt.Sprintf("%s->%s", statistics.AnalyzePending, statistics.AnalyzeRunning)), zap.Error(err)) } } // UpdateAnalyzeJob updates count of the processed rows when increment reaches a threshold. -func UpdateAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, rowCount int64) { +func UpdateAnalyzeJob(sctx sessionctx.Context, job *statistics.AnalyzeJob, rowCount int64) { if job == nil || job.ID == nil { return } @@ -365,16 +368,17 @@ func UpdateAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, rowCou if delta == 0 { return } - exec := ctx.(sqlexec.RestrictedSQLExecutor) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "UPDATE mysql.analyze_jobs SET processed_rows = processed_rows + %? WHERE id = %?" - _, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, delta, *job.ID) + _, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, delta, *job.ID) if err != nil { logutil.BgLogger().Warn("failed to update analyze job", zap.String("update", fmt.Sprintf("process %v rows", delta)), zap.Error(err)) } } // FinishAnalyzeJob updates the state of the analyze job to finished/failed according to `meetError` and sets the end time. 
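The analyze-job bookkeeping above combines the stats source tag with the existing session-pool option for the restricted executor. A condensed sketch under assumed names; the SQL text is illustrative rather than copied from the patch.

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/sqlexec"
)

// bumpAnalyzeJob updates a row in mysql.analyze_jobs on a pooled session,
// with the statement labelled as internal statistics work.
func bumpAnalyzeJob(sctx sessionctx.Context, jobID uint64, delta int64) error {
	exec := sctx.(sqlexec.RestrictedSQLExecutor)
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	_, _, err := exec.ExecRestrictedSQL(ctx,
		[]sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool},
		"UPDATE mysql.analyze_jobs SET processed_rows = processed_rows + %? WHERE id = %?",
		delta, jobID)
	return err
}
```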
-func FinishAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, analyzeErr error) { +func FinishAnalyzeJob(sctx sessionctx.Context, job *statistics.AnalyzeJob, analyzeErr error) { if job == nil || job.ID == nil { return } @@ -395,8 +399,9 @@ func FinishAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, analyz sql = "UPDATE mysql.analyze_jobs SET processed_rows = processed_rows + %?, end_time = CONVERT_TZ(%?, '+00:00', @@TIME_ZONE), state = %?, process_id = NULL WHERE id = %?" args = []interface{}{job.Progress.GetDeltaCount(), job.EndTime.UTC().Format(types.TimeFormat), statistics.AnalyzeFinished, *job.ID} } - exec := ctx.(sqlexec.RestrictedSQLExecutor) - _, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, args...) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, args...) if err != nil { var state string if analyzeErr != nil { diff --git a/executor/analyze_fast.go b/executor/analyze_fast.go index 27e514150e314..8af6486ba2ca1 100644 --- a/executor/analyze_fast.go +++ b/executor/analyze_fast.go @@ -114,7 +114,8 @@ type AnalyzeFastExec struct { func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID()) if err != nil { return } @@ -150,7 +151,7 @@ func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { } } var rs sqlexec.RecordSet - rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql.String()) + rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return } @@ -173,15 +174,16 @@ func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { } func (e *AnalyzeFastExec) activateTxnForRowCount() (rollbackFn func() error, err error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) txn, err := e.ctx.Txn(true) if err != nil { if kv.ErrInvalidTxn.Equal(err) { - _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin") + _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") if err != nil { return nil, errors.Trace(err) } rollbackFn = func() error { - _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback") + _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "rollback") return err } } else { diff --git a/executor/analyzetest/analyze_test.go b/executor/analyzetest/analyze_test.go index b37376132ec5b..5a12296d0643a 100644 --- a/executor/analyzetest/analyze_test.go +++ b/executor/analyzetest/analyze_test.go @@ -15,6 +15,7 @@ package analyzetest import ( + "context" "encoding/json" "fmt" "strconv" @@ -169,9 +170,10 @@ func TestAnalyzeRestrict(t *testing.T) { tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int)") - ctx := tk.Session().(sessionctx.Context) - ctx.GetSessionVars().InRestrictedSQL = true - tk.MustExec("analyze table t") + ctx := kv.WithInternalSourceType(context.Background(), 
kv.InternalTxnStats) + rs, err := tk.Session().ExecuteInternal(ctx, "analyze table t") + require.Nil(t, err) + require.Nil(t, rs) } func TestAnalyzeParameters(t *testing.T) { diff --git a/executor/brie.go b/executor/brie.go index 78fdcbe9f1285..6dd1d9053a4ac 100644 --- a/executor/brie.go +++ b/executor/brie.go @@ -463,11 +463,13 @@ func (gs *tidbGlueSession) CreateSession(store kv.Storage) (glue.Session, error) // such as BACKUP and RESTORE have already been privilege checked. // NOTE: Maybe drain the restult too? See `gluetidb.tidbSession.ExecuteInternal` for more details. func (gs *tidbGlueSession) Execute(ctx context.Context, sql string) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) _, _, err := gs.se.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, sql) return err } func (gs *tidbGlueSession) ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) exec := gs.se.(sqlexec.SQLExecutor) _, err := exec.ExecuteInternal(ctx, sql, args...) return err diff --git a/executor/builder.go b/executor/builder.go index 9335231d217e3..8f930f5d7a107 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -2443,7 +2443,8 @@ func (b *executorBuilder) getApproximateTableCountFromStorage(sctx sessionctx.Co if task.PartitionName != "" { sqlexec.MustFormatSQL(sql, " partition(%n)", task.PartitionName) } - rows, _, err := b.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(context.TODO(), nil, sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, _, err := b.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, sql.String()) if err != nil { return 0, false } diff --git a/executor/ddl.go b/executor/ddl.go index 269014684d52a..2887033b78f23 100644 --- a/executor/ddl.go +++ b/executor/ddl.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -88,6 +89,7 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { } e.done = true + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) // For each DDL, we should commit the previous transaction and create a new transaction. 
// Following cases are exceptions var localTempTablesToDrop []*ast.TableName @@ -409,7 +411,8 @@ func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, dom *domain.Do if err != nil { return nil, nil, err } - defer e.releaseSysSession(se) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + defer e.releaseSysSession(ctx, se) job, err := ddl.GetHistoryJobByID(se, s.JobID) if err != nil { return nil, nil, err diff --git a/executor/ddl_test.go b/executor/ddl_test.go index bf9da41257e04..5abca41820d1c 100644 --- a/executor/ddl_test.go +++ b/executor/ddl_test.go @@ -880,7 +880,8 @@ func TestShardRowIDBits(t *testing.T) { tblInfo.ShardRowIDBits = 5 tblInfo.MaxShardRowIDBits = 5 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) _, err = m.GenSchemaVersion() require.NoError(t, err) diff --git a/executor/executor.go b/executor/executor.go index 7b370615582cc..76ad77ac68b90 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -341,7 +341,8 @@ type CancelDDLJobsExec struct { // Open implements the Executor Open interface. func (e *CancelDDLJobsExec) Open(ctx context.Context) error { // We want to use a global transaction to execute the admin command, so we don't use e.ctx here. - errInTxn := kv.RunInNewTxn(context.Background(), e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) (err error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) + errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) (err error) { e.errs, err = ddl.CancelJobs(txn, e.jobIDs) return }) diff --git a/executor/executor_test.go b/executor/executor_test.go index fb0b9816ccbf7..e426d78227606 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -5417,7 +5417,8 @@ func TestHistoryReadInTxn(t *testing.T) { // After `ExecRestrictedSQL` with a specified snapshot and use current session, the original snapshot ts should not be reset // See issue: https://github.com/pingcap/tidb/issues/34529 exec := tk.Session().(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(ts2), sqlexec.ExecOptionUseCurSession}, "select * from his_t0 where id=1") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + rows, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(ts2), sqlexec.ExecOptionUseCurSession}, "select * from his_t0 where id=1") require.NoError(t, err) require.Equal(t, 1, len(rows)) require.Equal(t, int64(1), rows[0].GetInt64(0)) diff --git a/executor/grant.go b/executor/grant.go index 99db32abe79d1..b0f29e586fb7a 100644 --- a/executor/grant.go +++ b/executor/grant.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -67,6 +68,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } e.done = true + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName := e.Level.DBName if 
len(dbName) == 0 { @@ -132,15 +134,15 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } defer func() { if !isCommit { - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "rollback") + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "rollback") if err != nil { logutil.BgLogger().Error("rollback error occur at grant privilege", zap.Error(err)) } } - e.releaseSysSession(internalSession) + e.releaseSysSession(internalCtx, internalSession) }() - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "begin") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "begin") if err != nil { return err } @@ -166,7 +168,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { if user.AuthOpt != nil && user.AuthOpt.AuthPlugin != "" { authPlugin = user.AuthOpt.AuthPlugin } - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, `INSERT INTO %n.%n (Host, User, authentication_string, plugin) VALUES (%?, %?, %?, %?);`, mysql.SystemDB, mysql.UserTable, user.User.Hostname, user.User.Username, pwd, authPlugin) if err != nil { @@ -234,7 +236,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } } - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "commit") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "commit") if err != nil { return err } @@ -322,31 +324,36 @@ func (e *GrantExec) checkAndInitColumnPriv(user string, host string, cols []*ast } // initGlobalPrivEntry inserts a new row into mysql.DB with empty privilege. -func initGlobalPrivEntry(ctx sessionctx.Context, user string, host string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, PRIV) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.GlobalPrivTable, host, user, "{}") +func initGlobalPrivEntry(sctx sessionctx.Context, user string, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, PRIV) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.GlobalPrivTable, host, user, "{}") return err } // initDBPrivEntry inserts a new row into mysql.DB with empty privilege. -func initDBPrivEntry(ctx sessionctx.Context, user string, host string, db string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, DB) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.DBTable, host, user, db) +func initDBPrivEntry(sctx sessionctx.Context, user string, host string, db string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, DB) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.DBTable, host, user, db) return err } // initTablePrivEntry inserts a new row into mysql.Tables_priv with empty privilege. 
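GrantExec.Next above wraps its writes in an explicit begin/commit with rollback on failure, and every statement now shares the same privilege-tagged context. A generic sketch of that flow under assumed names; the wrapper and its callback are not part of the patch.

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/sqlexec"
)

// inPrivilegeTxn runs body inside an internal transaction whose statements
// are all tagged as privilege maintenance; it rolls back if body fails.
func inPrivilegeTxn(se sessionctx.Context, body func(ctx context.Context, exec sqlexec.SQLExecutor) error) (err error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege)
	exec := se.(sqlexec.SQLExecutor)
	if _, err = exec.ExecuteInternal(ctx, "begin"); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			if _, rbErr := exec.ExecuteInternal(ctx, "rollback"); rbErr != nil {
				err = rbErr
			}
			return
		}
		_, err = exec.ExecuteInternal(ctx, "commit")
	}()
	return body(ctx, exec)
}
```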
-func initTablePrivEntry(ctx sessionctx.Context, user string, host string, db string, tbl string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, DB, Table_name, Table_priv, Column_priv) VALUES (%?, %?, %?, %?, '', '')`, mysql.SystemDB, mysql.TablePrivTable, host, user, db, tbl) +func initTablePrivEntry(sctx sessionctx.Context, user string, host string, db string, tbl string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, DB, Table_name, Table_priv, Column_priv) VALUES (%?, %?, %?, %?, '', '')`, mysql.SystemDB, mysql.TablePrivTable, host, user, db, tbl) return err } // initColumnPrivEntry inserts a new row into mysql.Columns_priv with empty privilege. -func initColumnPrivEntry(ctx sessionctx.Context, user string, host string, db string, tbl string, col string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, DB, Table_name, Column_name, Column_priv) VALUES (%?, %?, %?, %?, %?, '')`, mysql.SystemDB, mysql.ColumnPrivTable, host, user, db, tbl, col) +func initColumnPrivEntry(sctx sessionctx.Context, user string, host string, db string, tbl string, col string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, DB, Table_name, Column_name, Column_priv) VALUES (%?, %?, %?, %?, %?, '')`, mysql.SystemDB, mysql.ColumnPrivTable, host, user, db, tbl, col) return err } // grantGlobalPriv grants priv to user in global scope. -func (e *GrantExec) grantGlobalPriv(ctx sessionctx.Context, user *ast.UserSpec) error { +func (e *GrantExec) grantGlobalPriv(sctx sessionctx.Context, user *ast.UserSpec) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) if len(e.TLSOptions) == 0 { return nil } @@ -354,7 +361,7 @@ func (e *GrantExec) grantGlobalPriv(ctx sessionctx.Context, user *ast.UserSpec) if err != nil { return errors.Trace(err) } - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `UPDATE %n.%n SET PRIV=%? WHERE User=%? AND Host=%?`, mysql.SystemDB, mysql.GlobalPrivTable, priv, user.User.Username, user.User.Hostname) + _, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `UPDATE %n.%n SET PRIV=%? WHERE User=%? AND Host=%?`, mysql.SystemDB, mysql.GlobalPrivTable, priv, user.User.Username, user.User.Hostname) return err } @@ -473,7 +480,8 @@ func (e *GrantExec) grantDynamicPriv(privName string, user *ast.UserSpec, intern if e.WithGrant { grantOption = "Y" } - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `REPLACE INTO %n.global_grants (user,host,priv,with_grant_option) VALUES (%?, %?, %?, %?)`, mysql.SystemDB, user.User.Username, user.User.Hostname, privName, grantOption) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `REPLACE INTO %n.global_grants (user,host,priv,with_grant_option) VALUES (%?, %?, %?, %?)`, mysql.SystemDB, user.User.Username, user.User.Hostname, privName, grantOption) return err } @@ -491,7 +499,8 @@ func (e *GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, int } sqlexec.MustFormatSQL(sql, ` WHERE User=%? 
AND Host=%?`, user.User.Username, user.User.Hostname) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } @@ -519,7 +528,8 @@ func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, interna } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%?", user.User.Username, user.User.Hostname, dbName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } @@ -542,7 +552,8 @@ func (e *GrantExec) grantTableLevel(priv *ast.PrivElem, user *ast.UserSpec, inte } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user.User.Username, user.User.Hostname, dbName, tblName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } @@ -567,7 +578,8 @@ func (e *GrantExec) grantColumnLevel(priv *ast.PrivElem, user *ast.UserSpec, int } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?", user.User.Username, user.User.Hostname, dbName, tbl.Meta().Name.O, col.Name.O) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return err } @@ -663,12 +675,13 @@ func composeColumnPrivUpdateForGrant(ctx sessionctx.Context, sql *strings.Builde } // recordExists is a helper function to check if the sql returns any row. -func recordExists(ctx sessionctx.Context, sql string, args ...interface{}) (bool, error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql, args...) +func recordExists(sctx sessionctx.Context, sql string, args ...interface{}) (bool, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, args...) if err != nil { return false, err } - rows, _, err := getRowsAndFields(ctx, rs) + rows, _, err := getRowsAndFields(sctx, rs) if err != nil { return false, err } @@ -697,13 +710,14 @@ func columnPrivEntryExists(ctx sessionctx.Context, name string, host string, db // getTablePriv gets current table scope privilege set from mysql.Tables_priv. // Return Table_priv and Column_priv. -func getTablePriv(ctx sessionctx.Context, name string, host string, db string, tbl string) (string, string, error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `SELECT Table_priv, Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? 
AND Table_name=%?`, mysql.SystemDB, mysql.TablePrivTable, name, host, db, tbl) +func getTablePriv(sctx sessionctx.Context, name string, host string, db string, tbl string) (string, string, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `SELECT Table_priv, Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?`, mysql.SystemDB, mysql.TablePrivTable, name, host, db, tbl) if err != nil { return "", "", err } var tPriv, cPriv string - rows, fields, err := getRowsAndFields(ctx, rs) + rows, fields, err := getRowsAndFields(sctx, rs) if err != nil { return "", "", errors.Errorf("get table privilege fail for %s %s %s %s: %v", name, host, db, tbl, err) } @@ -724,12 +738,13 @@ func getTablePriv(ctx sessionctx.Context, name string, host string, db string, t // getColumnPriv gets current column scope privilege set from mysql.Columns_priv. // Return Column_priv. -func getColumnPriv(ctx sessionctx.Context, name string, host string, db string, tbl string, col string) (string, error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `SELECT Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?;`, mysql.SystemDB, mysql.ColumnPrivTable, name, host, db, tbl, col) +func getColumnPriv(sctx sessionctx.Context, name string, host string, db string, tbl string, col string) (string, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `SELECT Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?;`, mysql.SystemDB, mysql.ColumnPrivTable, name, host, db, tbl, col) if err != nil { return "", err } - rows, fields, err := getRowsAndFields(ctx, rs) + rows, fields, err := getRowsAndFields(sctx, rs) if err != nil { return "", errors.Errorf("get column privilege fail for %s %s %s %s: %s", name, host, db, tbl, err) } @@ -764,11 +779,12 @@ func getTargetSchemaAndTable(ctx sessionctx.Context, dbName, tableName string, i } // getRowsAndFields is used to extract rows from record sets. -func getRowsAndFields(ctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, []*ast.ResultField, error) { +func getRowsAndFields(sctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, []*ast.ResultField, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) if rs == nil { return nil, nil, errors.Errorf("nil recordset") } - rows, err := getRowFromRecordSet(context.Background(), ctx, rs) + rows, err := getRowFromRecordSet(ctx, sctx, rs) if err != nil { return nil, nil, err } diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index 7f7766eda2b23..76da279bf2f03 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -289,6 +289,7 @@ func (c *statsCache) get(ctx context.Context, sctx sessionctx.Context) (map[int6 } c.mu.RUnlock() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) c.mu.Lock() defer c.mu.Unlock() if time.Since(c.modifyTime) < TableStatsCacheExpiry { @@ -690,8 +691,9 @@ func (e *hugeMemTableRetriever) dataForColumnsInTable(ctx context.Context, sctx _, ok := e.viewSchemaMap[tbl.ID] if !ok { var viewLogicalPlan plannercore.Plan + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) // Build plan is not thread safe, there will be concurrency on sessionctx. 
- if err := runWithSystemSession(sctx, func(s sessionctx.Context) error { + if err := runWithSystemSession(internalCtx, sctx, func(s sessionctx.Context) error { planBuilder, _ := plannercore.NewPlanBuilder().Init(s, is, &hint.BlockHintProcessor{}) var err error viewLogicalPlan, err = planBuilder.BuildDataSourceFromView(ctx, schema.Name, tbl) @@ -1906,7 +1908,8 @@ func dataForAnalyzeStatusHelper(sctx sessionctx.Context) (rows [][]types.Datum, const maxAnalyzeJobs = 30 const sql = "SELECT table_schema, table_name, partition_name, job_info, processed_rows, CONVERT_TZ(start_time, @@TIME_ZONE, '+00:00'), CONVERT_TZ(end_time, @@TIME_ZONE, '+00:00'), state, fail_reason, instance, process_id FROM mysql.analyze_jobs ORDER BY update_time DESC LIMIT %?" exec := sctx.(sqlexec.RestrictedSQLExecutor) - chunkRows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, sql, maxAnalyzeJobs) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + chunkRows, _, err := exec.ExecRestrictedSQL(ctx, nil, sql, maxAnalyzeJobs) if err != nil { return nil, err } diff --git a/executor/inspection_profile.go b/executor/inspection_profile.go index 15885010dce25..a46bee924bb2c 100644 --- a/executor/inspection_profile.go +++ b/executor/inspection_profile.go @@ -24,6 +24,7 @@ import ( "time" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/sqlexec" ) @@ -167,7 +168,8 @@ func (n *metricNode) getLabelValue(label string) *metricValue { func (n *metricNode) queryRowsByLabel(pb *profileBuilder, query string, handleRowFn func(label string, v float64)) error { exec := pb.sctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, query) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, query) if err != nil { return err } diff --git a/executor/inspection_result.go b/executor/inspection_result.go index 741508c5cf88f..4c1bf196836b4 100644 --- a/executor/inspection_result.go +++ b/executor/inspection_result.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -117,6 +118,7 @@ func (e *inspectionResultRetriever) retrieve(ctx context.Context, sctx sessionct } e.retrieved = true + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) // Some data of cluster-level memory tables will be retrieved many times in different inspection rules, // and the cost of retrieving some data is expensive. We use the `TableSnapshot` to cache those data // and obtain them lazily, and provide a consistent view of inspection tables for each inspection rules. 
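Retrievers that already receive a context, like the inspection code above and the metrics readers below, re-tag that context in place so existing deadlines and cancellation are preserved instead of starting again from context.Background(). A minimal sketch of that variant; the function name is an assumption, while the query and source constant mirror ones used elsewhere in the patch.

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/sqlexec"
)

// retrieveEngines keeps the caller's context (and any deadline on it) and
// only adds the internal source label before querying a system table.
func retrieveEngines(ctx context.Context, sctx sessionctx.Context) ([]chunk.Row, error) {
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta)
	exec := sctx.(sqlexec.RestrictedSQLExecutor)
	rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "SELECT * FROM information_schema.engines")
	return rows, err
}
```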
diff --git a/executor/inspection_summary.go b/executor/inspection_summary.go index ebd3f69abc4f8..30fc542a9898b 100644 --- a/executor/inspection_summary.go +++ b/executor/inspection_summary.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" @@ -417,6 +418,7 @@ func (e *inspectionSummaryRetriever) retrieve(ctx context.Context, sctx sessionc return nil, nil } e.retrieved = true + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) rules := inspectionFilter{set: e.extractor.Rules} names := inspectionFilter{set: e.extractor.MetricNames} diff --git a/executor/metrics_reader.go b/executor/metrics_reader.go index 3e90897d03192..127c03119948c 100644 --- a/executor/metrics_reader.go +++ b/executor/metrics_reader.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" plannercore "github.com/pingcap/tidb/planner/core" @@ -204,6 +205,7 @@ func (e *MetricsSummaryRetriever) retrieve(ctx context.Context, sctx sessionctx. } sort.Strings(tables) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) filter := inspectionFilter{set: e.extractor.MetricsNames} condition := e.timeRange.Condition() for _, name := range tables { @@ -280,6 +282,7 @@ func (e *MetricsSummaryByLabelRetriever) retrieve(ctx context.Context, sctx sess } sort.Strings(tables) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) filter := inspectionFilter{set: e.extractor.MetricsNames} condition := e.timeRange.Condition() for _, name := range tables { diff --git a/executor/opt_rule_blacklist.go b/executor/opt_rule_blacklist.go index 5773f80efe7a2..2b80adecd4383 100644 --- a/executor/opt_rule_blacklist.go +++ b/executor/opt_rule_blacklist.go @@ -17,6 +17,8 @@ package executor import ( "context" + "github.com/pingcap/tidb/kv" + plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/chunk" @@ -31,13 +33,14 @@ type ReloadOptRuleBlacklistExec struct { // Next implements the Executor Next interface. func (e *ReloadOptRuleBlacklistExec) Next(ctx context.Context, _ *chunk.Chunk) error { - return LoadOptRuleBlacklist(e.ctx) + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + return LoadOptRuleBlacklist(internalCtx, e.ctx) } // LoadOptRuleBlacklist loads the latest data from table mysql.opt_rule_blacklist. 
-func LoadOptRuleBlacklist(ctx sessionctx.Context) (err error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select HIGH_PRIORITY name from mysql.opt_rule_blacklist") +func LoadOptRuleBlacklist(ctx context.Context, sctx sessionctx.Context) (err error) { + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select HIGH_PRIORITY name from mysql.opt_rule_blacklist") if err != nil { return err } diff --git a/executor/reload_expr_pushdown_blacklist.go b/executor/reload_expr_pushdown_blacklist.go index 1511e7a280195..c32f84c957e1e 100644 --- a/executor/reload_expr_pushdown_blacklist.go +++ b/executor/reload_expr_pushdown_blacklist.go @@ -37,9 +37,10 @@ func (e *ReloadExprPushdownBlacklistExec) Next(ctx context.Context, _ *chunk.Chu } // LoadExprPushdownBlacklist loads the latest data from table mysql.expr_pushdown_blacklist. -func LoadExprPushdownBlacklist(ctx sessionctx.Context) (err error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select HIGH_PRIORITY name, store_type from mysql.expr_pushdown_blacklist") +func LoadExprPushdownBlacklist(sctx sessionctx.Context) (err error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnSysVar) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select HIGH_PRIORITY name, store_type from mysql.expr_pushdown_blacklist") if err != nil { return err } diff --git a/executor/revoke.go b/executor/revoke.go index 2165f6aaa50ff..36f7a36ac75de 100644 --- a/executor/revoke.go +++ b/executor/revoke.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -62,6 +63,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } e.done = true + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Commit the old transaction, like DDL. 
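RevokeExec.Next just below builds one privilege-tagged context and threads it through begin, commit, rollback, and releaseSysSession, which itself gains a context parameter in the executor/simple.go hunk further down. A rough sketch of that release step under assumed names; the put callback stands in for the domain's session pool.

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/sqlexec"
)

// releaseSession issues the final "rollback" under the caller's tagged
// context before handing the pooled session back.
func releaseSession(ctx context.Context, sctx sessionctx.Context, put func(sessionctx.Context)) {
	if sctx == nil {
		return
	}
	if _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "rollback"); err != nil {
		// The patched releaseSysSession closes the broken session at this
		// point; that step is omitted from this sketch.
		return
	}
	put(sctx)
}
```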
if err := sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil { @@ -77,15 +79,15 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { } defer func() { if !isCommit { - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "rollback") + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "rollback") if err != nil { logutil.BgLogger().Error("rollback error occur at grant privilege", zap.Error(err)) } } - e.releaseSysSession(internalSession) + e.releaseSysSession(internalCtx, internalSession) }() - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "begin") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "begin") if err != nil { return err } @@ -116,7 +118,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { } } - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "commit") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "commit") if err != nil { return err } @@ -197,16 +199,18 @@ func (e *RevokeExec) revokeDynamicPriv(internalSession sessionctx.Context, privN if !privilege.GetPrivilegeManager(e.ctx).IsDynamicPrivilege(privName) { // for MySQL compatibility e.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrDynamicPrivilegeNotRegistered.GenWithStackByArgs(privName)) } - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "DELETE FROM mysql.global_grants WHERE user = %? AND host = %? AND priv = %?", user, host, privName) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "DELETE FROM mysql.global_grants WHERE user = %? AND host = %? AND priv = %?", user, host, privName) return err } func (e *RevokeExec) revokeGlobalPriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) if priv.Priv == mysql.ExtendedPriv { return e.revokeDynamicPriv(internalSession, priv.Name, user, host) } if priv.Priv == mysql.AllPriv { // If ALL, also revoke dynamic privileges - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "DELETE FROM mysql.global_grants WHERE user = %? AND host = %?", user, host) + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "DELETE FROM mysql.global_grants WHERE user = %? AND host = %?", user, host) if err != nil { return err } @@ -219,11 +223,12 @@ func (e *RevokeExec) revokeGlobalPriv(internalSession sessionctx.Context, priv * } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%?", user, strings.ToLower(host)) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast.PrivElem, userName, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName := e.Level.DBName if len(dbName) == 0 { dbName = e.ctx.GetSessionVars().CurrentDB @@ -237,11 +242,12 @@ func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast. } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? 
AND DB=%?", userName, host, dbName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) if err != nil && !terror.ErrorEqual(err, infoschema.ErrTableNotExists) { return err @@ -260,11 +266,12 @@ func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *a } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user, host, dbName, tblName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } func (e *RevokeExec) revokeColumnPriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) if err != nil { return err @@ -284,7 +291,7 @@ func (e *RevokeExec) revokeColumnPriv(internalSession sessionctx.Context, priv * } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?", user, host, dbName, tbl.Meta().Name.O, col.Name.O) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return err } diff --git a/executor/show.go b/executor/show.go index 82f79c58c7b64..ff4dc728ca022 100644 --- a/executor/show.go +++ b/executor/show.go @@ -354,6 +354,7 @@ func (e *ShowExec) fetchShowBind() error { func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBindInfo) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, fmt.Sprintf("SELECT count(*) FROM mysql.bind_info where status = '%s' or status = '%s';", bindinfo.Enabled, bindinfo.Using)) if err != nil { @@ -384,6 +385,7 @@ func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { } func (e *ShowExec) fetchShowEngines(ctx context.Context) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) exec := e.ctx.(sqlexec.RestrictedSQLExecutor) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT * FROM information_schema.engines`) @@ -539,6 +541,7 @@ func (e *ShowExec) fetchShowTableStatus(ctx context.Context) error { } exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) var snapshot uint64 txn, err := e.ctx.Txn(false) @@ -1520,6 +1523,7 @@ func (e *ShowExec) fetchShowCreateUser(ctx context.Context) error { if checker == nil { return errors.New("miss privilege checker") } + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) userName, hostName := e.User.Username, e.User.Hostname sessVars := e.ctx.GetSessionVars() @@ -2039,10 +2043,11 @@ func tryFillViewColumnType(ctx context.Context, sctx sessionctx.Context, is info if !tbl.IsView() { return nil } + ctx = kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) // We need to run the 
build plan process in another session because there may be // multiple goroutines running at the same time while session is not goroutine-safe. // Take joining system table as an example, `fetchBuildSideRows` and `fetchProbeSideChunks` can be run concurrently. - return runWithSystemSession(sctx, func(s sessionctx.Context) error { + return runWithSystemSession(ctx, sctx, func(s sessionctx.Context) error { // Retrieve view columns info. planBuilder, _ := plannercore.NewPlanBuilder().Init(s, is, &hint.BlockHintProcessor{}) if viewLogicalPlan, err := planBuilder.BuildDataSourceFromView(ctx, dbName, tbl); err == nil { @@ -2064,12 +2069,12 @@ func tryFillViewColumnType(ctx context.Context, sctx sessionctx.Context, is info }) } -func runWithSystemSession(sctx sessionctx.Context, fn func(sessionctx.Context) error) error { +func runWithSystemSession(ctx context.Context, sctx sessionctx.Context, fn func(sessionctx.Context) error) error { b := &baseExecutor{ctx: sctx} sysCtx, err := b.getSysSession() if err != nil { return err } - defer b.releaseSysSession(sysCtx) + defer b.releaseSysSession(ctx, sysCtx) return fn(sysCtx) } diff --git a/executor/simple.go b/executor/simple.go index cfede41d49057..5568ab5c5b4d9 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -98,17 +98,17 @@ func (e *baseExecutor) getSysSession() (sessionctx.Context, error) { return restrictedCtx, nil } -func (e *baseExecutor) releaseSysSession(ctx sessionctx.Context) { - if ctx == nil { +func (e *baseExecutor) releaseSysSession(ctx context.Context, sctx sessionctx.Context) { + if sctx == nil { return } dom := domain.GetDomain(e.ctx) sysSessionPool := dom.SysSessionPool() - if _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback"); err != nil { - ctx.(pools.Resource).Close() + if _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "rollback"); err != nil { + sctx.(pools.Resource).Close() return } - sysSessionPool.Put(ctx.(pools.Resource)) + sysSessionPool.Put(sctx.(pools.Resource)) } // Next implements the Executor Next interface. @@ -183,9 +183,10 @@ func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + defer e.releaseSysSession(ctx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { return err } sql := new(strings.Builder) @@ -195,15 +196,15 @@ func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { } sql.Reset() sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? 
AND HOST=%?;", u.Username, u.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } return nil @@ -233,9 +234,10 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } sql := new(strings.Builder) @@ -245,9 +247,9 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul } sql.Reset() sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err @@ -258,22 +260,22 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul if ok { sql.Reset() sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles values(%?, %?, %?, %?);", user.Hostname, user.Username, role.Hostname, role.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } else { - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return ErrRoleNotGranted.GenWithStackByArgs(role.String(), user.String()) } } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } return nil @@ -289,13 +291,14 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol return ErrCannotUser.GenWithStackByArgs("SET DEFAULT ROLE", user.String()) } } + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) restrictedCtx, err := e.getSysSession() if err != nil 
{ return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } sql := new(strings.Builder) @@ -305,24 +308,24 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol } sql.Reset() sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } sql.Reset() sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles(HOST,USER,DEFAULT_ROLE_HOST,DEFAULT_ROLE_USER) SELECT TO_HOST,TO_USER,FROM_HOST,FROM_USER FROM mysql.role_edges WHERE TO_HOST=%? AND TO_USER=%?;", user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } return nil @@ -338,18 +341,19 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + defer e.releaseSysSession(ctx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { return err } sql := new(strings.Builder) sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? 
AND HOST=%?;", user.Username, user.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { return rollbackErr } return err @@ -375,14 +379,14 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { return rollbackErr } return err } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } return nil @@ -639,6 +643,7 @@ func (e *SimpleExec) executeReleaseSavepoint(s *ast.ReleaseSavepointStmt) error } func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) for _, role := range s.Roles { exists, err := userExists(ctx, e.ctx, role.Username, role.Hostname) if err != nil { @@ -653,11 +658,11 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) // begin a transaction to insert role graph edges. - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return errors.Trace(err) } sql := new(strings.Builder) @@ -673,7 +678,7 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm return errors.Trace(err) } if !exists { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", user.String()) @@ -684,8 +689,8 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm } sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE FROM_HOST=%? and FROM_USER=%? and TO_HOST=%? 
and TO_USER=%?`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", role.String()) @@ -693,8 +698,8 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE DEFAULT_ROLE_HOST=%? and DEFAULT_ROLE_USER=%? and HOST=%? and USER=%?`, mysql.SystemDB, mysql.DefaultRoleTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", role.String()) @@ -711,7 +716,7 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm } } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } checker := privilege.GetPrivilegeManager(e.ctx) @@ -763,6 +768,7 @@ func (e *SimpleExec) executeRollback(s *ast.RollbackStmt) error { } func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Check `CREATE USER` privilege. 
if !config.GetGlobalConfig().Security.SkipGrantTable { checker := privilege.GetPrivilegeManager(e.ctx) @@ -854,15 +860,15 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return errors.Trace(err) } - _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()) if err != nil { - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err @@ -876,21 +882,22 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm } sqlexec.MustFormatSQL(sql, `(%?, %?, %?)`, user.Hostname, user.Username, string(hack.String(privData))) } - _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()) if err != nil { - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return errors.Trace(err) } return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() } func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) if s.CurrentAuth != nil { user := e.ctx.GetSessionVars().User if user == nil { @@ -1022,6 +1029,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) sessionVars := e.ctx.GetSessionVars() for i, user := range s.Users { if user.CurrentUser { @@ -1053,11 +1061,11 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) // begin a transaction to insert role graph edges. 
- if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } @@ -1066,16 +1074,16 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) for _, role := range s.Roles { sql.Reset() sqlexec.MustFormatSQL(sql, `INSERT IGNORE INTO %n.%n (FROM_HOST, FROM_USER, TO_HOST, TO_USER) VALUES (%?,%?,%?,%?)`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } return ErrCannotUser.GenWithStackByArgs("GRANT ROLE", user.String()) } } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() @@ -1083,16 +1091,16 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) // Should cover same internal mysql.* tables as DROP USER, so this function is very similar func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { - + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) var failedUser string sysSession, err := e.getSysSession() - defer e.releaseSysSession(sysSession) + defer e.releaseSysSession(ctx, sysSession) if err != nil { return err } sqlExecutor := sysSession.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { return err } @@ -1104,7 +1112,7 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { if len(newUser.Hostname) > auth.HostNameMaxLength { return ErrWrongStringLength.GenWithStackByArgs(newUser.Hostname, "host name", auth.HostNameMaxLength) } - exists, err := userExistsInternal(sqlExecutor, oldUser.Username, oldUser.Hostname) + exists, err := userExistsInternal(ctx, sqlExecutor, oldUser.Username, oldUser.Hostname) if err != nil { return err } @@ -1113,7 +1121,7 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { break } - exists, err = userExistsInternal(sqlExecutor, newUser.Username, newUser.Hostname) + exists, err = userExistsInternal(ctx, sqlExecutor, newUser.Username, newUser.Hostname) if err != nil { return err } @@ -1181,11 +1189,11 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { } if failedUser == "" { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } } else { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "rollback"); err != nil { return err } return ErrCannotUser.GenWithStackByArgs("RENAME USER", failedUser) @@ -1194,16 +1202,18 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { } func renameUserHostInSystemTable(sqlExecutor sqlexec.SQLExecutor, tableName, usernameColumn, hostColumn string, users *ast.UserToUser) error { + ctx := 
kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) sql := new(strings.Builder) sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET %n = %?, %n = %? WHERE %n = %? and %n = %?;`, mysql.SystemDB, tableName, usernameColumn, users.NewUser.Username, hostColumn, strings.ToLower(users.NewUser.Hostname), usernameColumn, users.OldUser.Username, hostColumn, strings.ToLower(users.OldUser.Hostname)) - _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) return err } func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Check privileges. // Check `CREATE USER` privilege. checker := privilege.GetPrivilegeManager(e.ctx) @@ -1226,13 +1236,13 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e hasRestrictedUserPriv := checker.RequestDynamicVerification(activeRoles, "RESTRICTED_USER_ADMIN", false) failedUsers := make([]string, 0, len(s.UserList)) sysSession, err := e.getSysSession() - defer e.releaseSysSession(sysSession) + defer e.releaseSysSession(internalCtx, sysSession) if err != nil { return err } sqlExecutor := sysSession.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } @@ -1257,7 +1267,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // any user with SUPER requires a user with SUPER to be able to DROP the user. // We also allow RESTRICTED_USER_ADMIN to count for simplicity. if checker.RequestDynamicVerificationWithUser("SYSTEM_USER", false, user) && !(hasSystemUserPriv || hasRestrictedUserPriv) { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } return plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("SYSTEM_USER or SUPER") @@ -1266,7 +1276,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // begin a transaction to delete a user. sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.UserTable, strings.ToLower(user.Hostname), user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1274,9 +1284,9 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.global_priv sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? 
and User = %?;`, mysql.SystemDB, mysql.GlobalPrivTable, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } continue @@ -1285,7 +1295,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.db sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.DBTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1293,7 +1303,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.tables_priv sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.TablePrivTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1301,7 +1311,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.columns_priv sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.ColumnPrivTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1309,14 +1319,14 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.role_edges sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE TO_HOST = %? and TO_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE FROM_HOST = %? and FROM_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1324,14 +1334,14 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.default_roles sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE DEFAULT_ROLE_HOST = %? 
and DEFAULT_ROLE_USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE HOST = %? and USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1339,7 +1349,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.global_grants sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, "global_grants", user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1358,7 +1368,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e } if len(failedUsers) == 0 { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } if s.IsDropRole { @@ -1369,7 +1379,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e } } } else { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } if s.IsDropRole { @@ -1382,6 +1392,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e func userExists(ctx context.Context, sctx sessionctx.Context, name string, host string) (bool, error) { exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT * FROM %n.%n WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) if err != nil { return false, err @@ -1390,15 +1401,15 @@ func userExists(ctx context.Context, sctx sessionctx.Context, name string, host } // use the same internal executor to read within the same transaction, otherwise same as userExists -func userExistsInternal(sqlExecutor sqlexec.SQLExecutor, name string, host string) (bool, error) { +func userExistsInternal(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, name string, host string) (bool, error) { sql := new(strings.Builder) sqlexec.MustFormatSQL(sql, `SELECT * FROM %n.%n WHERE User=%? 
AND Host=%?;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) - recordSet, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err } req := recordSet.NewChunk(nil) - err = recordSet.Next(context.TODO(), req) + err = recordSet.Next(ctx, req) var rows int = 0 if err == nil { rows = req.NumRows() @@ -1420,6 +1431,7 @@ func (e *SimpleExec) userAuthPlugin(name string, host string) (string, error) { } func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) var u, h string if s.User == nil || s.User.CurrentUser { if e.ctx.GetSessionVars().User == nil { diff --git a/executor/split.go b/executor/split.go index 52dba35747c43..3aa6bb2bda4b9 100644 --- a/executor/split.go +++ b/executor/split.go @@ -366,6 +366,7 @@ func (e *SplitTableRegionExec) splitTableRegion(ctx context.Context) error { start := time.Now() ctxWithTimeout, cancel := context.WithTimeout(ctx, e.ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() + ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) regionIDs, err := s.SplitRegions(ctxWithTimeout, e.splitKeys, true, &e.tableInfo.ID) if err != nil { diff --git a/executor/trace.go b/executor/trace.go index bc842802feee6..d5ed5128c9ba3 100644 --- a/executor/trace.go +++ b/executor/trace.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -242,6 +243,7 @@ func (e *TraceExec) executeChild(ctx context.Context, se sqlexec.SQLExecutor) { defer func() { vars.InRestrictedSQL = origin }() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnTrace) rs, err := se.ExecuteStmt(ctx, e.stmtNode) if err != nil { var errCode uint16 diff --git a/expression/util.go b/expression/util.go index 40d4ab3f95774..f81737e4b7369 100644 --- a/expression/util.go +++ b/expression/util.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/opcode" @@ -1366,6 +1367,7 @@ func (r *SQLDigestTextRetriever) runMockQuery(data map[string]string, inValues [ // queries information_schema.statements_summary and information_schema.statements_summary_history; otherwise, it // queries the cluster version of these two tables. func (r *SQLDigestTextRetriever) runFetchDigestQuery(ctx context.Context, sctx sessionctx.Context, queryGlobal bool, inValues []interface{}) (map[string]string, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) // If mock data is set, query the mock data instead of the real statements_summary tables. 
if !queryGlobal && r.mockLocalData != nil { return r.runMockQuery(r.mockLocalData, inValues) diff --git a/go.mod b/go.mod index 443d6ee0d5431..189650006547e 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20220423142525-ae43b7f4e5c3 github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 - github.com/pingcap/kvproto v0.0.0-20220517085838-12e2f5a9d167 + github.com/pingcap/kvproto v0.0.0-20220525022339-6aaebf466305 github.com/pingcap/log v1.1.0 github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4 github.com/pingcap/tidb/parser v0.0.0-20211011031125-9b13dc409c5e @@ -62,7 +62,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.1-0.20220613112734-be31f33ba03b + github.com/tikv/client-go/v2 v2.0.1-0.20220627063500-947d923945fd github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710 github.com/twmb/murmur3 v1.1.3 github.com/uber/jaeger-client-go v2.22.1+incompatible diff --git a/go.sum b/go.sum index 395e388d04d89..56c52603876a8 100644 --- a/go.sum +++ b/go.sum @@ -651,8 +651,8 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20220302110454-c696585a961b/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20220517085838-12e2f5a9d167 h1:dsMpneacHyuVslSVndgUfJKrXFNG7VPdXip2ulG6glo= -github.com/pingcap/kvproto v0.0.0-20220517085838-12e2f5a9d167/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= +github.com/pingcap/kvproto v0.0.0-20220525022339-6aaebf466305 h1:TZ0teMZoKHnZDlJxNkWrp5Sgv3w+ruNbrqtBYKsfaNw= +github.com/pingcap/kvproto v0.0.0-20220525022339-6aaebf466305/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= @@ -772,8 +772,8 @@ github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df/go.mod h1:6Fq8o github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= -github.com/tikv/client-go/v2 v2.0.1-0.20220613112734-be31f33ba03b h1:N5ivsNkDQDgimY0ZVqMnWqXjEnxy5uFChoB4wPIKpPI= -github.com/tikv/client-go/v2 v2.0.1-0.20220613112734-be31f33ba03b/go.mod h1:KzWkFRax8foxw13dSXAQZN+dLgixwahT10ZaAK9V/pg= +github.com/tikv/client-go/v2 v2.0.1-0.20220627063500-947d923945fd h1:VAyYcN1Nw7RupQszUYqOkueEVapWSxKFU7uBaYY5Dv8= +github.com/tikv/client-go/v2 v2.0.1-0.20220627063500-947d923945fd/go.mod h1:uoZHYWKB+PsDueEnZ0EvF5zvNJPEauEWN26Tgi7qvNI= github.com/tikv/pd/client 
v0.0.0-20220307081149-841fa61e9710 h1:jxgmKOscXSjaFEKQGRyY5qOpK8hLqxs2irb/uDJMtwk= github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710/go.mod h1:AtvppPwkiyUgQlR1W9qSqfTB+OsOIu19jDCOxOsPkmU= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= diff --git a/infoschema/infoschema_test.go b/infoschema/infoschema_test.go index e414f97d02906..7f1f36b030b29 100644 --- a/infoschema/infoschema_test.go +++ b/infoschema/infoschema_test.go @@ -102,7 +102,8 @@ func TestBasic(t *testing.T) { } dbInfos := []*model.DBInfo{dbInfo} - err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { err := meta.NewMeta(txn).CreateDatabase(dbInfo) require.NoError(t, err) return errors.Trace(err) @@ -193,7 +194,7 @@ func TestBasic(t *testing.T) { require.NoError(t, err) require.NotNil(t, tb) - err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { err := meta.NewMeta(txn).CreateTableOrView(dbID, tblInfo) require.NoError(t, err) return errors.Trace(err) @@ -305,7 +306,8 @@ func TestInfoTables(t *testing.T) { func genGlobalID(store kv.Storage) (int64, error) { var globalID int64 - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { var err error globalID, err = meta.NewMeta(txn).GenGlobalID() return errors.Trace(err) @@ -356,7 +358,8 @@ func TestBuildBundle(t *testing.T) { var tb1Bundle, p1Bundle *placement.Bundle - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) (err error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) (err error) { m := meta.NewMeta(txn) tb1Bundle, err = placement.NewTableBundle(m, tbl1.Meta()) require.NoError(t, err) diff --git a/kv/BUILD.bazel b/kv/BUILD.bazel index fed476f803dfc..835cd827b6bfa 100644 --- a/kv/BUILD.bazel +++ b/kv/BUILD.bazel @@ -46,6 +46,7 @@ go_library( "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//tikvrpc", + "@com_github_tikv_client_go_v2//util", "@com_github_tikv_pd_client//:client", "@org_uber_go_zap//:zap", ], diff --git a/kv/kv.go b/kv/kv.go index 85c7dbc5d0e46..39dbcc7c9bf78 100644 --- a/kv/kv.go +++ b/kv/kv.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/tikvrpc" + "github.com/tikv/client-go/v2/util" pd "github.com/tikv/pd/client" ) @@ -365,6 +366,8 @@ type Request struct { ResourceGroupTagger tikvrpc.ResourceGroupTagger // Paging indicates whether the request is a paging request. Paging bool + // RequestSource indicates whether the request is an internal request. + RequestSource util.RequestSource } // PartitionIDAndRanges used by PartitionTableScan in tiflash. 
diff --git a/kv/option.go b/kv/option.go index 56a2e58e9196e..295cf60ee0c11 100644 --- a/kv/option.go +++ b/kv/option.go @@ -14,6 +14,10 @@ package kv +import ( + "github.com/tikv/client-go/v2/util" +) + // Transaction options const ( // BinlogInfo contains the binlog data and client. @@ -81,6 +85,10 @@ const ( TableToColumnMaps // AssertionLevel controls how strict the assertions on data during transactions should be. AssertionLevel + // RequestSourceInternal set request source scope of transaction. + RequestSourceInternal + // RequestSourceType set request source type of the current statement. + RequestSourceType ) // ReplicaReadType is the type of replica to read data from @@ -106,3 +114,49 @@ func (r ReplicaReadType) IsFollowerRead() bool { func (r ReplicaReadType) IsClosestRead() bool { return r == ReplicaReadClosest } + +// RequestSourceKey is used as the key of request source type in context. +var RequestSourceKey = util.RequestSourceKey + +// RequestSource is the scope and type of the request and it's passed by go context. +type RequestSource = util.RequestSource + +// WithInternalSourceType create context with internal source. +var WithInternalSourceType = util.WithInternalSourceType + +const ( + // InternalTxnOthers is the type of requests that consume low resources. + // This reduces the size of metrics. + InternalTxnOthers = util.InternalTxnOthers + // InternalTxnGC is the type of GC txn. + InternalTxnGC = util.InternalTxnGC + // InternalTxnBootstrap is the type of TiDB bootstrap txns. + InternalTxnBootstrap = InternalTxnOthers + // InternalTxnMeta is the type of the miscellaneous meta usage. + InternalTxnMeta = util.InternalTxnMeta + // InternalTxnDDL is the type of inner txns in ddl module. + InternalTxnDDL = "ddl" + // InternalTxnBackfillDDLPrefix is the prefix of the types of DDL operations needs backfilling. + InternalTxnBackfillDDLPrefix = "ddl_" + // InternalTxnCacheTable is the type of cache table usage. + InternalTxnCacheTable = InternalTxnOthers + // InternalTxnStats is the type of statistics txn. + InternalTxnStats = "stats" + // InternalTxnBindInfo is the type of bind info txn. + InternalTxnBindInfo = InternalTxnOthers + // InternalTxnSysVar is the type of sys var txn. + InternalTxnSysVar = InternalTxnOthers + // InternalTxnTelemetry is the type of telemetry. + InternalTxnTelemetry = InternalTxnOthers + // InternalTxnAdmin is the type of admin operations. + InternalTxnAdmin = "admin" + // InternalTxnPrivilege is the type of privilege txn. + InternalTxnPrivilege = InternalTxnOthers + // InternalTxnTools is the type of tools usage of TiDB. + // Do not classify different tools by now. + InternalTxnTools = "tools" + // InternalTxnBR is the type of BR usage. + InternalTxnBR = InternalTxnTools + // InternalTxnTrace handles the trace statement. + InternalTxnTrace = "Trace" +) diff --git a/kv/txn.go b/kv/txn.go index dd51b7a2e56fe..dfad0ee2efed0 100644 --- a/kv/txn.go +++ b/kv/txn.go @@ -17,6 +17,7 @@ package kv import ( "context" "errors" + "flag" "fmt" "math" "math/rand" @@ -99,7 +100,7 @@ func PrintLongTimeInternalTxn(now time.Time, startTS uint64, runByFunction bool) } } -// RunInNewTxn will run the f in a new transaction environment. +// RunInNewTxn will run the f in a new transaction environment, should be used by inner txn only. 
func RunInNewTxn(ctx context.Context, store Storage, retryable bool, f func(ctx context.Context, txn Transaction) error) error { var ( err error @@ -117,6 +118,7 @@ func RunInNewTxn(ctx context.Context, store Storage, retryable bool, f func(ctx logutil.BgLogger().Error("RunInNewTxn", zap.Error(err)) return err } + setRequestSourceForInnerTxn(ctx, txn) // originalTxnTS is used to trace the original transaction when the function is retryable. if i == 0 { @@ -188,3 +190,24 @@ func BackOff(attempts uint) int { time.Sleep(sleep) return int(sleep) } + +func setRequestSourceForInnerTxn(ctx context.Context, txn Transaction) { + if source := ctx.Value(RequestSourceKey); source != nil { + requestSource := source.(RequestSource) + if !requestSource.RequestSourceInternal { + logutil.Logger(ctx).Warn("`RunInNewTxn` should be used by inner txn only") + } + txn.SetOption(RequestSourceInternal, requestSource.RequestSourceInternal) + txn.SetOption(RequestSourceType, requestSource.RequestSourceType) + } else { + // panic in test mode in case there are requests without source in the future. + // log warnings in production mode. + if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { + panic("unexpected no source type context, if you see this error, " + + "the `RequestSourceTypeKey` is missing in your context") + } else { + logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, " + + "the `RequestSourceTypeKey` is missing in the context") + } + } +} diff --git a/kv/txn_test.go b/kv/txn_test.go index 22fc61a482042..9e01a6fbd92a7 100644 --- a/kv/txn_test.go +++ b/kv/txn_test.go @@ -42,17 +42,18 @@ func TestRetryExceedCountError(t *testing.T) { }(maxRetryCnt) maxRetryCnt = 5 - err := RunInNewTxn(context.Background(), &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { + ctx := WithInternalSourceType(context.Background(), InternalTxnOthers) + err := RunInNewTxn(ctx, &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { return nil }) assert.NotNil(t, err) - err = RunInNewTxn(context.Background(), &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { + err = RunInNewTxn(ctx, &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { return ErrTxnRetryable }) assert.NotNil(t, err) - err = RunInNewTxn(context.Background(), &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { + err = RunInNewTxn(ctx, &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { return errors.New("do not retry") }) assert.NotNil(t, err) @@ -62,7 +63,7 @@ func TestRetryExceedCountError(t *testing.T) { cfg.SetGetError(err1) cfg.SetCommitError(err1) storage := NewInjectedStore(newMockStorage(), &cfg) - err = RunInNewTxn(context.Background(), storage, true, func(ctx context.Context, txn Transaction) error { + err = RunInNewTxn(ctx, storage, true, func(ctx context.Context, txn Transaction) error { return nil }) assert.NotNil(t, err) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 5d8745cf78697..7082ea184b476 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -234,7 +234,8 @@ func (alloc *allocator) End() int64 { func (alloc *allocator) NextGlobalAutoID() (int64, error) { var autoID int64 startTime := time.Now() - err := kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx 
context.Context, txn kv.Transaction) error { var err1 error autoID, err1 = alloc.getIDAccessor(txn).Get() if err1 != nil { @@ -271,6 +272,7 @@ func (alloc *allocator) rebase4Unsigned(ctx context.Context, requiredBase uint64 } var newBase, newEnd uint64 startTime := time.Now() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if allocatorStats != nil { txn.SetOption(kv.CollectRuntimeStats, allocatorStats.SnapshotRuntimeStats) @@ -330,6 +332,7 @@ func (alloc *allocator) rebase4Signed(ctx context.Context, requiredBase int64, a } var newBase, newEnd int64 startTime := time.Now() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if allocatorStats != nil { txn.SetOption(kv.CollectRuntimeStats, allocatorStats.SnapshotRuntimeStats) @@ -370,7 +373,8 @@ func (alloc *allocator) rebase4Signed(ctx context.Context, requiredBase int64, a func (alloc *allocator) rebase4Sequence(requiredBase int64) (int64, bool, error) { startTime := time.Now() alreadySatisfied := false - err := kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { acc := meta.NewMeta(txn).GetAutoIDAccessors(alloc.dbID, alloc.tbID) currentEnd, err := acc.SequenceValue().Get() if err != nil { @@ -427,7 +431,8 @@ func (alloc *allocator) ForceRebase(requiredBase int64) error { alloc.mu.Lock() defer alloc.mu.Unlock() startTime := time.Now() - err := kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { idAcc := alloc.getIDAccessor(txn) currentEnd, err1 := idAcc.Get() if err1 != nil { @@ -758,6 +763,7 @@ func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, o }() } + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("alloc.alloc4Signed", opentracing.ChildOf(span.Context())) @@ -847,6 +853,7 @@ func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, }() } + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("alloc.alloc4Unsigned", opentracing.ChildOf(span.Context())) @@ -931,7 +938,8 @@ func (alloc *allocator) alloc4Sequence() (min int64, max int64, round int64, err var newBase, newEnd int64 startTime := time.Now() - err = kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { acc := meta.NewMeta(txn).GetAutoIDAccessors(alloc.dbID, alloc.tbID) var ( err1 
error diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index e529764918eff..0b8cd60257cf4 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -47,7 +47,8 @@ func TestSignedAutoid(t *testing.T) { require.NoError(t, err) }() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -69,7 +70,6 @@ func TestSignedAutoid(t *testing.T) { alloc := autoid.NewAllocator(store, 1, 1, false, autoid.RowIDAllocType) require.NotNil(t, alloc) - ctx := context.Background() globalAutoID, err := alloc.NextGlobalAutoID() require.NoError(t, err) require.Equal(t, int64(1), globalAutoID) @@ -252,7 +252,8 @@ func TestUnsignedAutoid(t *testing.T) { require.NoError(t, err) }() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -273,7 +274,6 @@ func TestUnsignedAutoid(t *testing.T) { alloc := autoid.NewAllocator(store, 1, 1, true, autoid.RowIDAllocType) require.NotNil(t, alloc) - ctx := context.Background() globalAutoID, err := alloc.NextGlobalAutoID() require.NoError(t, err) require.Equal(t, int64(1), globalAutoID) @@ -416,7 +416,8 @@ func TestConcurrentAlloc(t *testing.T) { dbID := int64(2) tblID := int64(100) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -501,7 +502,8 @@ func TestRollbackAlloc(t *testing.T) { }() dbID := int64(1) tblID := int64(2) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -511,7 +513,6 @@ func TestRollbackAlloc(t *testing.T) { }) require.NoError(t, err) - ctx := context.Background() injectConf := new(kv.InjectionConfig) injectConf.SetCommitError(errors.New("injected")) injectedStore := kv.NewInjectedStore(store, injectConf) @@ -551,7 +552,8 @@ func TestAllocComputationIssue(t *testing.T) { require.NoError(t, err) }() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -582,7 +584,6 @@ func 
TestAllocComputationIssue(t *testing.T) { // Simulate the rest cache is not enough for next batch, assuming 10 & 13, batch size = 4. autoid.TestModifyBaseAndEndInjection(signedAlloc1, 4, 6) - ctx := context.Background() // Here will recompute the new allocator batch size base on new base = 10, which will get 6. min, max, err := unsignedAlloc1.Alloc(ctx, 2, 3, 1) require.NoError(t, err) diff --git a/meta/autoid/bench_test.go b/meta/autoid/bench_test.go index e10ce091e5c84..d8b489060875d 100644 --- a/meta/autoid/bench_test.go +++ b/meta/autoid/bench_test.go @@ -41,7 +41,8 @@ func BenchmarkAllocator_Alloc(b *testing.B) { }() dbID := int64(1) tblID := int64(2) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) if err != nil { @@ -56,7 +57,6 @@ func BenchmarkAllocator_Alloc(b *testing.B) { if err != nil { return } - ctx := context.Background() alloc := autoid.NewAllocator(store, 1, 2, false, autoid.RowIDAllocType) b.StartTimer() for i := 0; i < b.N; i++ { @@ -81,7 +81,8 @@ func BenchmarkAllocator_SequenceAlloc(b *testing.B) { }() var seq *model.SequenceInfo var sequenceBase int64 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) if err != nil { diff --git a/meta/autoid/seq_autoid_test.go b/meta/autoid/seq_autoid_test.go index 80568edd1c8ee..761f3955cb65d 100644 --- a/meta/autoid/seq_autoid_test.go +++ b/meta/autoid/seq_autoid_test.go @@ -40,7 +40,8 @@ func TestSequenceAutoid(t *testing.T) { var seq *model.SequenceInfo var sequenceBase int64 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -164,7 +165,8 @@ func TestConcurrentAllocSequence(t *testing.T) { var seq *model.SequenceInfo var sequenceBase int64 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err1 := m.CreateDatabase(&model.DBInfo{ID: 2, Name: model.NewCIStr("a")}) require.NoError(t, err1) diff --git a/meta/meta.go b/meta/meta.go index f8bf8e6eafd19..8a8ec96687404 100644 --- a/meta/meta.go +++ b/meta/meta.go @@ -137,6 +137,8 @@ func NewMeta(txn kv.Transaction, jobListKeys ...JobListKeyType) *Meta { // NewSnapshotMeta creates a Meta with snapshot. 
func NewSnapshotMeta(snapshot kv.Snapshot) *Meta { + snapshot.SetOption(kv.RequestSourceInternal, true) + snapshot.SetOption(kv.RequestSourceType, kv.InternalTxnMeta) t := structure.NewStructure(snapshot, nil, mMetaPrefix) return &Meta{txn: t} } diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index 16a5407af9f0f..bc4c3d2fd4c8f 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -6921,7 +6921,7 @@ "y": 21 }, "hiddenSeries": false, - "id": 250, + "id": 252, "legend": { "alignAsTable": true, "avg": true, @@ -8859,7 +8859,7 @@ "h": 7, "w": 8, "x": 0, - "y": 15 + "y": 16 }, "id": 219, "legend": { @@ -8952,7 +8952,7 @@ "h": 7, "w": 8, "x": 8, - "y": 15 + "y": 16 }, "id": 220, "legend": { @@ -9042,7 +9042,7 @@ "h": 7, "w": 8, "x": 16, - "y": 15 + "y": 16 }, "id": 164, "legend": { @@ -9129,7 +9129,7 @@ "h": 7, "w": 8, "x": 0, - "y": 22 + "y": 23 }, "id": 250, "legend": { @@ -9221,10 +9221,10 @@ "h": 7, "w": 8, "x": 8, - "y": 22 + "y": 23 }, "hiddenSeries": false, - "id": 251, + "id": 255, "legend": { "alignAsTable": true, "avg": false, @@ -15724,6 +15724,277 @@ ], "title": "TopSQL", "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 257, + "panels": [ + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "kv request count by instance and command source", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 15 + }, + "hiddenSeries": false, + "id": 253, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.11", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "KV Requst Count", + "bars": false, + "color": "#FADE2A", + "lines": true, + "linewidth": 1, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (instance, type, source)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}-{{type}}-{{source}}", + "refId": "A", + "step": 40 + }, + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "KV Requst Count", + "refId": "B", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "KV Request OPS by source", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:62", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 
"0", + "show": true + }, + { + "$$hashKey": "object:63", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "kv request time by instance and command source", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 15 + }, + "hiddenSeries": false, + "id": 254, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.11", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "KV Requst Time", + "bars": false, + "color": "#FADE2A", + "lines": true, + "linewidth": 1, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_time_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (instance, type, source)", + "format": "time_series", + "interval": "", + "legendFormat": "{{instance}}-{{type}}-{{source}}", + "refId": "A", + "step": 40 + }, + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_time_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", + "format": "time_series", + "interval": "", + "legendFormat": "KV Requst Time", + "refId": "B", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "KV Request Time by source", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:62", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:63", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "SourceSQL", + "type": "row" } ], "refresh": "30s", diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 1059e8eadf1a5..4f574a6010b63 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -2261,7 +2261,8 @@ func (b *PlanBuilder) genV2AnalyzeOptions( func (b *PlanBuilder) getSavedAnalyzeOpts(physicalID int64, tblInfo *model.TableInfo) (map[ast.AnalyzeOptionType]uint64, model.ColumnChoice, []*model.ColumnInfo, error) { analyzeOptions := map[ast.AnalyzeOptionType]uint64{} exec := b.ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select sample_num,sample_rate,buckets,topn,column_choice,column_ids from mysql.analyze_options where table_id = %?", physicalID) + ctx := kv.WithInternalSourceType(context.Background(), 
kv.InternalTxnStats) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select sample_num,sample_rate,buckets,topn,column_choice,column_ids from mysql.analyze_options where table_id = %?", physicalID) if err != nil { return nil, model.DefaultChoice, nil, err } diff --git a/plugin/integration_test.go b/plugin/integration_test.go index cb470b4c11b6c..84cb8d2076a24 100644 --- a/plugin/integration_test.go +++ b/plugin/integration_test.go @@ -21,6 +21,7 @@ import ( "strings" "testing" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/plugin" "github.com/pingcap/tidb/server" @@ -698,7 +699,8 @@ func TestAuditLogNormal(t *testing.T) { testResults = testResults[:0] errMsg := fmt.Sprintf("statement: %s", test.sql) query := append([]byte{mysql.ComQuery}, []byte(test.sql)...) - err := conn.Dispatch(context.Background(), query) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + err := conn.Dispatch(ctx, query) require.NoError(t, err, errMsg) resultCount := test.resCnt if resultCount == 0 { diff --git a/privilege/privileges/BUILD.bazel b/privilege/privileges/BUILD.bazel index 7ada9e31fbaa6..dc68d8297561d 100644 --- a/privilege/privileges/BUILD.bazel +++ b/privilege/privileges/BUILD.bazel @@ -12,6 +12,7 @@ go_library( deps = [ "//errno", "//infoschema", + "//kv", "//parser/ast", "//parser/auth", "//parser/mysql", diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go index d1e85b3d45091..44e02bf1645ff 100644 --- a/privilege/privileges/cache.go +++ b/privilege/privileges/cache.go @@ -26,6 +26,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/mysql" @@ -566,7 +567,7 @@ func (p *MySQLPrivilege) LoadDefaultRoles(ctx sessionctx.Context) error { func (p *MySQLPrivilege) loadTable(sctx sessionctx.Context, sql string, decodeTableRow func(chunk.Row, []*ast.ResultField) error) error { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index c7d465c52006e..9b159309c0795 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -1415,7 +1415,8 @@ func TestMetricsSchema(t *testing.T) { Hostname: "localhost", }, nil, nil) - rs, err := tk.Session().ExecuteInternal(context.Background(), test.stmt) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := tk.Session().ExecuteInternal(ctx, test.stmt) if err == nil { _, err = session.GetRows4Test(context.Background(), tk.Session(), rs) } @@ -1891,33 +1892,34 @@ func TestSecurityEnhancedLocalBackupRestore(t *testing.T) { Hostname: "localhost", }, nil, nil) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Prior to SEM nolocal has permission, the error should be because backup requires tikv - _, err := tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'Local:///tmp/test';") + _, err := tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'Local:///tmp/test';") require.EqualError(t, err, "BACKUP requires tikv store, not unistore") - _, err = tk.Session().ExecuteInternal(context.Background(), "RESTORE DATABASE * FROM 
'LOCAl:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "RESTORE DATABASE * FROM 'LOCAl:///tmp/test';") require.EqualError(t, err, "RESTORE requires tikv store, not unistore") sem.Enable() defer sem.Disable() // With SEM enabled nolocal does not have permission, but yeslocal does. - _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'local:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'local:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'file:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'file:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO '/tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO '/tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "RESTORE DATABASE * FROM 'LOCAl:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "RESTORE DATABASE * FROM 'LOCAl:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'hdfs:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'hdfs:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'hdfs storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "RESTORE DATABASE * FROM 'HDFS:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "RESTORE DATABASE * FROM 'HDFS:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'hdfs storage' is not supported when security enhanced mode is enabled") } diff --git a/server/http_handler.go b/server/http_handler.go index 5db67bcd3be0c..73d3020fbec52 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -1001,7 +1001,7 @@ func getSchemaTablesStorageInfo(h *schemaStorageHandler, schema *model.CIStr, ta } defer s.Close() - ctx := s.(sessionctx.Context) + sctx := s.(sessionctx.Context) condition := make([]string, 0) params := make([]interface{}, 0) @@ -1019,14 +1019,15 @@ func getSchemaTablesStorageInfo(h *schemaStorageHandler, schema *model.CIStr, ta sql += ` WHERE ` + strings.Join(condition, ` AND `) } var results sqlexec.RecordSet - if results, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql, params...); err != nil { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + if results, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, params...); err != nil { logutil.BgLogger().Error(`ExecuteInternal`, zap.Error(err)) } else if results != nil { messages = make([]*schemaTableStorage, 0) defer terror.Call(results.Close) for { req := results.NewChunk(nil) - if err = results.Next(context.TODO(), req); err != nil { + if err = results.Next(ctx, req); err != nil { break } diff --git a/session/advisory_locks.go b/session/advisory_locks.go index aca6914de2029..f51bb061a119c 100644 --- a/session/advisory_locks.go +++ 
b/session/advisory_locks.go @@ -17,6 +17,7 @@ package session import ( "context" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/terror" ) @@ -67,6 +68,7 @@ func (a *advisoryLock) Close() { // We will never COMMIT the transaction, but the err indicates // if the lock was successfully acquired. func (a *advisoryLock) GetLock(lockName string, timeout int64) error { + a.ctx = kv.WithInternalSourceType(a.ctx, kv.InternalTxnOthers) _, err := a.session.ExecuteInternal(a.ctx, "SET innodb_lock_wait_timeout = %?", timeout) if err != nil { return err diff --git a/session/bootstrap.go b/session/bootstrap.go index d63181829993d..5656b9c60a5d3 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/model" @@ -722,8 +723,9 @@ var ( ) func checkBootstrapped(s Session) (bool, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) // Check if system db exists. - _, err := s.ExecuteInternal(context.Background(), "USE %n", mysql.SystemDB) + _, err := s.ExecuteInternal(ctx, "USE %n", mysql.SystemDB) if err != nil && infoschema.ErrDatabaseNotExists.NotEqual(err) { logutil.BgLogger().Fatal("check bootstrap error", zap.Error(err)) @@ -739,7 +741,7 @@ func checkBootstrapped(s Session) (bool, error) { isBootstrapped := sVal == varTrue if isBootstrapped { // Make sure that doesn't affect the following operations. - if err = s.CommitTxn(context.Background()); err != nil { + if err = s.CommitTxn(ctx); err != nil { return false, errors.Trace(err) } } @@ -749,7 +751,7 @@ func checkBootstrapped(s Session) (bool, error) { // getTiDBVar gets variable value from mysql.tidb table. // Those variables are used by TiDB server. func getTiDBVar(s Session, name string) (sVal string, isNull bool, e error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, `SELECT HIGH_PRIORITY VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME= %?`, mysql.SystemDB, mysql.TiDBTable, @@ -789,7 +791,8 @@ func upgrade(s Session) { } updateBootstrapVer(s) - _, err = s.ExecuteInternal(context.Background(), "COMMIT") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err = s.ExecuteInternal(ctx, "COMMIT") if err != nil { sleepTime := 1 * time.Second @@ -877,8 +880,9 @@ func upgradeToVer8(s Session, ver int64) { if ver >= version8 { return } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) // This is a dummy upgrade, it checks whether upgradeToVer7 success, if not, do it again. 
- if _, err := s.ExecuteInternal(context.Background(), "SELECT HIGH_PRIORITY `Process_priv` FROM mysql.user LIMIT 0"); err == nil { + if _, err := s.ExecuteInternal(ctx, "SELECT HIGH_PRIORITY `Process_priv` FROM mysql.user LIMIT 0"); err == nil { return } upgradeToVer7(s, ver) @@ -894,7 +898,8 @@ func upgradeToVer9(s Session, ver int64) { } func doReentrantDDL(s Session, sql string, ignorableErrs ...error) { - _, err := s.ExecuteInternal(context.Background(), sql) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, sql) for _, ignorableErr := range ignorableErrs { if terror.ErrorEqual(err, ignorableErr) { return @@ -920,7 +925,8 @@ func upgradeToVer11(s Session, ver int64) { if ver >= version11 { return } - _, err := s.ExecuteInternal(context.Background(), "ALTER TABLE mysql.user ADD COLUMN `References_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Grant_priv`") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, "ALTER TABLE mysql.user ADD COLUMN `References_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Grant_priv`") if err != nil { if terror.ErrorEqual(err, infoschema.ErrColumnExists) { return @@ -934,7 +940,7 @@ func upgradeToVer12(s Session, ver int64) { if ver >= version12 { return } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) _, err := s.ExecuteInternal(ctx, "BEGIN") terror.MustNil(err) sql := "SELECT HIGH_PRIORITY user, host, password FROM mysql.user WHERE password != ''" @@ -988,7 +994,7 @@ func upgradeToVer13(s Session, ver int64) { "ALTER TABLE mysql.user ADD COLUMN `Alter_routine_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Create_routine_priv`", "ALTER TABLE mysql.user ADD COLUMN `Event_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Create_user_priv`", } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) for _, sql := range sqls { _, err := s.ExecuteInternal(ctx, sql) if err != nil { @@ -1017,7 +1023,7 @@ func upgradeToVer14(s Session, ver int64) { "ALTER TABLE mysql.db ADD COLUMN `Event_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Execute_priv`", "ALTER TABLE mysql.db ADD COLUMN `Trigger_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Event_priv`", } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) for _, sql := range sqls { _, err := s.ExecuteInternal(ctx, sql) if err != nil { @@ -1034,7 +1040,8 @@ func upgradeToVer15(s Session, ver int64) { return } var err error - _, err = s.ExecuteInternal(context.Background(), CreateGCDeleteRangeTable) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err = s.ExecuteInternal(ctx, CreateGCDeleteRangeTable) if err != nil { logutil.BgLogger().Fatal("upgradeToVer15 error", zap.Error(err)) } @@ -1239,7 +1246,8 @@ func upgradeToVer38(s Session, ver int64) { return } var err error - _, err = s.ExecuteInternal(context.Background(), CreateGlobalPrivTable) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err = s.ExecuteInternal(ctx, CreateGlobalPrivTable) if err != nil { logutil.BgLogger().Fatal("upgradeToVer38 error", zap.Error(err)) } @@ -1407,7 +1415,7 @@ func upgradeToVer55(s Session, ver int64) { } selectSQL := "select HIGH_PRIORITY 
* from mysql.global_variables where variable_name in ('" + strings.Join(names, quoteCommaQuote) + "')" - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, selectSQL) terror.MustNil(err) defer terror.Call(rs.Close) @@ -1513,8 +1521,9 @@ func upgradeToVer67(s Session, ver int64) { mustExecute(s, "COMMIT") }() mustExecute(s, h.LockBindInfoSQL()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) var rs sqlexec.RecordSet - rs, err = s.ExecuteInternal(context.Background(), + rs, err = s.ExecuteInternal(ctx, `SELECT bind_sql, default_db, status, create_time, charset, collation, source FROM mysql.bind_info WHERE source != 'builtin' @@ -1733,7 +1742,7 @@ func upgradeToVer80(s Session, ver int64) { } // Check if tidb_analyze_version exists in mysql.GLOBAL_VARIABLES. // If not, insert "tidb_analyze_version | 1" since this is the old behavior before we introduce this variable. - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?;", mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBAnalyzeVersion) terror.MustNil(err) @@ -1756,7 +1765,7 @@ func upgradeToVer81(s Session, ver int64) { } // Check if tidb_enable_index_merge exists in mysql.GLOBAL_VARIABLES. // If not, insert "tidb_enable_index_merge | off". - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?;", mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBEnableIndexMerge) terror.MustNil(err) @@ -2069,7 +2078,8 @@ func doDMLWorks(s Session) { writeStmtSummaryVars(s) - _, err := s.ExecuteInternal(context.Background(), "COMMIT") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, "COMMIT") if err != nil { sleepTime := 1 * time.Second logutil.BgLogger().Info("doDMLWorks failed", zap.Error(err), zap.Duration("sleeping time", sleepTime)) @@ -2087,7 +2097,8 @@ func doDMLWorks(s Session) { } func mustExecute(s Session, sql string, args ...interface{}) { - _, err := s.ExecuteInternal(context.Background(), sql, args...) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, sql, args...) if err != nil { debug.PrintStack() logutil.BgLogger().Fatal("mustExecute error", zap.Error(err)) diff --git a/session/session.go b/session/session.go index 5ae3f2165b146..a00b269d2ee06 100644 --- a/session/session.go +++ b/session/session.go @@ -25,6 +25,7 @@ import ( "encoding/hex" "encoding/json" stderrs "errors" + "flag" "fmt" "math/rand" "runtime/pprof" @@ -1252,6 +1253,9 @@ func drainRecordSet(ctx context.Context, se *session, rs sqlexec.RecordSet, allo // getTableValue executes restricted sql and the result is one column. // It returns a string value. 
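The bootstrap and upgrade hunks above converge on one convention: each step builds a single context tagged with kv.InternalTxnBootstrap and reuses it for every internal statement in that step, instead of calling context.Background() or context.TODO() per statement. A condensed sketch of a step written in that style; upgradeToVerN, targetVer, and the SQL text are placeholders, and the snippet assumes it lives in session/bootstrap.go with that file's existing imports:

func upgradeToVerN(s Session, ver, targetVer int64) {
	if ver >= targetVer { // targetVer stands in for the step's version constant
		return
	}
	// One tagged context per step, shared by every internal statement in it.
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap)
	_, err := s.ExecuteInternal(ctx, "ALTER TABLE mysql.user ADD COLUMN `Example_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N'")
	terror.MustNil(err)
	_, err = s.ExecuteInternal(ctx, "UPDATE mysql.user SET `Example_priv` = 'Y' WHERE user = 'root'")
	terror.MustNil(err)
}

Error handling is reduced to MustNil here for brevity; the real steps tolerate reentrant DDL errors, as the doReentrantDDL hunk above shows.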
func (s *session) getTableValue(ctx context.Context, tblName string, varName string) (string, error) { + if ctx.Value(kv.RequestSourceKey) == nil { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnSysVar) + } rows, fields, err := s.ExecRestrictedSQL(ctx, nil, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?", mysql.SystemDB, tblName, varName) if err != nil { return "", err @@ -1270,6 +1274,7 @@ func (s *session) getTableValue(ctx context.Context, tblName string, varName str // replaceGlobalVariablesTableValue executes restricted sql updates the variable value // It will then notify the etcd channel that the value has changed. func (s *session) replaceGlobalVariablesTableValue(ctx context.Context, varName, val string) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnSysVar) _, _, err := s.ExecRestrictedSQL(ctx, nil, `REPLACE INTO %n.%n (variable_name, variable_value) VALUES (%?, %?)`, mysql.SystemDB, mysql.GlobalVariablesTable, varName, val) if err != nil { return err @@ -1357,7 +1362,8 @@ func (s *session) SetGlobalSysVarOnly(name, value string) (err error) { // SetTiDBTableValue implements GlobalVarAccessor.SetTiDBTableValue interface. func (s *session) SetTiDBTableValue(name, value, comment string) error { - _, _, err := s.ExecRestrictedSQL(context.TODO(), nil, `REPLACE INTO mysql.tidb (variable_name, variable_value, comment) VALUES (%?, %?, %?)`, name, value, comment) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnSysVar) + _, _, err := s.ExecRestrictedSQL(ctx, nil, `REPLACE INTO mysql.tidb (variable_name, variable_value, comment) VALUES (%?, %?, %?)`, name, value, comment) return err } @@ -1730,6 +1736,7 @@ func (s *session) ExecRestrictedStmt(ctx context.Context, stmtNode ast.StmtNode, func ExecRestrictedStmt4Test(ctx context.Context, s Session, stmtNode ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ( []chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) return s.(*session).ExecRestrictedStmt(ctx, stmtNode, opts...) } @@ -1933,6 +1940,9 @@ func (s *session) ExecuteStmt(ctx context.Context, stmtNode ast.StmtNode) (sqlex } }) + stmtLabel := executor.GetStmtLabel(stmtNode) + s.setRequestSource(ctx, stmtLabel, stmtNode) + // Transform abstract syntax tree to a physical plan(stored in executor.ExecStmt). compiler := executor.Compiler{Ctx: s} stmt, err := compiler.Compile(ctx, stmtNode) @@ -2428,6 +2438,9 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ if err = s.onTxnManagerStmtStartOrRetry(ctx, execStmt); err != nil { return nil, err } + s.setRequestSource(ctx, preparedStmt.PreparedAst.StmtType, preparedStmt.PreparedAst.Stmt) + // even the txn is valid, still need to set session variable for coprocessor usage. 
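getTableValue above shows the guard used wherever a helper can be reached from both already-tagged and untagged call sites: keep the caller's source if one is present, otherwise fall back to a sensible internal label. As a reusable shape (ensureInternalSource is an illustrative name, not a function in this patch; the snippet assumes a file that already imports context and github.com/pingcap/tidb/kv):

func ensureInternalSource(ctx context.Context, fallback string) context.Context {
	// Respect a source set further up the call chain; only untagged contexts
	// get the fallback label.
	if ctx.Value(kv.RequestSourceKey) == nil {
		return kv.WithInternalSourceType(ctx, fallback)
	}
	return ctx
}

// e.g. ctx = ensureInternalSource(ctx, kv.InternalTxnSysVar), matching the
// fallback in getTableValue above.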
+ s.sessionVars.RequestSourceType = preparedStmt.PreparedAst.StmtType if ok { rs, ok, err := s.cachedPointPlanExec(ctx, txnManager.GetTxnInfoSchema(), execStmt, preparedStmt, replicaReadScope, args) @@ -2498,6 +2511,12 @@ func (s *session) NewTxn(ctx context.Context) error { }, } s.txn.SetOption(kv.SnapInterceptor, s.getSnapshotInterceptor()) + if s.GetSessionVars().InRestrictedSQL { + s.txn.SetOption(kv.RequestSourceInternal, true) + if source := ctx.Value(kv.RequestSourceKey); source != nil { + s.txn.SetOption(kv.RequestSourceType, source.(kv.RequestSource).RequestSourceType) + } + } return nil } @@ -2761,8 +2780,8 @@ func CreateSessionWithOpt(store kv.Storage, opt *Opt) (Session, error) { } // loadCollationParameter loads collation parameter from mysql.tidb -func loadCollationParameter(se *session) (bool, error) { - para, err := se.getTableValue(context.TODO(), mysql.TiDBTable, tidbNewCollationEnabled) +func loadCollationParameter(ctx context.Context, se *session) (bool, error) { + para, err := se.getTableValue(ctx, mysql.TiDBTable, tidbNewCollationEnabled) if err != nil { return false, err } @@ -2781,6 +2800,7 @@ var errResultIsEmpty = dbterror.ClassExecutor.NewStd(errno.ErrResultIsEmpty) // BootstrapSession runs the first time when the TiDB server start. func BootstrapSession(store kv.Storage) (*domain.Domain, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) cfg := config.GetGlobalConfig() if len(cfg.Instance.PluginLoad) > 0 { err := plugin.Load(context.Background(), plugin.Config{ @@ -2806,14 +2826,14 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { ses[0].GetSessionVars().InRestrictedSQL = true // get system tz from mysql.tidb - tz, err := ses[0].getTableValue(context.TODO(), mysql.TiDBTable, tidbSystemTZ) + tz, err := ses[0].getTableValue(ctx, mysql.TiDBTable, tidbSystemTZ) if err != nil { return nil, err } timeutil.SetSystemTZ(tz) // get the flag from `mysql`.`tidb` which indicating if new collations are enabled. 
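The NewTxn hunk above, and the baseTxnContextProvider.ActivateTxn hunk further down, both copy the request source onto the live transaction through the two new option keys. An illustrative consolidation of that logic (applyRequestSource is not a function in this patch; it only shows how the pieces fit together, assuming the usual kv import):

func applyRequestSource(ctx context.Context, txn kv.Transaction, internal bool, defaultType string) {
	if internal {
		// Mark the transaction as internal traffic.
		txn.SetOption(kv.RequestSourceInternal, true)
	}
	if source := ctx.Value(kv.RequestSourceKey); source != nil {
		// Prefer the type carried by the tagged context.
		txn.SetOption(kv.RequestSourceType, source.(kv.RequestSource).RequestSourceType)
	} else if defaultType != "" {
		// Otherwise fall back to the label stashed in the session variables.
		txn.SetOption(kv.RequestSourceType, defaultType)
	}
}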
- newCollationEnabled, err := loadCollationParameter(ses[0]) + newCollationEnabled, err := loadCollationParameter(ctx, ses[0]) if err != nil { return nil, err } @@ -2854,7 +2874,7 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { if err != nil { return nil, err } - err = executor.LoadOptRuleBlacklist(ses[5]) + err = executor.LoadOptRuleBlacklist(ctx, ses[5]) if err != nil { return nil, err } @@ -3012,7 +3032,8 @@ func getStoreBootstrapVersion(store kv.Storage) int64 { var ver int64 // check in kv store - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { var err error t := meta.NewMeta(txn) ver, err = t.GetBootstrapVersion() @@ -3034,7 +3055,8 @@ func getStoreBootstrapVersion(store kv.Storage) int64 { func finishBootstrap(store kv.Storage) { setStoreBootstrapped(store.UUID()) - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) err := t.FinishBootstrap(currentBootstrapVersion) return err @@ -3490,3 +3512,29 @@ func (s *session) DecodeSessionStates(ctx context.Context, sctx sessionctx.Conte // so we decode stmt ctx at last. return s.sessionVars.DecodeSessionStates(ctx, sessionStates) } + +func (s *session) setRequestSource(ctx context.Context, stmtLabel string, stmtNode ast.StmtNode) { + if !s.isInternal() { + if txn, _ := s.Txn(false); txn != nil && txn.Valid() { + txn.SetOption(kv.RequestSourceType, stmtLabel) + } else { + s.sessionVars.RequestSourceType = stmtLabel + } + } else { + if source := ctx.Value(kv.RequestSourceKey); source != nil { + s.sessionVars.RequestSourceType = source.(kv.RequestSource).RequestSourceType + } else { + // panic in test mode in case there are requests without source in the future. + // log warnings in production mode. + if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { + panic("unexpected no source type context, if you see this error, " + + "the `RequestSourceTypeKey` is missing in your context") + } else { + logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, "+ + "the `RequestSourceTypeKey` is missing in the context", + zap.Bool("internal", s.isInternal()), + zap.String("sql", stmtNode.Text())) + } + } + } +} diff --git a/session/session_test/session_test.go b/session/session_test/session_test.go index 2a0f95fcbc473..a6227be338959 100644 --- a/session/session_test/session_test.go +++ b/session/session_test/session_test.go @@ -246,16 +246,15 @@ func TestDisableTxnAutoRetry(t *testing.T) { tk1.MustExec("update no_retry set id = 5") // RestrictedSQL should retry. 
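One practical consequence of setRequestSource's panic-under-go-test guard, visible in the session_test.go and tidb_test.go hunks here, is that tests issuing internal statements must tag their contexts explicitly. A small helper in that spirit (execInternalForTest is an illustrative name; the snippet assumes the testing, require, kv, and session imports these test files already pull in):

func execInternalForTest(t *testing.T, se session.Session, sql string) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers)
	rs, err := se.ExecuteInternal(ctx, sql)
	require.NoError(t, err)
	if rs != nil {
		// Statements that return rows hand back a RecordSet; close it so the
		// underlying resources are released.
		require.NoError(t, rs.Close())
	}
}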
- tk1.Session().GetSessionVars().InRestrictedSQL = true - tk1.MustExec("begin") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + tk1.Session().ExecuteInternal(ctx, "begin") tk2.MustExec("update no_retry set id = 6") - tk1.MustExec("update no_retry set id = 7") - tk1.MustExec("commit") + tk1.Session().ExecuteInternal(ctx, "update no_retry set id = 7") + tk1.Session().ExecuteInternal(ctx, "commit") // test for disable transaction local latch - tk1.Session().GetSessionVars().InRestrictedSQL = false defer config.RestoreFunc()() config.UpdateGlobal(func(conf *config.Config) { conf.TxnLocalLatches.Enabled = false diff --git a/session/tidb_test.go b/session/tidb_test.go index 70831a8f64d89..2023921445026 100644 --- a/session/tidb_test.go +++ b/session/tidb_test.go @@ -47,7 +47,8 @@ func TestSysSessionPoolGoroutineLeak(t *testing.T) { for i := 0; i < count; i++ { s := stmts[i] wg.Run(func() { - _, _, err := se.ExecRestrictedStmt(context.Background(), s) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + _, _, err := se.ExecRestrictedStmt(ctx, s) require.NoError(t, err) }) } diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 8302f27ea953f..4ed0b20abedea 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -1158,6 +1158,9 @@ type SessionVars struct { // MaxAllowedPacket indicates the maximum size of a packet for the MySQL protocol. MaxAllowedPacket uint64 + + // RequestSourceType is the type of inner request. + RequestSourceType string } // InitStatementContext initializes a StatementContext, the object is reused to reduce allocation. diff --git a/sessiontxn/isolation/base.go b/sessiontxn/isolation/base.go index 4bcd657974053..53386fa32c47c 100644 --- a/sessiontxn/isolation/base.go +++ b/sessiontxn/isolation/base.go @@ -215,6 +215,14 @@ func (p *baseTxnContextProvider) ActivateTxn() (kv.Transaction, error) { p.onTxnActive(txn, p.enterNewTxnType) } + if p.sctx.GetSessionVars().InRestrictedSQL { + txn.SetOption(kv.RequestSourceInternal, true) + } + + if tp := p.sctx.GetSessionVars().RequestSourceType; tp != "" { + txn.SetOption(kv.RequestSourceType, tp) + } + p.txn = txn return txn, nil } diff --git a/sessiontxn/txn.go b/sessiontxn/txn.go index 6955f9c5ee7fb..b986d1048ca5f 100644 --- a/sessiontxn/txn.go +++ b/sessiontxn/txn.go @@ -76,6 +76,12 @@ func CanReuseTxnWhenExplicitBegin(sctx sessionctx.Context) bool { func GetSnapshotWithTS(s sessionctx.Context, ts uint64) kv.Snapshot { snap := s.GetStore().GetSnapshot(kv.Version{Ver: ts}) snap.SetOption(kv.SnapInterceptor, temptable.SessionSnapshotInterceptor(s)) + if s.GetSessionVars().InRestrictedSQL { + snap.SetOption(kv.RequestSourceInternal, true) + } + if tp := s.GetSessionVars().RequestSourceType; tp != "" { + snap.SetOption(kv.RequestSourceType, tp) + } return snap } diff --git a/statistics/handle/bootstrap.go b/statistics/handle/bootstrap.go index a2e5714380643..d865bb2343703 100644 --- a/statistics/handle/bootstrap.go +++ b/statistics/handle/bootstrap.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -60,8 +61,9 @@ func (h *Handle) initStatsMeta4Chunk(is infoschema.InfoSchema, cache *statsCache } func (h *Handle) initStatsMeta(is infoschema.InfoSchema) (statsCache, error) { + ctx 
:= kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY version, table_id, modify_count, count from mysql.stats_meta" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return statsCache{}, errors.Trace(err) } @@ -70,7 +72,7 @@ func (h *Handle) initStatsMeta(is infoschema.InfoSchema) (statsCache, error) { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return statsCache{}, errors.Trace(err) } @@ -159,8 +161,9 @@ func (h *Handle) initStatsHistograms4Chunk(is infoschema.InfoSchema, cache *stat } func (h *Handle) initStatsHistograms(is infoschema.InfoSchema, cache *statsCache) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, is_index, hist_id, distinct_count, version, null_count, cm_sketch, tot_col_size, stats_ver, correlation, flag, last_analyze_pos from mysql.stats_histograms" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -168,7 +171,7 @@ func (h *Handle) initStatsHistograms(is infoschema.InfoSchema, cache *statsCache req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -205,8 +208,9 @@ func (h *Handle) initStatsTopN4Chunk(cache *statsCache, iter *chunk.Iterator4Chu } func (h *Handle) initStatsTopN(cache *statsCache) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, hist_id, value, count from mysql.stats_top_n where is_index = 1" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -214,7 +218,7 @@ func (h *Handle) initStatsTopN(cache *statsCache) error { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -253,8 +257,9 @@ func (h *Handle) initStatsFMSketch4Chunk(cache *statsCache, iter *chunk.Iterator } func (h *Handle) initStatsFMSketch(cache *statsCache) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, is_index, hist_id, value from mysql.stats_fm_sketch" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -262,7 +267,7 @@ func (h *Handle) initStatsFMSketch(cache *statsCache) error { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -324,8 +329,9 @@ func (h *Handle) initTopNCountSum(tableID, colID int64) (int64, error) { // Before stats ver 2, histogram represents all data in this column. // In stats ver 2, histogram + TopN represent all data in this column. // So we need to add TopN total count here. 
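Every init* loader above shares the same read loop: execute the statement with a stats-tagged context, then drain the RecordSet chunk by chunk using that same context. A condensed sketch (readAllRows and onChunk are illustrative names; the empty-chunk check is the usual termination condition, elided in the hunks above; imports are the ones statistics/handle/bootstrap.go already has):

func readAllRows(exec sqlexec.SQLExecutor, sql string, onChunk func(*chunk.Chunk)) error {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	rc, err := exec.ExecuteInternal(ctx, sql)
	if err != nil {
		return errors.Trace(err)
	}
	defer terror.Call(rc.Close)
	req := rc.NewChunk(nil)
	for {
		if err := rc.Next(ctx, req); err != nil {
			return errors.Trace(err)
		}
		if req.NumRows() == 0 {
			return nil
		}
		onChunk(req)
	}
}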
+ ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) selSQL := "select sum(count) from mysql.stats_top_n where table_id = %? and is_index = 0 and hist_id = %?" - rs, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), selSQL, tableID, colID) + rs, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, selSQL, tableID, colID) if rs != nil { defer terror.Call(rs.Close) } @@ -334,7 +340,7 @@ func (h *Handle) initTopNCountSum(tableID, colID int64) (int64, error) { } req := rs.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) - err = rs.Next(context.TODO(), req) + err = rs.Next(ctx, req) if err != nil { return 0, err } @@ -345,8 +351,9 @@ func (h *Handle) initTopNCountSum(tableID, colID int64) (int64, error) { } func (h *Handle) initStatsBuckets(cache *statsCache) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, is_index, hist_id, count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets order by table_id, is_index, hist_id, bucket_id" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -354,7 +361,7 @@ func (h *Handle) initStatsBuckets(cache *statsCache) error { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -386,15 +393,16 @@ func (h *Handle) initStatsBuckets(cache *statsCache) error { // InitStats will init the stats cache using full load strategy. func (h *Handle) InitStats(is infoschema.InfoSchema) (err error) { loadFMSketch := config.GetGlobalConfig().Performance.EnableLoadFMSketch + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) h.mu.Lock() defer func() { - _, err1 := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "commit") + _, err1 := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") if err == nil && err1 != nil { err = err1 } h.mu.Unlock() }() - _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin") + _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") if err != nil { return err } diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go index 0c9d5cbd5e723..7e628a34c674a 100644 --- a/statistics/handle/ddl.go +++ b/statistics/handle/ddl.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -191,7 +192,7 @@ func (h *Handle) insertTableStats2KV(info *model.TableInfo, physicalID int64) (e }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { @@ -234,7 +235,7 @@ func (h *Handle) insertColStats2KV(physicalID int64, colInfos []*model.ColumnInf h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { diff --git a/statistics/handle/gc.go b/statistics/handle/gc.go index a3ac7b9191f4c..1babb4321eb9e 100644 --- 
a/statistics/handle/gc.go +++ b/statistics/handle/gc.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/sqlexec" @@ -142,7 +143,7 @@ func (h *Handle) deleteHistStatsFromKV(physicalID int64, histID int64, isIndex i h.mu.Lock() defer h.mu.Unlock() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { @@ -191,18 +192,18 @@ func (h *Handle) DeleteTableStatsFromKV(statsIDs []int64) (err error) { h.mu.Lock() defer h.mu.Unlock() exec := h.mu.ctx.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.Background(), "begin") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { return errors.Trace(err) } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) if err != nil { return errors.Trace(err) } - ctx := context.Background() startTS := txn.StartTS() for _, statsID := range statsIDs { // We only update the version so that other tidb will know that this table is deleted. @@ -241,7 +242,7 @@ func (h *Handle) removeDeletedExtendedStats(version uint64) (err error) { h.mu.Lock() defer h.mu.Unlock() exec := h.mu.ctx.(sqlexec.SQLExecutor) - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { return errors.Trace(err) diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index 6bd49b1f12302..aa4e970f1c180 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" @@ -127,12 +128,14 @@ func (h *Handle) withRestrictedSQLExecutor(ctx context.Context, fn func(context. } func (h *Handle) execRestrictedSQL(ctx context.Context, sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) { return exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, params...) 
}) } func (h *Handle) execRestrictedSQLWithStatsVer(ctx context.Context, statsVer int, procTrackID uint64, sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) { optFuncs := []sqlexec.OptionFuncAlias{ execOptionForAnalyze[statsVer], @@ -145,6 +148,7 @@ func (h *Handle) execRestrictedSQLWithStatsVer(ctx context.Context, statsVer int } func (h *Handle) execRestrictedSQLWithSnapshot(ctx context.Context, sql string, snapshot uint64, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) { optFuncs := []sqlexec.OptionFuncAlias{ sqlexec.ExecOptionWithSnapshot(snapshot), @@ -306,7 +310,7 @@ func (h *Handle) Update(is infoschema.InfoSchema, opts ...TableStatsOpt) error { } else { lastVersion = 0 } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) rows, _, err := h.execRestrictedSQL(ctx, "SELECT version, table_id, modify_count, count from mysql.stats_meta where version > %? order by version", lastVersion) if err != nil { return errors.Trace(err) @@ -1044,14 +1048,14 @@ func (h *Handle) SaveTableStatsToStorage(results *statistics.AnalyzeResults, nee }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { return err } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) if err != nil { @@ -1254,14 +1258,14 @@ func (h *Handle) SaveStatsToStorage(tableID int64, count int64, isIndex int, hg }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { return errors.Trace(err) } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) if err != nil { @@ -1355,7 +1359,7 @@ func (h *Handle) SaveMetaToStorage(tableID, count, modifyCount int64) (err error }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { @@ -1450,7 +1454,7 @@ func (h *Handle) columnCountFromStorage(reader *statsReader, tableID, colID, sta } func (h *Handle) statsMetaByTableIDFromStorage(tableID int64, snapshot uint64) (version uint64, modifyCount, count int64, err error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) var rows []chunk.Row if snapshot == 0 { rows, _, err = h.execRestrictedSQL(ctx, "SELECT version, modify_count, count from mysql.stats_meta where table_id = %? 
order by version", tableID) @@ -1477,7 +1481,7 @@ type statsReader struct { } func (sr *statsReader) read(sql string, args ...interface{}) (rows []chunk.Row, fields []*ast.ResultField, err error) { - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) if sr.snapshot > 0 { return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool, sqlexec.ExecOptionWithSnapshot(sr.snapshot)}, sql, args...) } @@ -1506,33 +1510,35 @@ func (h *Handle) releaseGlobalStatsReader(reader *statsReader) error { return h.releaseStatsReader(reader, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) } -func (h *Handle) getStatsReader(snapshot uint64, ctx sqlexec.RestrictedSQLExecutor) (reader *statsReader, err error) { +func (h *Handle) getStatsReader(snapshot uint64, exec sqlexec.RestrictedSQLExecutor) (reader *statsReader, err error) { failpoint.Inject("mockGetStatsReaderFail", func(val failpoint.Value) { if val.(bool) { failpoint.Return(nil, errors.New("gofail genStatsReader error")) } }) if snapshot > 0 { - return &statsReader{ctx: ctx, snapshot: snapshot}, nil + return &statsReader{ctx: exec, snapshot: snapshot}, nil } defer func() { if r := recover(); r != nil { err = fmt.Errorf("getStatsReader panic %v", r) } }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) failpoint.Inject("mockGetStatsReaderPanic", nil) - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin") + _, err = exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") if err != nil { return nil, err } - return &statsReader{ctx: ctx}, nil + return &statsReader{ctx: exec}, nil } -func (h *Handle) releaseStatsReader(reader *statsReader, ctx sqlexec.RestrictedSQLExecutor) error { +func (h *Handle) releaseStatsReader(reader *statsReader, exec sqlexec.RestrictedSQLExecutor) error { if reader.snapshot > 0 { return nil } - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "commit") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, err := exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") return err } @@ -1561,7 +1567,7 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t strColIDs := string(bytes) h.mu.Lock() defer h.mu.Unlock() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { @@ -1624,7 +1630,7 @@ func (h *Handle) MarkExtendedStatsDeleted(statsName string, tableID int64, ifExi err = h.recordHistoricalStatsMeta(tableID, statsVer) } }() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) rows, _, err := h.execRestrictedSQL(ctx, "SELECT name FROM mysql.stats_extended WHERE name = %? and table_id = %? and status in (%?, %?)", statsName, tableID, StatsStatusInited, StatsStatusAnalyzed) if err != nil { return errors.Trace(err) @@ -1721,7 +1727,7 @@ func (h *Handle) ReloadExtendedStatistics() error { // BuildExtendedStats build extended stats for column groups if needed based on the column samples. 
func (h *Handle) BuildExtendedStats(tableID int64, cols []*model.ColumnInfo, collectors []*statistics.SampleCollector) (*statistics.ExtendedStatsColl, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)" rows, _, err := h.execRestrictedSQL(ctx, sql, tableID, StatsStatusAnalyzed, StatsStatusInited) if err != nil { @@ -1842,7 +1848,7 @@ func (h *Handle) SaveExtendedStatsToStorage(tableID int64, extStats *statistics. } h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { @@ -1918,7 +1924,8 @@ type colStatsTimeInfo struct { // getDisableColumnTrackingTime reads the value of tidb_disable_column_tracking_time from mysql.tidb if it exists. func (h *Handle) getDisableColumnTrackingTime() (*time.Time, error) { - rows, fields, err := h.execRestrictedSQL(context.Background(), "SELECT variable_value FROM %n.%n WHERE variable_name = %?", mysql.SystemDB, mysql.TiDBTable, variable.TiDBDisableColumnTrackingTime) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, fields, err := h.execRestrictedSQL(ctx, "SELECT variable_value FROM %n.%n WHERE variable_name = %?", mysql.SystemDB, mysql.TiDBTable, variable.TiDBDisableColumnTrackingTime) if err != nil { return nil, err } @@ -1944,8 +1951,9 @@ func (h *Handle) LoadColumnStatsUsage(loc *time.Location) (map[model.TableColumn if err != nil { return nil, errors.Trace(err) } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) // Since we use another session from session pool to read mysql.column_stats_usage, which may have different @@time_zone, so we do time zone conversion here. - rows, _, err := h.execRestrictedSQL(context.Background(), "SELECT table_id, column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00'), CONVERT_TZ(last_analyzed_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage") + rows, _, err := h.execRestrictedSQL(ctx, "SELECT table_id, column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00'), CONVERT_TZ(last_analyzed_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage") if err != nil { return nil, errors.Trace(err) } @@ -1983,7 +1991,7 @@ func (h *Handle) LoadColumnStatsUsage(loc *time.Location) (map[model.TableColumn // CollectColumnsInExtendedStats returns IDs of the columns involved in extended stats. func (h *Handle) CollectColumnsInExtendedStats(tableID int64) ([]int64, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)" rows, _, err := h.execRestrictedSQL(ctx, sql, tableID, StatsStatusAnalyzed, StatsStatusInited) if err != nil { @@ -2012,7 +2020,8 @@ func (h *Handle) GetPredicateColumns(tableID int64) ([]int64, error) { if err != nil { return nil, errors.Trace(err) } - rows, _, err := h.execRestrictedSQL(context.Background(), "SELECT column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage WHERE table_id = %? 
AND last_used_at IS NOT NULL", tableID) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, _, err := h.execRestrictedSQL(ctx, "SELECT column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage WHERE table_id = %? AND last_used_at IS NOT NULL", tableID) if err != nil { return nil, errors.Trace(err) } @@ -2040,7 +2049,7 @@ const maxColumnSize = 6 << 20 // RecordHistoricalStatsToStorage records the given table's stats data to mysql.stats_history func (h *Handle) RecordHistoricalStatsToStorage(dbName string, tableInfo *model.TableInfo) (uint64, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) js, err := h.DumpStatsToJSON(dbName, tableInfo, nil) if err != nil { return 0, errors.Trace(err) @@ -2100,7 +2109,7 @@ func (h *Handle) recordHistoricalStatsMeta(tableID int64, version uint64) error return nil } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) h.mu.Lock() defer h.mu.Unlock() rows, _, err := h.execRestrictedSQL(ctx, "select modify_count, count from mysql.stats_meta where table_id = %? and version = %?", tableID, version) @@ -2133,7 +2142,7 @@ func (h *Handle) InsertAnalyzeJob(job *statistics.AnalyzeJob, instance string, p h.mu.Lock() defer h.mu.Unlock() exec := h.mu.ctx.(sqlexec.RestrictedSQLExecutor) - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) jobInfo := job.JobInfo const textMaxLength = 65535 if len(jobInfo) > textMaxLength { @@ -2156,7 +2165,8 @@ func (h *Handle) InsertAnalyzeJob(job *statistics.AnalyzeJob, instance string, p // DeleteAnalyzeJobs deletes the analyze jobs whose update time is earlier than updateTime. func (h *Handle) DeleteAnalyzeJobs(updateTime time.Time) error { - _, _, err := h.execRestrictedSQL(context.TODO(), "DELETE FROM mysql.analyze_jobs WHERE update_time < CONVERT_TZ(%?, '+00:00', @@TIME_ZONE)", updateTime.UTC().Format(types.TimeFormat)) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, _, err := h.execRestrictedSQL(ctx, "DELETE FROM mysql.analyze_jobs WHERE update_time < CONVERT_TZ(%?, '+00:00', @@TIME_ZONE)", updateTime.UTC().Format(types.TimeFormat)) return err } diff --git a/statistics/handle/update.go b/statistics/handle/update.go index 29976a0e24164..7a39fc77bed74 100644 --- a/statistics/handle/update.go +++ b/statistics/handle/update.go @@ -345,7 +345,7 @@ const batchInsertSize = 10 // DumpIndexUsageToKV will dump in-memory index usage information to KV. 
func (h *Handle) DumpIndexUsageToKV() error { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) mapper := h.sweepIdxUsageList() type FullIndexUsageInformation struct { id GlobalIndexID @@ -519,14 +519,14 @@ func (h *Handle) dumpTableStatCountToKV(id int64, delta variable.TableDelta) (up } h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { return false, errors.Trace(err) } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) @@ -579,9 +579,10 @@ func (h *Handle) dumpTableStatColSizeToKV(id int64, delta variable.TableDelta) e if len(values) == 0 { return nil } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := fmt.Sprintf("insert into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, tot_col_size) "+ "values %s on duplicate key update tot_col_size = tot_col_size + values(tot_col_size)", strings.Join(values, ",")) - _, _, err := h.execRestrictedSQL(context.Background(), sql) + _, _, err := h.execRestrictedSQL(ctx, sql) return errors.Trace(err) } @@ -631,9 +632,10 @@ func (h *Handle) DumpFeedbackToKV(fb *statistics.QueryFeedback) error { if fb.Tp == statistics.IndexType { isIndex = 1 } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "insert into mysql.stats_feedback (table_id, hist_id, is_index, feedback) values (%?, %?, %?, %?)" h.mu.Lock() - _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql, fb.PhysicalID, fb.Hist.ID, isIndex, vals) + _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, fb.PhysicalID, fb.Hist.ID, isIndex, vals) h.mu.Unlock() if err != nil { metrics.DumpFeedbackCounter.WithLabelValues(metrics.LblError).Inc() @@ -751,7 +753,7 @@ func (h *Handle) UpdateErrorRate(is infoschema.InfoSchema) { // HandleUpdateStats update the stats using feedback. func (h *Handle) HandleUpdateStats(is infoschema.InfoSchema) error { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) tables, _, err := h.execRestrictedSQL(ctx, "SELECT distinct table_id from mysql.stats_feedback") if err != nil { return errors.Trace(err) @@ -866,9 +868,10 @@ func (h *Handle) deleteOutdatedFeedback(tableID, histID, isIndex int64) error { h.mu.Lock() defer h.mu.Unlock() hasData := true + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) for hasData { sql := "delete from mysql.stats_feedback where table_id = %? and hist_id = %? and is_index = %? 
limit 10000" - _, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql, tableID, histID, isIndex) + _, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, tableID, histID, isIndex) if err != nil { return errors.Trace(err) } @@ -995,7 +998,7 @@ func NeedAnalyzeTable(tbl *statistics.Table, limit time.Duration, autoAnalyzeRat } func (h *Handle) getAutoAnalyzeParameters() map[string]string { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select variable_name, variable_value from mysql.global_variables where variable_name in (%?, %?, %?)" rows, _, err := h.execRestrictedSQL(ctx, sql, variable.TiDBAutoAnalyzeRatio, variable.TiDBAutoAnalyzeStartTime, variable.TiDBAutoAnalyzeEndTime) if err != nil { diff --git a/store/copr/coprocessor.go b/store/copr/coprocessor.go index f869711a50a05..29cc437b182e6 100644 --- a/store/copr/coprocessor.go +++ b/store/copr/coprocessor.go @@ -93,6 +93,7 @@ func (c *CopClient) Send(ctx context.Context, req *kv.Request, variables interfa req.Paging = false } ctx = context.WithValue(ctx, tikv.TxnStartKey(), req.StartTs) + ctx = context.WithValue(ctx, util.RequestSourceKey, req.RequestSource) bo := backoff.NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars) ranges := NewKeyRanges(req.KeyRanges) tasks, err := buildCopTasks(bo, c.store.GetRegionCache(), ranges, req, eventCb) @@ -165,6 +166,7 @@ type copTask struct { pagingSize uint64 partitionIndex int64 // used by balanceBatchCopTask in PartitionTableScan + requestSource util.RequestSource } func (r *copTask) String() string { @@ -212,15 +214,16 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, req *kv pagingSize = paging.MinPagingSize } tasks = append(tasks, &copTask{ - region: loc.Location.Region, - bucketsVer: loc.getBucketVersion(), - ranges: loc.Ranges.Slice(i, nextI), - respChan: make(chan *copResponse, chanSize), - cmdType: cmdType, - storeType: req.StoreType, - eventCb: eventCb, - paging: req.Paging, - pagingSize: pagingSize, + region: loc.Location.Region, + bucketsVer: loc.getBucketVersion(), + ranges: loc.Ranges.Slice(i, nextI), + respChan: make(chan *copResponse, chanSize), + cmdType: cmdType, + storeType: req.StoreType, + eventCb: eventCb, + paging: req.Paging, + pagingSize: pagingSize, + requestSource: req.RequestSource, }) i = nextI } @@ -741,6 +744,7 @@ func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch RecordTimeStat: true, RecordScanStat: true, TaskId: worker.req.TaskID, + RequestSource: task.requestSource.GetRequestSource(), }) if worker.req.ResourceGroupTagger != nil { worker.req.ResourceGroupTagger(req) @@ -895,7 +899,9 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R // We may meet RegionError at the first packet, but not during visiting the stream. return buildCopTasks(bo, worker.store.GetRegionCache(), task.ranges, worker.req, task.eventCb) } + var resolveLockDetail *util.ResolveLockDetail if lockErr := resp.pbResp.GetLocked(); lockErr != nil { + resolveLockDetail = worker.getLockResolverDetails() // Be care that we didn't redact the SQL statement because the log is DEBUG level. 
if task.eventCb != nil { task.eventCb(trxevents.WrapCopMeetLock(&trxevents.CopMeetLock{ @@ -905,11 +911,17 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R logutil.Logger(bo.GetCtx()).Debug("coprocessor encounters lock", zap.Stringer("lock", lockErr)) } - msBeforeExpired, err1 := worker.kvclient.ResolveLocks(bo.TiKVBackoffer(), worker.req.StartTs, []*txnlock.Lock{txnlock.NewLock(lockErr)}) + resolveLocksOpts := txnlock.ResolveLocksOptions{ + CallerStartTS: worker.req.StartTs, + Locks: []*txnlock.Lock{txnlock.NewLock(lockErr)}, + Detail: resolveLockDetail, + } + resolveLocksRes, err1 := worker.kvclient.ResolveLocksWithOpts(bo.TiKVBackoffer(), resolveLocksOpts) err1 = derr.ToTiDBErr(err1) if err1 != nil { return nil, errors.Trace(err1) } + msBeforeExpired := resolveLocksRes.TTL if msBeforeExpired > 0 { if err := bo.BackoffWithMaxSleepTxnLockFast(int(msBeforeExpired), errors.New(lockErr.String())); err != nil { return nil, errors.Trace(err) @@ -944,7 +956,7 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R } else if task.ranges != nil && task.ranges.Len() > 0 { resp.startKey = task.ranges.At(0).StartKey } - worker.handleCollectExecutionInfo(bo, rpcCtx, resp) + worker.handleCollectExecutionInfo(bo, rpcCtx, resp, resolveLockDetail) resp.respTime = costTime if resp.pbResp.IsCacheHit { if cacheValue == nil { @@ -1004,7 +1016,14 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R return nil, nil } -func (worker *copIteratorWorker) handleCollectExecutionInfo(bo *Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse) { +func (worker *copIteratorWorker) getLockResolverDetails() *util.ResolveLockDetail { + if !worker.enableCollectExecutionInfo { + return nil + } + return &util.ResolveLockDetail{} +} + +func (worker *copIteratorWorker) handleCollectExecutionInfo(bo *Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse, resolveLockDetail *util.ResolveLockDetail) { defer func() { worker.kvclient.Stats = nil }() @@ -1032,6 +1051,9 @@ func (worker *copIteratorWorker) handleCollectExecutionInfo(bo *Backoffer, rpcCt resp.detail.CalleeAddress = rpcCtx.Addr } sd := &util.ScanDetail{} + if resolveLockDetail != nil { + sd.ResolveLock = resolveLockDetail + } td := util.TimeDetail{} if pbDetails := resp.pbResp.ExecDetailsV2; pbDetails != nil { // Take values in `ExecDetailsV2` first. 
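Reviewer note (illustrative, not part of the patch): the coprocessor hunks above thread a request-source label from kv.Request into each copTask and finally into kvrpcpb.Context.RequestSource. Below is a minimal sketch of the tagging side, using only calls that already appear in this diff (kv.WithInternalSourceType and the client-go util helper RequestSourceFromCtx that the gc_worker.go hunk later relies on); the import paths and alias follow the patch, and the exact printed value is not asserted.

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/tidb/kv"
	tikvutil "github.com/tikv/client-go/v2/util"
)

func main() {
	// Internal components tag their root context with an internal source type.
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)

	// Lower layers (coprocessor tasks, GC scan-lock requests) read the tag back
	// to fill the RequestSource field of outgoing TiKV RPCs.
	fmt.Println(tikvutil.RequestSourceFromCtx(ctx))
}
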
diff --git a/store/driver/txn/snapshot.go b/store/driver/txn/snapshot.go index 7ecfae6a57e81..9b39e635b1d71 100644 --- a/store/driver/txn/snapshot.go +++ b/store/driver/txn/snapshot.go @@ -123,6 +123,10 @@ func (s *tikvSnapshot) SetOption(opt int, val interface{}) { s.interceptor = val.(kv.SnapshotInterceptor) case kv.RPCInterceptor: s.KVSnapshot.SetRPCInterceptor(val.(interceptor.RPCInterceptor)) + case kv.RequestSourceInternal: + s.KVSnapshot.SetRequestSourceInternal(val.(bool)) + case kv.RequestSourceType: + s.KVSnapshot.SetRequestSourceType(val.(string)) } } diff --git a/store/driver/txn/txn_driver.go b/store/driver/txn/txn_driver.go index 4cdd45a6f1f92..29baf3c48c06f 100644 --- a/store/driver/txn/txn_driver.go +++ b/store/driver/txn/txn_driver.go @@ -251,6 +251,10 @@ func (txn *tikvTxn) SetOption(opt int, val interface{}) { txn.KVTxn.SetAssertionLevel(val.(kvrpcpb.AssertionLevel)) case kv.TableToColumnMaps: txn.columnMapsCache = val + case kv.RequestSourceInternal: + txn.KVTxn.SetRequestSourceInternal(val.(bool)) + case kv.RequestSourceType: + txn.KVTxn.SetRequestSourceType(val.(string)) } } @@ -262,6 +266,8 @@ func (txn *tikvTxn) GetOption(opt int) interface{} { return txn.KVTxn.GetScope() case kv.TableToColumnMaps: return txn.columnMapsCache + case kv.RequestSourceType: + return txn.RequestSourceType default: return nil } } diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index da73f28053618..096ef5923d71f 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -111,6 +111,7 @@ func NewGCWorker(store kv.Storage, pdClient pd.Client) (*GCWorker, error) { func (w *GCWorker) Start() { var ctx context.Context ctx, w.cancel = context.WithCancel(context.Background()) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnGC) var wg sync.WaitGroup wg.Add(1) go w.start(ctx, &wg) @@ -276,7 +277,7 @@ func (w *GCWorker) Stats(vars *variable.SessionVars) (map[string]interface{}, er } func (w *GCWorker) tick(ctx context.Context) { - isLeader, err := w.checkLeader() + isLeader, err := w.checkLeader(ctx) if err != nil { logutil.Logger(ctx).Warn("[gc worker] check leader", zap.Error(err)) metrics.GCJobFailureCounter.WithLabelValues("check_leader").Inc() @@ -302,7 +303,7 @@ func (w *GCWorker) leaderTick(ctx context.Context) error { return nil } - ok, safePoint, err := w.prepare() + ok, safePoint, err := w.prepare(ctx) if err != nil || !ok { if err != nil { metrics.GCJobFailureCounter.WithLabelValues("prepare").Inc() @@ -338,14 +339,13 @@ func (w *GCWorker) leaderTick(ctx context.Context) error { // prepare checks preconditions for starting a GC job. It returns a bool // that indicates whether the GC job should start and the new safePoint. -func (w *GCWorker) prepare() (bool, uint64, error) { +func (w *GCWorker) prepare(ctx context.Context) (bool, uint64, error) { // A transaction is used here to prevent the following situation: // 1. GC checks that gcEnable is true and continues to do GC. // 2. The user sets gcEnable to false. // 3. The user reads `tikv_gc_safe_point` as t1 and therefore assumes that data newer than t1 will not be cleaned by GC. // 4. GC updates `tikv_gc_safe_point` to t2 and continues this GC round. Records dropped between t1 and t2 would then be cleaned by GC, even though the user assumed data newer than t1 was safe.
- ctx := context.Background() se := createSession(w.store) defer se.Close() _, err := se.ExecuteInternal(ctx, "BEGIN") @@ -693,7 +693,7 @@ func (w *GCWorker) deleteRanges(ctx context.Context, safePoint uint64, concurren se := createSession(w.store) defer se.Close() - ranges, err := util.LoadDeleteRanges(se, safePoint) + ranges, err := util.LoadDeleteRanges(ctx, se, safePoint) if err != nil { return errors.Trace(err) } @@ -765,7 +765,7 @@ func (w *GCWorker) redoDeleteRanges(ctx context.Context, safePoint uint64, concu redoDeleteRangesTs := safePoint - oracle.ComposeTS(int64(gcRedoDeleteRangeDelay.Seconds())*1000, 0) se := createSession(w.store) - ranges, err := util.LoadDoneDeleteRanges(se, redoDeleteRangesTs) + ranges, err := util.LoadDoneDeleteRanges(ctx, se, redoDeleteRangesTs) se.Close() if err != nil { return errors.Trace(err) @@ -1070,6 +1070,8 @@ func (w *GCWorker) resolveLocksForRange(ctx context.Context, safePoint uint64, s req := tikvrpc.NewRequest(tikvrpc.CmdScanLock, &kvrpcpb.ScanLockRequest{ MaxVersion: safePoint, Limit: gcScanLockLimit, + }, kvrpcpb.Context{ + RequestSource: tikvutil.RequestSourceFromCtx(ctx), }) failpoint.Inject("lowScanLockLimit", func() { @@ -1681,12 +1683,11 @@ func (w *GCWorker) doGC(ctx context.Context, safePoint uint64, concurrency int) return nil } -func (w *GCWorker) checkLeader() (bool, error) { +func (w *GCWorker) checkLeader(ctx context.Context) (bool, error) { metrics.GCWorkerCounter.WithLabelValues("check_leader").Inc() se := createSession(w.store) defer se.Close() - ctx := context.Background() _, err := se.ExecuteInternal(ctx, "BEGIN") if err != nil { return false, errors.Trace(err) @@ -1817,7 +1818,7 @@ func (w *GCWorker) loadDurationWithDefault(key string, def time.Duration) (*time } func (w *GCWorker) loadValueFromSysTable(key string) (string, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) se := createSession(w.store) defer se.Close() rs, err := se.ExecuteInternal(ctx, `SELECT HIGH_PRIORITY (variable_value) FROM mysql.tidb WHERE variable_name=%? FOR UPDATE`, key) @@ -1850,7 +1851,8 @@ func (w *GCWorker) saveValueToSysTable(key, value string) error { UPDATE variable_value = %?, comment = %?` se := createSession(w.store) defer se.Close() - _, err := se.ExecuteInternal(context.Background(), stmt, + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) + _, err := se.ExecuteInternal(ctx, stmt, key, value, gcVariableComments[key], value, gcVariableComments[key]) logutil.BgLogger().Debug("[gc worker] save kv", @@ -2122,6 +2124,7 @@ func NewMockGCWorker(store kv.Storage) (*MockGCWorker, error) { // DeleteRanges calls deleteRanges internally, just for test. 
func (w *MockGCWorker) DeleteRanges(ctx context.Context, safePoint uint64) error { logutil.Logger(ctx).Error("deleteRanges is called") + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnGC) return w.worker.deleteRanges(ctx, safePoint, 1) } diff --git a/store/gcworker/gc_worker_test.go b/store/gcworker/gc_worker_test.go index 049406363facd..09ec0437f6d79 100644 --- a/store/gcworker/gc_worker_test.go +++ b/store/gcworker/gc_worker_test.go @@ -60,6 +60,11 @@ type mockGCWorkerClient struct { type handler = func(addr string, req *tikvrpc.Request) (*tikvrpc.Response, error) +func gcContext() context.Context { + // internal statements must bind with resource type + return kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) +} + func (c *mockGCWorkerClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { if req.Type == tikvrpc.CmdUnsafeDestroyRange && c.unsafeDestroyRangeHandler != nil { return c.unsafeDestroyRangeHandler(addr, req) @@ -302,7 +307,7 @@ func TestPrepareGC(t *testing.T) { now, err := s.gcWorker.getOracleTime() require.NoError(t, err) close(s.gcWorker.done) - ok, _, err := s.gcWorker.prepare() + ok, _, err := s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) lastRun, err := s.gcWorker.loadTime(gcLastRunTimeKey) @@ -316,11 +321,11 @@ func TestPrepareGC(t *testing.T) { err = s.gcWorker.saveDuration(gcRunIntervalKey, time.Minute*5) require.NoError(t, err) s.oracle.AddOffset(time.Minute * 4) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) s.oracle.AddOffset(time.Minute * 2) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) @@ -328,13 +333,13 @@ func TestPrepareGC(t *testing.T) { err = s.gcWorker.saveDuration(gcLifeTimeKey, time.Minute*30) require.NoError(t, err) s.oracle.AddOffset(time.Minute * 5) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) s.oracle.AddOffset(time.Minute * 40) now, err = s.gcWorker.getOracleTime() require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) safePoint, err = s.gcWorker.loadTime(gcSafePointKey) @@ -368,12 +373,12 @@ func TestPrepareGC(t *testing.T) { s.oracle.AddOffset(time.Minute * 40) err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanTrue) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) @@ -381,7 +386,7 @@ func TestPrepareGC(t *testing.T) { s.oracle.AddOffset(time.Minute * 40) err = s.gcWorker.saveDuration(gcLifeTimeKey, time.Minute) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) lifeTime, err := s.gcWorker.loadDuration(gcLifeTimeKey) @@ -391,7 +396,7 @@ func TestPrepareGC(t *testing.T) { s.oracle.AddOffset(time.Minute * 40) err = s.gcWorker.saveDuration(gcLifeTimeKey, time.Minute*30) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) 
require.NoError(t, err) require.True(t, ok) lifeTime, err = s.gcWorker.loadDuration(gcLifeTimeKey) @@ -418,7 +423,7 @@ func TestPrepareGC(t *testing.T) { err = spkv.Put(fmt.Sprintf("%s/%s", infosync.ServerMinStartTSPath, "a"), strconv.FormatUint(minStartTS, 10)) require.NoError(t, err) s.oracle.AddOffset(time.Minute * 40) - ok, safepoint, err := s.gcWorker.prepare() + ok, safepoint, err := s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) require.Equal(t, uint64(0), safepoint) @@ -655,7 +660,7 @@ func TestDeleteRangesFailure(t *testing.T) { // Put some delete range tasks. se := createSession(s.gcWorker.store) defer se.Close() - _, err := se.Execute(context.Background(), `INSERT INTO mysql.gc_delete_range VALUES + _, err := se.Execute(gcContext(), `INSERT INTO mysql.gc_delete_range VALUES ("1", "2", "31", "32", "10"), ("3", "4", "33", "34", "10"), ("5", "6", "35", "36", "10")`) @@ -683,7 +688,7 @@ func TestDeleteRangesFailure(t *testing.T) { } // Check the DeleteRanges tasks. - preparedRanges, err := util.LoadDeleteRanges(se, 20) + preparedRanges, err := util.LoadDeleteRanges(gcContext(), se, 20) se.Close() require.NoError(t, err) require.Equal(t, ranges, preparedRanges) @@ -735,14 +740,14 @@ func TestDeleteRangesFailure(t *testing.T) { failKey = ranges[0].StartKey failStore = stores[0] - err = deleteRangeFunc(context.Background(), 20, 1) + err = deleteRangeFunc(gcContext(), 20, 1) require.NoError(t, err) s.checkDestroyRangeReq(t, sendReqCh, ranges, stores) // The first delete range task should be still here since it didn't success. se = createSession(s.gcWorker.store) - remainingRanges, err := loadRangesFunc(se, 20) + remainingRanges, err := loadRangesFunc(gcContext(), se, 20) se.Close() require.NoError(t, err) require.Equal(t, ranges[:1], remainingRanges) @@ -751,12 +756,12 @@ func TestDeleteRangesFailure(t *testing.T) { failStore = nil // Delete the remaining range again. - err = deleteRangeFunc(context.Background(), 20, 1) + err = deleteRangeFunc(gcContext(), 20, 1) require.NoError(t, err) s.checkDestroyRangeReq(t, sendReqCh, ranges[:1], stores) se = createSession(s.gcWorker.store) - remainingRanges, err = loadRangesFunc(se, 20) + remainingRanges, err = loadRangesFunc(gcContext(), se, 20) se.Close() require.NoError(t, err) require.Len(t, remainingRanges, 0) @@ -836,7 +841,7 @@ func TestLeaderTick(t *testing.T) { // Skip if prepare failed (disabling GC will make prepare returns ok = false). err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) require.NoError(t, err) - err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) s.checkNotCollected(t, p) err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanTrue) @@ -847,7 +852,7 @@ func TestLeaderTick(t *testing.T) { // Skip if gcWaitTime not exceeded. s.gcWorker.lastFinish = time.Now() - err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) s.checkNotCollected(t, p) s.gcWorker.lastFinish = time.Now().Add(-veryLong) @@ -856,7 +861,7 @@ func TestLeaderTick(t *testing.T) { require.NoError(t, err) // Continue GC if all those checks passed. 
- err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) // Wait for GC finish select { @@ -876,7 +881,7 @@ func TestLeaderTick(t *testing.T) { p = s.createGCProbe(t, "k1") s.oracle.AddOffset(gcDefaultLifeTime * 2) - err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) // Wait for GC finish select { @@ -910,7 +915,7 @@ func TestResolveLockRangeInfine(t *testing.T) { require.NoError(t, failpoint.Disable("tikvclient/invalidCacheAndRetry")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/gcworker/setGcResolveMaxBackoff")) }() - _, err := s.gcWorker.resolveLocksForRange(context.Background(), 1, []byte{0}, []byte{1}) + _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, []byte{0}, []byte{1}) require.Error(t, err) } @@ -944,7 +949,7 @@ func TestResolveLockRangeMeetRegionCacheMiss(t *testing.T) { } return true, nil } - _, err := s.gcWorker.resolveLocksForRange(context.Background(), 1, []byte{0}, []byte{10}) + _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, []byte{0}, []byte{10}) require.NoError(t, err) require.Equal(t, 2, resolveCnt) require.Equal(t, 1, scanCnt) @@ -1020,7 +1025,7 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { return true, nil } - _, err := s.gcWorker.resolveLocksForRange(context.Background(), 1, []byte(""), []byte("z")) + _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, []byte(""), []byte("z")) require.NoError(t, err) require.Len(t, resolvedLock, 4) expects := [][]byte{[]byte("a"), []byte("b"), []byte("o"), []byte("p")} @@ -1039,7 +1044,7 @@ func TestRunGCJob(t *testing.T) { useDistributedGC := s.gcWorker.checkUseDistributedGC() require.True(t, useDistributedGC) safePoint := s.mustAllocTs(t) - err := s.gcWorker.runGCJob(context.Background(), safePoint, 1) + err := s.gcWorker.runGCJob(gcContext(), safePoint, 1) require.NoError(t, err) pdSafePoint := s.mustGetSafePointFromPd(t) @@ -1049,7 +1054,7 @@ func TestRunGCJob(t *testing.T) { require.Equal(t, safePoint, etcdSafePoint) // Test distributed mode with safePoint regressing (although this is impossible) - err = s.gcWorker.runGCJob(context.Background(), safePoint-1, 1) + err = s.gcWorker.runGCJob(gcContext(), safePoint-1, 1) require.Error(t, err) // Central mode is deprecated in v5.0, fallback to distributed mode if it's set. 
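Aside (a sketch, not something this patch adds): the tests above all route through gcContext(), which tags the context; the same information can also be attached at the transaction level through the SetOption cases added to store/driver/txn earlier in this diff. The helper below assumes store is a kv.Storage and that kv.InternalTxnGC is the string constant it is used as elsewhere in the patch; the package and function names are made up for illustration.

package gcexample // hypothetical package, for illustration only

import "github.com/pingcap/tidb/kv"

// beginInternalGCTxn labels a transaction as internal GC traffic via the new
// RequestSourceInternal / RequestSourceType options. Whether a caller should set
// these directly, rather than relying only on the tagged context, is an
// assumption made here for the sake of the example.
func beginInternalGCTxn(store kv.Storage) (kv.Transaction, error) {
	txn, err := store.Begin()
	if err != nil {
		return nil, err
	}
	txn.SetOption(kv.RequestSourceInternal, true)         // handled by tikvTxn.SetOption above
	txn.SetOption(kv.RequestSourceType, kv.InternalTxnGC) // handled by tikvTxn.SetOption above
	return txn, nil
}
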
@@ -1060,7 +1065,7 @@ func TestRunGCJob(t *testing.T) { p := s.createGCProbe(t, "k1") safePoint = s.mustAllocTs(t) - err = s.gcWorker.runGCJob(context.Background(), safePoint, 1) + err = s.gcWorker.runGCJob(gcContext(), safePoint, 1) require.NoError(t, err) s.checkCollected(t, p) @@ -1117,7 +1122,7 @@ func TestRunGCJobAPI(t *testing.T) { p := s.createGCProbe(t, "k1") safePoint := s.mustAllocTs(t) - err := RunGCJob(context.Background(), s.tikvStore, s.pdClient, safePoint, "mock", 1) + err := RunGCJob(gcContext(), s.tikvStore, s.pdClient, safePoint, "mock", 1) require.NoError(t, err) s.checkCollected(t, p) etcdSafePoint := s.loadEtcdSafePoint(t) @@ -1132,7 +1137,7 @@ func TestRunDistGCJobAPI(t *testing.T) { gcSafePointCacheInterval = 0 safePoint := s.mustAllocTs(t) - err := RunDistributedGCJob(context.Background(), s.tikvStore, s.pdClient, safePoint, "mock", 1) + err := RunDistributedGCJob(gcContext(), s.tikvStore, s.pdClient, safePoint, "mock", 1) require.NoError(t, err) pdSafePoint := s.mustGetSafePointFromPd(t) require.Equal(t, safePoint, pdSafePoint) @@ -1207,7 +1212,7 @@ func (s *mockGCWorkerSuite) makeMergedMockClient(t *testing.T, count int) (*merg const scanLockLimit = 3 - storesMap, err := s.gcWorker.getStoresMapForGC(context.Background()) + storesMap, err := s.gcWorker.getStoresMapForGC(gcContext()) require.NoError(t, err) scanner := newMergeLockScanner(100000, s.client, storesMap) scanner.scanLockLimit = scanLockLimit @@ -1252,7 +1257,7 @@ func (s *mockGCWorkerSuite) makeMergedMockClient(t *testing.T, count int) (*merg resultCh := make(chan []*txnlock.Lock) // Initializing the scanner and getting its result are blocking operations, so collect the result in a separate goroutine. go func() { - err := scanner.Start(context.Background()) + err := scanner.Start(gcContext()) require.NoError(t, err) // Get a batch large enough to hold all results.
result := scanner.NextBatch(1000) @@ -1462,7 +1467,7 @@ func TestResolveLocksPhysical(t *testing.T) { s.client.removeLockObserverHandler = alwaysSucceedHandler } - ctx := context.Background() + ctx := gcContext() var safePoint uint64 = 10000 // No lock @@ -1618,7 +1623,7 @@ func TestPhysicalScanLockDeadlock(t *testing.T) { s, clean := createGCWorkerSuite(t) defer clean() - ctx := context.Background() + ctx := gcContext() stores := s.cluster.GetAllStores() require.Greater(t, len(stores), 1) @@ -1733,7 +1738,7 @@ func TestGCWithPendingTxn(t *testing.T) { s, clean := createGCWorkerSuite(t) defer clean() - ctx := context.Background() + ctx := gcContext() gcSafePointCacheInterval = 0 err := s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) require.NoError(t, err) diff --git a/store/store_test.go b/store/store_test.go index 698f3d4714ce3..081462bb9bc85 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -653,7 +653,8 @@ func TestIsolationInc(t *testing.T) { defer wg.Done() for j := 0; j < 100; j++ { var id int64 - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { var err1 error id, err1 = kv.IncInt64(txn, []byte("key"), 1) return err1 @@ -698,12 +699,13 @@ func TestIsolationMultiInc(t *testing.T) { var wg sync.WaitGroup + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) wg.Add(threadCnt) for i := 0; i < threadCnt; i++ { go func() { defer wg.Done() for j := 0; j < incCnt; j++ { - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { for _, key := range keys { _, err1 := kv.IncInt64(txn, key, 1) if err1 != nil { @@ -720,7 +722,7 @@ func TestIsolationMultiInc(t *testing.T) { wg.Wait() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { for _, key := range keys { id, err1 := kv.GetInt64(context.TODO(), txn, key) if err1 != nil { diff --git a/structure/structure_test.go b/structure/structure_test.go index 9da07b2dba604..63300f004c533 100644 --- a/structure/structure_test.go +++ b/structure/structure_test.go @@ -316,7 +316,8 @@ func TestHash(t *testing.T) { err = txn.Commit(context.Background()) require.NoError(t, err) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { newTxn := structure.NewStructure(txn, txn, []byte{0x00}) err = newTxn.Set(key, []byte("abc")) require.NoError(t, err) diff --git a/table/tables/cache.go b/table/tables/cache.go index fc9f3f52ce16c..4dc2bfc5c6209 100644 --- a/table/tables/cache.go +++ b/table/tables/cache.go @@ -146,7 +146,8 @@ func (c *cachedTable) loadDataFromOriginalTable(store kv.Storage) (kv.MemBuffer, } var startTS uint64 totalSize := int64(0) - err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnCacheTable) + err = 
kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { prefix := tablecodec.GenTablePrefix(c.tableID) if err != nil { return errors.Trace(err) diff --git a/table/tables/state_remote.go b/table/tables/state_remote.go index 4a8d0b39b632c..3e8ff3e90f801 100644 --- a/table/tables/state_remote.go +++ b/table/tables/state_remote.go @@ -20,6 +20,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/sqlexec" @@ -385,6 +386,7 @@ func (h *stateRemoteHandle) rollbackTxn(ctx context.Context) error { } func (h *stateRemoteHandle) runInTxn(ctx context.Context, pessimistic bool, fn func(ctx context.Context, txnTS uint64) error) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := h.beginTxn(ctx, pessimistic) if err != nil { return errors.Trace(err) } @@ -415,6 +417,7 @@ func (h *stateRemoteHandle) runInTxn(ctx context.Context, pessimistic bool, fn f } func (h *stateRemoteHandle) loadRow(ctx context.Context, tid int64, forUpdate bool) (CachedTableLockType, uint64, uint64, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) var chunkRows []chunk.Row var err error if forUpdate { diff --git a/table/tables/state_remote_test.go b/table/tables/state_remote_test.go index 5d75c5e47d129..140c2fc1e3da9 100644 --- a/table/tables/state_remote_test.go +++ b/table/tables/state_remote_test.go @@ -40,7 +40,7 @@ func TestStateRemote(t *testing.T) { tk.MustExec("use test") se := tk.Session() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) h := tables.NewStateRemote(se) // Check the initial value. diff --git a/table/tables/tables.go b/table/tables/tables.go index aab32e1f18d8f..9b35a1af8cc3b 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -1246,18 +1246,19 @@ func (t *TableCommon) addDeleteBinlog(ctx sessionctx.Context, r []types.Datum, c return nil } -func writeSequenceUpdateValueBinlog(ctx sessionctx.Context, db, sequence string, end int64) error { +func writeSequenceUpdateValueBinlog(sctx sessionctx.Context, db, sequence string, end int64) error { // 1: when sequenceCommon updates the local cache passively. // 2: when sequenceCommon calls setval on the allocator actively. // In both cases the upper bound of the sequence has changed in meta, so the change needs to be written to the binlog // for the downstream. // Sequence sends the `select setval(seq, num)` SQL string to the downstream via `setDDLBinlog`, which is mocked as a DDL binlog. - binlogCli := ctx.GetSessionVars().BinlogClient - sqlMode := ctx.GetSessionVars().SQLMode + binlogCli := sctx.GetSessionVars().BinlogClient + sqlMode := sctx.GetSessionVars().SQLMode sequenceFullName := stringutil.Escape(db, sqlMode) + "."
+ stringutil.Escape(sequence, sqlMode) sql := "select setval(" + sequenceFullName + ", " + strconv.FormatInt(end, 10) + ")" - err := kv.RunInNewTxn(context.Background(), ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, sctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) mockJobID, err := m.GenGlobalID() if err != nil { diff --git a/table/temptable/ddl.go b/table/temptable/ddl.go index fddd0bbbeabf5..ccad2b7b0214c 100644 --- a/table/temptable/ddl.go +++ b/table/temptable/ddl.go @@ -161,7 +161,8 @@ func newTemporaryTableFromTableInfo(sctx sessionctx.Context, tbInfo *model.Table // Local temporary table uses a real table ID. // We could mock a table ID, but the mocked ID might be identical to an existing // real table, and then we'll get into trouble. - err := kv.RunInNewTxn(context.Background(), sctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnCacheTable) + err := kv.RunInNewTxn(ctx, sctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) tblID, err := m.GenGlobalID() if err != nil { diff --git a/telemetry/BUILD.bazel b/telemetry/BUILD.bazel index 73ae1814d3308..c97153a77d4c0 100644 --- a/telemetry/BUILD.bazel +++ b/telemetry/BUILD.bazel @@ -21,6 +21,7 @@ go_library( "//config", "//domain/infosync", "//infoschema", + "//kv", "//metrics", "//parser/model", "//parser/mysql", @@ -56,7 +57,9 @@ go_test( embed = [":telemetry"], deps = [ "//config", + "//kv", "//session", + "//sessionctx", "//sessionctx/variable", "//testkit", "//testkit/testsetup", diff --git a/telemetry/data.go b/telemetry/data.go index ddf574e3bf1ed..048d458797966 100644 --- a/telemetry/data.go +++ b/telemetry/data.go @@ -15,8 +15,10 @@ package telemetry import ( + "context" "time" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" ) @@ -31,21 +33,22 @@ type telemetryData struct { SlowQueryStats *slowQueryStats `json:"slowQueryStats"` } -func generateTelemetryData(ctx sessionctx.Context, trackingID string) telemetryData { +func generateTelemetryData(sctx sessionctx.Context, trackingID string) telemetryData { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTelemetry) r := telemetryData{ ReportTimestamp: time.Now().Unix(), TrackingID: trackingID, } - if h, err := getClusterHardware(ctx); err == nil { + if h, err := getClusterHardware(ctx, sctx); err == nil { r.Hardware = h } - if i, err := getClusterInfo(ctx); err == nil { + if i, err := getClusterInfo(ctx, sctx); err == nil { r.Instances = i } - if f, err := getFeatureUsage(ctx); err == nil { + if f, err := getFeatureUsage(ctx, sctx); err == nil { r.FeatureUsage = f } - if s, err := getSlowQueryStats(ctx); err == nil { + if s, err := getSlowQueryStats(ctx, sctx); err == nil { r.SlowQueryStats = s } diff --git a/telemetry/data_cluster_hardware.go b/telemetry/data_cluster_hardware.go index d357e9243fd0b..b2b4a197ec273 100644 --- a/telemetry/data_cluster_hardware.go +++ b/telemetry/data_cluster_hardware.go @@ -67,9 +67,9 @@ func normalizeFieldName(name string) string { return strcase.ToLowerCamel(name) } -func getClusterHardware(ctx sessionctx.Context) ([]*clusterHardwareItem, error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT TYPE, INSTANCE, 
DEVICE_TYPE, DEVICE_NAME, NAME, VALUE FROM information_schema.cluster_hardware`) +func getClusterHardware(ctx context.Context, sctx sessionctx.Context) ([]*clusterHardwareItem, error) { + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT TYPE, INSTANCE, DEVICE_TYPE, DEVICE_NAME, NAME, VALUE FROM information_schema.cluster_hardware`) if err != nil { return nil, errors.Trace(err) } diff --git a/telemetry/data_cluster_info.go b/telemetry/data_cluster_info.go index 40f87bccdfd3d..7cda111e785ca 100644 --- a/telemetry/data_cluster_info.go +++ b/telemetry/data_cluster_info.go @@ -34,10 +34,10 @@ type clusterInfoItem struct { UpTime string `json:"upTime,omitempty"` } -func getClusterInfo(ctx sessionctx.Context) ([]*clusterInfoItem, error) { +func getClusterInfo(ctx context.Context, sctx sessionctx.Context) ([]*clusterInfoItem, error) { // Explicitly list all field names instead of using `*` to avoid potential leaking sensitive info when adding new fields in future. - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT TYPE, INSTANCE, STATUS_ADDRESS, VERSION, GIT_HASH, START_TIME, UPTIME FROM information_schema.cluster_info`) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT TYPE, INSTANCE, STATUS_ADDRESS, VERSION, GIT_HASH, START_TIME, UPTIME FROM information_schema.cluster_info`) if err != nil { return nil, errors.Trace(err) } diff --git a/telemetry/data_feature_usage.go b/telemetry/data_feature_usage.go index 622f25c4b2430..af5412afbc910 100644 --- a/telemetry/data_feature_usage.go +++ b/telemetry/data_feature_usage.go @@ -56,23 +56,23 @@ type placementPolicyUsage struct { NumPartitionWithExplicitPolicies uint64 `json:"numPartitionWithExplicitPolicies"` } -func getFeatureUsage(ctx sessionctx.Context) (*featureUsage, error) { +func getFeatureUsage(ctx context.Context, sctx sessionctx.Context) (*featureUsage, error) { var usage featureUsage var err error - usage.NewClusterIndex, usage.ClusterIndex, err = getClusterIndexUsageInfo(ctx) + usage.NewClusterIndex, usage.ClusterIndex, err = getClusterIndexUsageInfo(ctx, sctx) if err != nil { logutil.BgLogger().Info(err.Error()) return nil, err } // transaction related feature - usage.Txn = getTxnUsageInfo(ctx) + usage.Txn = getTxnUsageInfo(sctx) usage.CTE = getCTEUsageInfo() - usage.AutoCapture = getAutoCaptureUsageInfo(ctx) + usage.AutoCapture = getAutoCaptureUsageInfo(sctx) - collectFeatureUsageFromInfoschema(ctx, &usage) + collectFeatureUsageFromInfoschema(sctx, &usage) usage.NonTransactionalUsage = getNonTransactionalUsage() @@ -142,12 +142,12 @@ type NewClusterIndexUsage struct { } // getClusterIndexUsageInfo gets the ClusterIndex usage information. It's exported for future test. 
-func getClusterIndexUsageInfo(ctx sessionctx.Context) (ncu *NewClusterIndexUsage, cu *ClusterIndexUsage, err error) { +func getClusterIndexUsageInfo(ctx context.Context, sctx sessionctx.Context) (ncu *NewClusterIndexUsage, cu *ClusterIndexUsage, err error) { var newUsage NewClusterIndexUsage - exec := ctx.(sqlexec.RestrictedSQLExecutor) + exec := sctx.(sqlexec.RestrictedSQLExecutor) // query INFORMATION_SCHEMA.tables to get the latest table information about ClusterIndex - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, ` + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, ` SELECT TIDB_PK_TYPE FROM information_schema.tables WHERE table_schema not in ('INFORMATION_SCHEMA', 'METRICS_SCHEMA', 'PERFORMANCE_SCHEMA', 'mysql')`) @@ -168,7 +168,7 @@ func getClusterIndexUsageInfo(ctx sessionctx.Context) (ncu *NewClusterIndexUsage } }() - err = ctx.RefreshTxnCtx(context.TODO()) + err = sctx.RefreshTxnCtx(ctx) if err != nil { return nil, nil, err } diff --git a/telemetry/data_slow_query.go b/telemetry/data_slow_query.go index e32940e09be6a..7b1b9ef00d440 100644 --- a/telemetry/data_slow_query.go +++ b/telemetry/data_slow_query.go @@ -62,8 +62,8 @@ var ( slowQueryLock sync.Mutex ) -func getSlowQueryStats(ctx sessionctx.Context) (*slowQueryStats, error) { - slowQueryBucket, err := getSlowQueryBucket(ctx) +func getSlowQueryStats(ctx context.Context, sctx sessionctx.Context) (*slowQueryStats, error) { + slowQueryBucket, err := getSlowQueryBucket(sctx) if err != nil { logutil.BgLogger().Info(err.Error()) return nil, err @@ -73,9 +73,9 @@ func getSlowQueryStats(ctx sessionctx.Context) (*slowQueryStats, error) { } // getSlowQueryBucket generates the delta SlowQueryBucket to report -func getSlowQueryBucket(ctx sessionctx.Context) (*SlowQueryBucket, error) { +func getSlowQueryBucket(sctx sessionctx.Context) (*SlowQueryBucket, error) { // update currentSQBInfo first, then gen delta - if err := updateCurrentSQB(ctx); err != nil { + if err := updateCurrentSQB(sctx); err != nil { return nil, err } delta := calculateDeltaSQB() @@ -83,7 +83,7 @@ func getSlowQueryBucket(ctx sessionctx.Context) (*SlowQueryBucket, error) { } // updateCurrentSQB records current slow query buckets -func updateCurrentSQB(ctx sessionctx.Context) (err error) { +func updateCurrentSQB(sctx sessionctx.Context) (err error) { defer func() { if r := recover(); r != nil { err = pingcapErrors.Errorf(fmt.Sprintln(r)) diff --git a/telemetry/main_test.go b/telemetry/main_test.go index c3d817ebeabee..cee65c94b707e 100644 --- a/telemetry/main_test.go +++ b/telemetry/main_test.go @@ -15,17 +15,24 @@ package telemetry import ( + "context" "testing" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/testkit/testsetup" "go.uber.org/goleak" ) var ( - GetFeatureUsage = getFeatureUsage GetTxnUsageInfo = getTxnUsageInfo ) +func GetFeatureUsage(sctx sessionctx.Context) (*featureUsage, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTelemetry) + return getFeatureUsage(ctx, sctx) +} + func TestMain(m *testing.M) { testsetup.SetupForCommonTest() diff --git a/telemetry/telemetry.go b/telemetry/telemetry.go index 3f383b3d426e8..645aad0e90c08 100644 --- a/telemetry/telemetry.go +++ b/telemetry/telemetry.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/logutil" @@ -122,7 
+123,8 @@ func reportUsageData(ctx sessionctx.Context, etcdClient *clientv3.Client) (bool, // TODO: We should use the context from domain, so that when request is blocked for a long time it will not // affect TiDB shutdown. - reqCtx, cancel := context.WithTimeout(context.Background(), uploadTimeout) + reqCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTelemetry) + reqCtx, cancel := context.WithTimeout(reqCtx, uploadTimeout) defer cancel() req, err := http.NewRequestWithContext(reqCtx, "POST", apiEndpoint, bytes.NewReader(rawJSON)) diff --git a/util/admin/admin.go b/util/admin/admin.go index dee2df5b209c2..27b889c127fa0 100644 --- a/util/admin/admin.go +++ b/util/admin/admin.go @@ -45,7 +45,8 @@ type RecordData struct { } func getCount(exec sqlexec.RestrictedSQLExecutor, snapshot uint64, sql string, args ...interface{}) (int64, error) { - rows, _, err := exec.ExecRestrictedSQL(context.Background(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(snapshot)}, sql, args...) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnAdmin) + rows, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(snapshot)}, sql, args...) if err != nil { return 0, errors.Trace(err) } diff --git a/util/execdetails/execdetails.go b/util/execdetails/execdetails.go index 5805b4e08bc70..23ef49ff56ce1 100644 --- a/util/execdetails/execdetails.go +++ b/util/execdetails/execdetails.go @@ -153,7 +153,7 @@ func (d ExecDetails) String() string { parts = append(parts, BackoffTypesStr+": "+fmt.Sprintf("%v", commitDetails.Mu.BackoffTypes)) } commitDetails.Mu.Unlock() - resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) + resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLock.ResolveLockTime) if resolveLockTime > 0 { parts = append(parts, ResolveLockTimeStr+": "+strconv.FormatFloat(time.Duration(resolveLockTime).Seconds(), 'f', -1, 64)) } @@ -245,7 +245,7 @@ func (d ExecDetails) ToZapFields() (fields []zap.Field) { fields = append(fields, zap.String("backoff_types", fmt.Sprintf("%v", commitDetails.Mu.BackoffTypes))) } commitDetails.Mu.Unlock() - resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) + resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLock.ResolveLockTime) if resolveLockTime > 0 { fields = append(fields, zap.String("resolve_lock_time", fmt.Sprintf("%v", strconv.FormatFloat(time.Duration(resolveLockTime).Seconds(), 'f', -1, 64)+"s"))) } @@ -862,9 +862,9 @@ func (e *RuntimeStatsWithCommit) String() string { buf.WriteString("}") } e.Commit.Mu.Unlock() - if e.Commit.ResolveLockTime > 0 { + if e.Commit.ResolveLock.ResolveLockTime > 0 { buf.WriteString(", resolve_lock: ") - buf.WriteString(FormatDuration(time.Duration(e.Commit.ResolveLockTime))) + buf.WriteString(FormatDuration(time.Duration(e.Commit.ResolveLock.ResolveLockTime))) } prewriteRegionNum := atomic.LoadInt32(&e.Commit.PrewriteRegionNum) @@ -903,9 +903,9 @@ func (e *RuntimeStatsWithCommit) String() string { buf.WriteString(", keys:") buf.WriteString(strconv.FormatInt(int64(e.LockKeys.LockKeys), 10)) } - if e.LockKeys.ResolveLockTime > 0 { + if e.LockKeys.ResolveLock.ResolveLockTime > 0 { buf.WriteString(", resolve_lock:") - buf.WriteString(FormatDuration(time.Duration(e.LockKeys.ResolveLockTime))) + buf.WriteString(FormatDuration(time.Duration(e.LockKeys.ResolveLock.ResolveLockTime))) } if e.LockKeys.BackoffTime > 0 { buf.WriteString(", backoff: {time: ") diff --git a/util/execdetails/execdetails_test.go 
b/util/execdetails/execdetails_test.go index 190d42322e4da..b13a09725c942 100644 --- a/util/execdetails/execdetails_test.go +++ b/util/execdetails/execdetails_test.go @@ -47,11 +47,13 @@ func TestString(t *testing.T) { "backoff2", }, }, - ResolveLockTime: 1000000000, // 10^9 ns = 1s WriteKeys: 1, WriteSize: 1, PrewriteRegionNum: 1, TxnRetry: 1, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 1000000000, // 10^9 ns = 1s + }, }, ScanDetail: &util.ScanDetail{ ProcessedKeys: 10, @@ -188,11 +190,13 @@ func TestRuntimeStatsWithCommit(t *testing.T) { CommitBackoffTime: int64(time.Second), BackoffTypes: []string{"backoff1", "backoff2", "backoff1"}, }, - ResolveLockTime: int64(time.Second), WriteKeys: 3, WriteSize: 66, PrewriteRegionNum: 5, TxnRetry: 2, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: int64(time.Second), + }, } stats := &RuntimeStatsWithCommit{ Commit: commitDetail, @@ -201,11 +205,10 @@ func TestRuntimeStatsWithCommit(t *testing.T) { require.Equal(t, expect, stats.String()) lockDetail := &util.LockKeysDetails{ - TotalTime: time.Second, - RegionNum: 2, - LockKeys: 10, - ResolveLockTime: int64(time.Second * 2), - BackoffTime: int64(time.Second * 3), + TotalTime: time.Second, + RegionNum: 2, + LockKeys: 10, + BackoffTime: int64(time.Second * 3), Mu: struct { sync.Mutex BackoffTypes []string @@ -217,6 +220,9 @@ func TestRuntimeStatsWithCommit(t *testing.T) { LockRPCTime: int64(time.Second * 5), LockRPCCount: 50, RetryCount: 2, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: int64(time.Second * 2), + }, } stats = &RuntimeStatsWithCommit{ LockKeys: lockDetail, diff --git a/util/gcutil/BUILD.bazel b/util/gcutil/BUILD.bazel index f56fff242ab1b..fc6c882078726 100644 --- a/util/gcutil/BUILD.bazel +++ b/util/gcutil/BUILD.bazel @@ -6,6 +6,7 @@ go_library( importpath = "github.com/pingcap/tidb/util/gcutil", visibility = ["//visibility:public"], deps = [ + "//kv", "//parser/model", "//sessionctx", "//sessionctx/variable", diff --git a/util/gcutil/gcutil.go b/util/gcutil/gcutil.go index 8c60534f9c265..0d3ae7da53ee2 100644 --- a/util/gcutil/gcutil.go +++ b/util/gcutil/gcutil.go @@ -18,6 +18,7 @@ import ( "context" "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -70,9 +71,10 @@ func ValidateSnapshotWithGCSafePoint(snapshotTS, safePointTS uint64) error { } // GetGCSafePoint loads GC safe point time from mysql.tidb. 
-func GetGCSafePoint(ctx sessionctx.Context) (uint64, error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.Background(), nil, selectVariableValueSQL, "tikv_gc_safe_point") +func GetGCSafePoint(sctx sessionctx.Context) (uint64, error) { + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, selectVariableValueSQL, "tikv_gc_safe_point") if err != nil { return 0, errors.Trace(err) } diff --git a/util/stmtsummary/statement_summary.go b/util/stmtsummary/statement_summary.go index 9271f5c274ace..b0689df045d54 100644 --- a/util/stmtsummary/statement_summary.go +++ b/util/stmtsummary/statement_summary.go @@ -753,7 +753,7 @@ func (ssElement *stmtSummaryByDigestElement) add(sei *StmtExecInfo, intervalSeco if commitDetails.GetCommitTsTime > ssElement.maxGetCommitTsTime { ssElement.maxGetCommitTsTime = commitDetails.GetCommitTsTime } - resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) + resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLock.ResolveLockTime) ssElement.sumResolveLockTime += resolveLockTime if resolveLockTime > ssElement.maxResolveLockTime { ssElement.maxResolveLockTime = resolveLockTime diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go index a84eb8799af07..041de2989241f 100644 --- a/util/stmtsummary/statement_summary_test.go +++ b/util/stmtsummary/statement_summary_test.go @@ -114,8 +114,8 @@ func TestAddStatement(t *testing.T) { maxLocalLatchTime: stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime, sumCommitBackoffTime: stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, maxCommitBackoffTime: stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, - sumResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, - maxResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + sumResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, + maxResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, sumWriteKeys: int64(stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys), maxWriteKeys: stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, sumWriteSize: int64(stmtExecInfo1.ExecDetail.CommitDetail.WriteSize), @@ -191,11 +191,13 @@ func TestAddStatement(t *testing.T) { CommitBackoffTime: 1000, BackoffTypes: []string{boTxnLockName}, }, - ResolveLockTime: 10000, WriteKeys: 100000, WriteSize: 1000000, PrewriteRegionNum: 100, TxnRetry: 10, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 10000, + }, }, ScanDetail: &util.ScanDetail{ TotalKeys: 6000, @@ -256,8 +258,8 @@ func TestAddStatement(t *testing.T) { expectedSummaryElement.sumCommitBackoffTime += stmtExecInfo2.ExecDetail.CommitDetail.Mu.CommitBackoffTime expectedSummaryElement.maxCommitBackoffTime = stmtExecInfo2.ExecDetail.CommitDetail.Mu.CommitBackoffTime stmtExecInfo2.ExecDetail.CommitDetail.Mu.Unlock() - expectedSummaryElement.sumResolveLockTime += stmtExecInfo2.ExecDetail.CommitDetail.ResolveLockTime - expectedSummaryElement.maxResolveLockTime = stmtExecInfo2.ExecDetail.CommitDetail.ResolveLockTime + expectedSummaryElement.sumResolveLockTime += stmtExecInfo2.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime + expectedSummaryElement.maxResolveLockTime = stmtExecInfo2.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime expectedSummaryElement.sumWriteKeys += 
int64(stmtExecInfo2.ExecDetail.CommitDetail.WriteKeys) expectedSummaryElement.maxWriteKeys = stmtExecInfo2.ExecDetail.CommitDetail.WriteKeys expectedSummaryElement.sumWriteSize += int64(stmtExecInfo2.ExecDetail.CommitDetail.WriteSize) @@ -321,11 +323,13 @@ func TestAddStatement(t *testing.T) { CommitBackoffTime: 100, BackoffTypes: []string{boTxnLockName}, }, - ResolveLockTime: 1000, WriteKeys: 10000, WriteSize: 100000, PrewriteRegionNum: 10, TxnRetry: 1, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 1000, + }, }, ScanDetail: &util.ScanDetail{ TotalKeys: 600, @@ -370,7 +374,7 @@ func TestAddStatement(t *testing.T) { stmtExecInfo3.ExecDetail.CommitDetail.Mu.Lock() expectedSummaryElement.sumCommitBackoffTime += stmtExecInfo3.ExecDetail.CommitDetail.Mu.CommitBackoffTime stmtExecInfo3.ExecDetail.CommitDetail.Mu.Unlock() - expectedSummaryElement.sumResolveLockTime += stmtExecInfo3.ExecDetail.CommitDetail.ResolveLockTime + expectedSummaryElement.sumResolveLockTime += stmtExecInfo3.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime expectedSummaryElement.sumWriteKeys += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteKeys) expectedSummaryElement.sumWriteSize += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteSize) expectedSummaryElement.sumPrewriteRegionNum += int64(stmtExecInfo3.ExecDetail.CommitDetail.PrewriteRegionNum) @@ -605,11 +609,13 @@ func generateAnyExecInfo() *StmtExecInfo { CommitBackoffTime: 200, BackoffTypes: []string{boTxnLockName}, }, - ResolveLockTime: 2000, WriteKeys: 20000, WriteSize: 200000, PrewriteRegionNum: 20, TxnRetry: 2, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 2000, + }, }, ScanDetail: &util.ScanDetail{ TotalKeys: 1000, @@ -781,7 +787,7 @@ func TestToDatum(t *testing.T) { int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, - stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, @@ -829,7 +835,7 @@ func TestToDatum(t *testing.T) { int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, - stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, 
stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize,