diff --git a/pkg/disttask/framework/storage/task_table.go b/pkg/disttask/framework/storage/task_table.go
index aa8f82262faff..1fb3e1b0746cd 100644
--- a/pkg/disttask/framework/storage/task_table.go
+++ b/pkg/disttask/framework/storage/task_table.go
@@ -802,7 +802,7 @@ func (mgr *TaskManager) GetAllSubtasks(ctx context.Context) ([]*proto.SubtaskBas
 // a stuck issue if the new version TiDB has less than 16 CPU count.
 // We don't adjust the concurrency in subtask table because this field does not exist in v7.5.0.
 // For details, see https://github.com/pingcap/tidb/issues/50894.
-// For the following versions, there is a check when submiting a new task. This function should be a no-op.
+// For the following versions, there is a check when submitting a new task. This function should be a no-op.
 func (mgr *TaskManager) AdjustTaskOverflowConcurrency(ctx context.Context, se sessionctx.Context) error {
 	cpuCount, err := mgr.getCPUCountOfNode(ctx, se)
 	if err != nil {
diff --git a/pkg/domain/domain.go b/pkg/domain/domain.go
index 54fb27c0de2e2..6b675dc0b9bb1 100644
--- a/pkg/domain/domain.go
+++ b/pkg/domain/domain.go
@@ -277,7 +277,7 @@ func (do *Domain) loadInfoSchema(startTS uint64) (infoschema.InfoSchema, bool, i
 	// 1. Not first time bootstrap loading, which needs a full load.
 	// 2. It is newer than the current one, so it will be "the current one" after this function call.
 	// 3. There are less 100 diffs.
-	// 4. No regenrated schema diff.
+	// 4. No regenerated schema diff.
 	startTime := time.Now()
 	if currentSchemaVersion != 0 && neededSchemaVersion > currentSchemaVersion && neededSchemaVersion-currentSchemaVersion < LoadSchemaDiffVersionGapThreshold {
 		is, relatedChanges, diffTypes, err := do.tryLoadSchemaDiffs(m, currentSchemaVersion, neededSchemaVersion, startTS)
@@ -511,7 +511,7 @@ func (do *Domain) InfoSchema() infoschema.InfoSchema {
 
 // GetSnapshotInfoSchema gets a snapshot information schema.
 func (do *Domain) GetSnapshotInfoSchema(snapshotTS uint64) (infoschema.InfoSchema, error) {
-	// if the snapshotTS is new enough, we can get infoschema directly through sanpshotTS.
+	// if the snapshotTS is new enough, we can get infoschema directly through snapshotTS.
 	if is := do.infoCache.GetBySnapshotTS(snapshotTS); is != nil {
 		return is, nil
 	}
@@ -613,7 +613,7 @@ func (do *Domain) Reload() error {
 	is, hitCache, oldSchemaVersion, changes, err := do.loadInfoSchema(version)
 	if err != nil {
 		if version = getFlashbackStartTSFromErrorMsg(err); version != 0 {
-			// use the lastest available version to create domain
+			// use the latest available version to create domain
 			version--
 			is, hitCache, oldSchemaVersion, changes, err = do.loadInfoSchema(version)
 		}
@@ -949,7 +949,7 @@ func (do *Domain) loadSchemaInLoop(ctx context.Context, lease time.Duration) {
 			}
 			// The schema maybe changed, must reload schema then the schema validator can restart.
 			exitLoop := do.mustReload()
-			// domain is cosed.
+			// domain is closed.
 			if exitLoop {
 				logutil.BgLogger().Error("domain is closed, exit loadSchemaInLoop")
 				return
@@ -968,7 +968,7 @@ func (do *Domain) loadSchemaInLoop(ctx context.Context, lease time.Duration) {
 }
 
 // mustRestartSyncer tries to restart the SchemaSyncer.
-// It returns until it's successful or the domain is stoped.
+// It does not return until it's successful or the domain is stopped.
 func (do *Domain) mustRestartSyncer(ctx context.Context) error {
 	syncer := do.ddl.SchemaSyncer()
@@ -2098,7 +2098,7 @@ func (do *Domain) GetHistoricalStatsWorker() *HistoricalStatsWorker {
 	return do.historicalStatsWorker
 }
 
-// EnableDumpHistoricalStats used to control whether enbale dump stats for unit test
+// EnableDumpHistoricalStats is used to control whether to enable dump stats for unit test
 var enableDumpHistoricalStats atomic.Bool
 
 // StartHistoricalStatsWorker start historical workers running
diff --git a/pkg/domain/plan_replayer.go b/pkg/domain/plan_replayer.go
index ff67d64663929..9e0dd80efa40c 100644
--- a/pkg/domain/plan_replayer.go
+++ b/pkg/domain/plan_replayer.go
@@ -501,7 +501,7 @@ func (h *planReplayerTaskDumpHandle) GetWorker() *planReplayerTaskDumpWorker {
 	return h.workers[0]
 }
 
-// Close make finished flag ture
+// Close makes the finished flag true
 func (h *planReplayerTaskDumpHandle) Close() {
 	close(h.taskCH)
 }
diff --git a/pkg/domain/resourcegroup/runaway.go b/pkg/domain/resourcegroup/runaway.go
index b593776df15ec..045286236ec80 100644
--- a/pkg/domain/resourcegroup/runaway.go
+++ b/pkg/domain/resourcegroup/runaway.go
@@ -317,7 +317,7 @@ func (rm *RunawayManager) addWatchList(record *QuarantineRecord, ttl time.Durati
 	rm.queryLock.Lock()
 	defer rm.queryLock.Unlock()
 	if item != nil {
-		// check the ID because of the eariler scan.
+		// check the ID because of the earlier scan.
 		if item.ID == record.ID {
 			return
 		}
diff --git a/pkg/domain/ru_stats.go b/pkg/domain/ru_stats.go
index 42dd328f922be..68a308ac3bfc9 100644
--- a/pkg/domain/ru_stats.go
+++ b/pkg/domain/ru_stats.go
@@ -34,7 +34,7 @@ import (
 const (
 	maxRetryCount     int           = 10
 	ruStatsInterval   time.Duration = 24 * time.Hour
-	// only keep stats rows for last 3 monthes(92 days at most).
+	// only keep stats rows for the last 3 months (92 days at most).
 	ruStatsGCDuration time.Duration = 92 * ruStatsInterval
 	gcBatchSize       int64         = 1000
 )
diff --git a/pkg/domain/runaway.go b/pkg/domain/runaway.go
index e5fa14e85198d..b67d941bd2d18 100644
--- a/pkg/domain/runaway.go
+++ b/pkg/domain/runaway.go
@@ -240,7 +240,7 @@ func (do *Domain) RemoveRunawayWatch(recordID int64) error {
 
 func (do *Domain) runawayRecordFlushLoop() {
 	defer util.Recover(metrics.LabelDomain, "runawayRecordFlushLoop", nil, false)
 
-	// this times is used to batch flushing rocords, with 1s duration,
+	// this timer is used to batch flushing records, with 1s duration,
 	// we can guarantee a watch record can be seen by the user within 1s.
 	runawayRecordFluashTimer := time.NewTimer(runawayRecordFlushInterval)
 	runawayRecordGCTicker := time.NewTicker(runawayRecordGCInterval)
diff --git a/pkg/domain/schema_validator.go b/pkg/domain/schema_validator.go
index c018085d0141f..5a5b1e3a22618 100644
--- a/pkg/domain/schema_validator.go
+++ b/pkg/domain/schema_validator.go
@@ -112,7 +112,7 @@ func (s *schemaValidator) Restart() {
 	defer s.mux.Unlock()
 	s.isStarted = true
 	if s.do != nil {
-		// When this instance reconnects PD, we should record the latest schema verion after mustReload(),
+		// When this instance reconnects PD, we should record the latest schema version after mustReload(),
 		// to prevent write txns using a stale schema version by aborting them before commit.
 		// However, the problem still exists for read-only txns.
 		s.restartSchemaVer = s.do.InfoSchema().SchemaMetaVersion()
diff --git a/pkg/executor/admin.go b/pkg/executor/admin.go
index d3af1daa48444..09695922cd98c 100644
--- a/pkg/executor/admin.go
+++ b/pkg/executor/admin.go
@@ -285,7 +285,7 @@ func (e *RecoverIndexExec) buildTableScan(ctx context.Context, txn kv.Transactio
 		return nil, err
 	}
 
-	// Actually, with limitCnt, the match datas maybe only in one region, so let the concurrency to be 1,
+	// Actually, with limitCnt, the matched data may be in only one region, so let the concurrency be 1,
 	// avoid unnecessary region scan.
 	kvReq.Concurrency = 1
 	result, err := distsql.Select(ctx, e.Ctx().GetDistSQLCtx(), kvReq, e.columnsTypes())
diff --git a/pkg/executor/builder.go b/pkg/executor/builder.go
index 6f80aefaaf3f1..763c01b8fecb2 100644
--- a/pkg/executor/builder.go
+++ b/pkg/executor/builder.go
@@ -3682,7 +3682,7 @@ func buildTableReq(b *executorBuilder, schemaLen int, plans []base.PhysicalPlan)
 
 // buildIndexReq is designed to create a DAG for index request.
 // If len(ByItems) != 0 means index request should return related columns
-// to sort result rows in TiDB side for parition tables.
+// to sort result rows on the TiDB side for partition tables.
 func buildIndexReq(ctx sessionctx.Context, columns []*model.IndexColumn, handleLen int, plans []base.PhysicalPlan) (dagReq *tipb.DAGRequest, err error) {
 	indexReq, err := builder.ConstructDAGReq(ctx, plans, kv.TiKV)
 	if err != nil {
@@ -4574,7 +4574,7 @@ func buildRangesForIndexJoin(ctx sessionctx.Context, lookUpContents []*join.Inde
 			}
 		}
 		if cwc == nil {
-			// A deep copy is need here because the old []*range.Range is overwriten
+			// A deep copy is needed here because the old []*range.Range is overwritten
 			for _, ran := range ranges {
 				retRanges = append(retRanges, ran.Clone())
 			}
diff --git a/pkg/executor/distsql.go b/pkg/executor/distsql.go
index 57750abe30390..bb7911f779ea9 100644
--- a/pkg/executor/distsql.go
+++ b/pkg/executor/distsql.go
@@ -650,7 +650,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<
 		kvRanges = e.partitionKVRanges
 	}
 	// When len(kvrange) = 1, no sorting is required,
-	// so remove byItems and non-necessary output colums
+	// so remove byItems and unnecessary output columns
 	if len(kvRanges) == 1 {
 		e.dagPB.OutputOffsets = e.dagPB.OutputOffsets[len(e.byItems):]
 		e.byItems = nil
@@ -738,7 +738,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<
 	return nil
 }
 
-// startTableWorker launchs some background goroutines which pick tasks from workCh and execute the task.
+// startTableWorker launches some background goroutines which pick tasks from workCh and execute the task.
 func (e *IndexLookUpExecutor) startTableWorker(ctx context.Context, workCh <-chan *lookupTableTask) {
 	lookupConcurrencyLimit := e.Ctx().GetSessionVars().IndexLookupConcurrency()
 	e.tblWorkerWg.Add(lookupConcurrencyLimit)
diff --git a/pkg/executor/infoschema_reader.go b/pkg/executor/infoschema_reader.go
index b3ddabc37fa98..f648cf46500fc 100644
--- a/pkg/executor/infoschema_reader.go
+++ b/pkg/executor/infoschema_reader.go
@@ -535,7 +535,7 @@ func (e *memtableRetriever) setDataFromReferConst(sctx sessionctx.Context, schem
 
 func (e *memtableRetriever) updateStatsCacheIfNeed() bool {
 	for _, col := range e.columns {
-		// only the following columns need stats cahce.
+		// only the following columns need stats cache.
if col.Name.O == "AVG_ROW_LENGTH" || col.Name.O == "DATA_LENGTH" || col.Name.O == "INDEX_LENGTH" || col.Name.O == "TABLE_ROWS" { return true } diff --git a/pkg/executor/load_data.go b/pkg/executor/load_data.go index 45b96d092052a..a299a10d70cd0 100644 --- a/pkg/executor/load_data.go +++ b/pkg/executor/load_data.go @@ -356,7 +356,7 @@ type commitTask struct { rows [][]types.Datum } -// processStream always trys to build a parser from channel and process it. When +// processStream always tries to build a parser from channel and process it. When // it returns nil, it means all data is read. func (w *encodeWorker) processStream( ctx context.Context, diff --git a/pkg/executor/slow_query.go b/pkg/executor/slow_query.go index fb1e3373342e1..2934825240996 100644 --- a/pkg/executor/slow_query.go +++ b/pkg/executor/slow_query.go @@ -928,7 +928,7 @@ func (e *slowQueryRetriever) getAllFiles(ctx context.Context, sctx sessionctx.Co } // If we want to get the end time from a compressed file, - // we need uncompress the whole file which is very slow and consume a lot of memeory. + // we need uncompress the whole file which is very slow and consume a lot of memory. if !compressed { // Get the file end time. fileEndTime, err := e.getFileEndTime(ctx, file) diff --git a/pkg/expression/builtin_compare.go b/pkg/expression/builtin_compare.go index 54b39f099c7c1..d20797b8cdaf8 100644 --- a/pkg/expression/builtin_compare.go +++ b/pkg/expression/builtin_compare.go @@ -368,7 +368,7 @@ func ResolveType4Between(args [3]Expression) types.EvalType { return cmpTp } -// GLCmpStringMode represents Greatest/Least interal string comparison mode +// GLCmpStringMode represents Greatest/Least integral string comparison mode type GLCmpStringMode uint8 const ( @@ -448,7 +448,7 @@ func (c *greatestFunctionClass) getFunction(ctx BuildContext, args []Expression) resTp := resFieldType.EvalType() argTp := resTp if cmpStringMode != GLCmpStringDirectly { - // Args are temporal and string mixed, we cast all args as string and parse it to temporal mannualy to compare. + // Args are temporal and string mixed, we cast all args as string and parse it to temporal manually to compare. argTp = types.ETString } else if resTp == types.ETJson { unsupportedJSONComparison(ctx, args) @@ -761,7 +761,7 @@ func (c *leastFunctionClass) getFunction(ctx BuildContext, args []Expression) (s resTp := resFieldType.EvalType() argTp := resTp if cmpStringMode != GLCmpStringDirectly { - // Args are temporal and string mixed, we cast all args as string and parse it to temporal mannualy to compare. + // Args are temporal and string mixed, we cast all args as string and parse it to temporal manually to compare. argTp = types.ETString } else if resTp == types.ETJson { unsupportedJSONComparison(ctx, args) @@ -1373,7 +1373,7 @@ func isTemporalColumn(expr Expression) bool { // tryToConvertConstantInt tries to convert a constant with other type to a int constant. // isExceptional indicates whether the 'int column [cmp] const' might be true/false. -// If isExceptional is true, ExecptionalVal is returned. Or, CorrectVal is returned. +// If isExceptional is true, ExceptionalVal is returned. Or, CorrectVal is returned. // CorrectVal: The computed result. If the constant can be converted to int without exception, return the val. Else return 'con'(the input). 
 // ExceptionalVal : It is used to get more information to check whether 'int column [cmp] const' is true/false
 //
@@ -1412,7 +1412,7 @@ func tryToConvertConstantInt(ctx BuildContext, targetFieldType *types.FieldType,
 
 // RefineComparedConstant changes a non-integer constant argument to its ceiling or floor result by the given op.
 // isExceptional indicates whether the 'int column [cmp] const' might be true/false.
-// If isExceptional is true, ExecptionalVal is returned. Or, CorrectVal is returned.
+// If isExceptional is true, ExceptionalVal is returned. Otherwise, CorrectVal is returned.
 // CorrectVal: The computed result. If the constant can be converted to int without exception, return the val. Else return 'con'(the input).
 // ExceptionalVal : It is used to get more information to check whether 'int column [cmp] const' is true/false
 //
diff --git a/pkg/expression/builtin_encryption_vec.go b/pkg/expression/builtin_encryption_vec.go
index cb23047dd2c92..29b19fe596f37 100644
--- a/pkg/expression/builtin_encryption_vec.go
+++ b/pkg/expression/builtin_encryption_vec.go
@@ -694,7 +694,7 @@ func (b *builtinAesEncryptSig) vecEvalString(ctx EvalContext, input *chunk.Chunk
 		}
 
 		// NOTE: we can't use GetBytes, because in AESEncryptWithECB padding is automatically
-		// added to str and this will damange the data layout in chunk.Column
+		// added to str and this will damage the data layout in chunk.Column
 		str := []byte(strBuf.GetString(i))
 		cipherText, err := encrypt.AESEncryptWithECB(str, key)
 		if err != nil {
diff --git a/pkg/expression/builtin_miscellaneous.go b/pkg/expression/builtin_miscellaneous.go
index 64960909ff590..6952a13285936 100644
--- a/pkg/expression/builtin_miscellaneous.go
+++ b/pkg/expression/builtin_miscellaneous.go
@@ -624,7 +624,7 @@ func (b *builtinInetNtoaSig) evalString(ctx EvalContext, row chunk.Row) (string,
 	binary.BigEndian.PutUint32(ip, uint32(val))
 	ipv4 := ip.To4()
 	if ipv4 == nil {
-		// Not a vaild ipv4 address.
+		// Not a valid ipv4 address.
 		return "", true, nil
 	}
 
diff --git a/pkg/expression/builtin_miscellaneous_vec.go b/pkg/expression/builtin_miscellaneous_vec.go
index 6efde23250a29..12f27d632f19c 100644
--- a/pkg/expression/builtin_miscellaneous_vec.go
+++ b/pkg/expression/builtin_miscellaneous_vec.go
@@ -52,7 +52,7 @@ func (b *builtinInetNtoaSig) vecEvalString(ctx EvalContext, input *chunk.Chunk,
 		binary.BigEndian.PutUint32(ip, uint32(val))
 		ipv4 := ip.To4()
 		if ipv4 == nil {
-			// Not a vaild ipv4 address.
+			// Not a valid ipv4 address.
 			result.AppendNull()
 			continue
 		}
diff --git a/pkg/expression/builtin_string.go b/pkg/expression/builtin_string.go
index d001d050d1785..fd9c10fd110e2 100644
--- a/pkg/expression/builtin_string.go
+++ b/pkg/expression/builtin_string.go
@@ -1751,7 +1751,7 @@ type trimFunctionClass struct {
 
 // getFunction sets trim built-in function signature.
 // The syntax of trim in mysql is 'TRIM([{BOTH | LEADING | TRAILING} [remstr] FROM] str), TRIM([remstr FROM] str)',
-// but we wil convert it into trim(str), trim(str, remstr) and trim(str, remstr, direction) in AST.
+// but we will convert it into trim(str), trim(str, remstr) and trim(str, remstr, direction) in AST.
 func (c *trimFunctionClass) getFunction(ctx BuildContext, args []Expression) (builtinFunc, error) {
 	if err := c.verifyArgs(args); err != nil {
 		return nil, err
diff --git a/pkg/expression/builtin_time.go b/pkg/expression/builtin_time.go
index 7b1d0d6527e3f..79f812b01afe3 100644
--- a/pkg/expression/builtin_time.go
+++ b/pkg/expression/builtin_time.go
@@ -63,10 +63,10 @@ const (
 	// GET_FORMAT location.
 )
 
 var (
-	// durationPattern checks whether a string matchs the format of duration.
+	// durationPattern checks whether a string matches the format of duration.
 	durationPattern = regexp.MustCompile(`^\s*[-]?(((\d{1,2}\s+)?0*\d{0,3}(:0*\d{1,2}){0,2})|(\d{1,7}))?(\.\d*)?\s*$`)
 
-	// timestampPattern checks whether a string matchs the format of timestamp.
+	// timestampPattern checks whether a string matches the format of timestamp.
 	timestampPattern = regexp.MustCompile(`^\s*0*\d{1,4}([^\d]0*\d{1,2}){2}\s+(0*\d{0,2}([^\d]0*\d{1,2}){2})?(\.\d*)?\s*$`)
 
 	// datePattern determine whether to match the format of date.
diff --git a/pkg/expression/builtin_time_test.go b/pkg/expression/builtin_time_test.go
index f251301e3504a..088bd826b1785 100644
--- a/pkg/expression/builtin_time_test.go
+++ b/pkg/expression/builtin_time_test.go
@@ -845,7 +845,7 @@ func TestNowAndUTCTimestamp(t *testing.T) {
 		ts := x.now()
 		require.NoError(t, err)
 		mt := v.GetMysqlTime()
-		// we canot use a constant value to check timestamp funcs, so here
+		// we cannot use a constant value to check timestamp funcs, so here
 		// just to check the fractional seconds part and the time delta.
 		require.False(t, strings.Contains(mt.String(), "."))
 		require.LessOrEqual(t, ts.Sub(gotime(mt, ts.Location())), 5*time.Second)
diff --git a/pkg/expression/column.go b/pkg/expression/column.go
index 70abaaba8cd14..b0ea394b4ff61 100644
--- a/pkg/expression/column.go
+++ b/pkg/expression/column.go
@@ -276,7 +276,7 @@ func (col *Column) EqualColumn(expr Expression) bool {
 	return false
 }
 
-// EqualByExprAndID extends Equal by comparing virual expression
+// EqualByExprAndID extends Equal by comparing virtual expression
 func (col *Column) EqualByExprAndID(ctx EvalContext, expr Expression) bool {
 	if newCol, ok := expr.(*Column); ok {
 		expr, isOk := col.VirtualExpr.(*ScalarFunction)
diff --git a/pkg/expression/expression.go b/pkg/expression/expression.go
index d6b6ba9a6da6a..eeba4d6ca88dc 100644
--- a/pkg/expression/expression.go
+++ b/pkg/expression/expression.go
@@ -219,7 +219,7 @@ type Expression interface {
 	// resolveIndices is called inside the `ResolveIndices` It will perform on the expression itself.
 	resolveIndices(schema *Schema) error
 
-	// ResolveIndicesByVirtualExpr resolves indices by the given schema in terms of virual expression. It will copy the original expression and return the copied one.
+	// ResolveIndicesByVirtualExpr resolves indices by the given schema in terms of virtual expression. It will copy the original expression and return the copied one.
 	ResolveIndicesByVirtualExpr(ctx EvalContext, schema *Schema) (Expression, bool)
 
 	// resolveIndicesByVirtualExpr is called inside the `ResolveIndicesByVirtualExpr` It will perform on the expression itself.
diff --git a/pkg/expression/util.go b/pkg/expression/util.go
index 129ef2418f74a..28a5e6cd53274 100644
--- a/pkg/expression/util.go
+++ b/pkg/expression/util.go
@@ -997,7 +997,7 @@ func Contains(exprs []Expression, e Expression) bool {
 
 // ExtractFiltersFromDNFs checks whether the cond is DNF. If so, it will get the extracted part and the remained part.
 // The original DNF will be replaced by the remained part or just be deleted if remained part is nil.
-// And the extracted part will be appended to the end of the orignal slice.
+// And the extracted part will be appended to the end of the original slice.
 func ExtractFiltersFromDNFs(ctx BuildContext, conditions []Expression) []Expression {
 	var allExtracted []Expression
 	for i := len(conditions) - 1; i >= 0; i-- {
@@ -1249,7 +1249,7 @@ func GetStringFromConstant(ctx EvalContext, value Expression) (string, bool, err
 	return str, false, nil
 }
 
-// GetIntFromConstant gets an interger value from the Constant expression.
+// GetIntFromConstant gets an integer value from the Constant expression.
 func GetIntFromConstant(ctx EvalContext, value Expression) (int, bool, error) {
 	str, isNull, err := GetStringFromConstant(ctx, value)
 	if err != nil || isNull {
@@ -1494,7 +1494,7 @@ func RemoveMutableConst(ctx BuildContext, exprs []Expression) (err error) {
 		case *Constant:
 			v.ParamMarker = nil
 			if v.DeferredExpr != nil { // evaluate and update v.Value to convert v to a complete immutable constant.
-				// TODO: remove or hide DefferedExpr since it's too dangerous (hard to be consistent with v.Value all the time).
+				// TODO: remove or hide DeferredExpr since it's too dangerous (hard to be consistent with v.Value all the time).
 				v.Value, err = v.DeferredExpr.Eval(ctx.GetEvalCtx(), chunk.Row{})
 				if err != nil {
 					return err
diff --git a/pkg/session/bootstrap_test.go b/pkg/session/bootstrap_test.go
index 44ec357e49658..ed938f0a7d227 100644
--- a/pkg/session/bootstrap_test.go
+++ b/pkg/session/bootstrap_test.go
@@ -781,7 +781,7 @@ func TestIndexMergeUpgradeFrom300To540(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, int64(ver300), ver)
 
-	// We are now in 3.0.0, check tidb_enable_index_merge shoudle not exist.
+	// We are now in 3.0.0, check tidb_enable_index_merge should not exist.
 	res := MustExecToRecodeSet(t, seV3, fmt.Sprintf("select * from mysql.GLOBAL_VARIABLES where variable_name='%s'", variable.TiDBEnableIndexMerge))
 	chk := res.NewChunk(nil)
 	err = res.Next(ctx, chk)
diff --git a/pkg/session/bootstraptest/bootstrap_upgrade_test.go b/pkg/session/bootstraptest/bootstrap_upgrade_test.go
index 0a57808f69cc1..8ae3019a093ac 100644
--- a/pkg/session/bootstraptest/bootstrap_upgrade_test.go
+++ b/pkg/session/bootstraptest/bootstrap_upgrade_test.go
@@ -474,7 +474,7 @@ func checkDDLJobExecSucc(t *testing.T, se sessiontypes.Session, jobID int64) {
 // TestUpgradeVersionForSystemPausedJob tests mock the first upgrade failed, and it has a mock system DDL in queue.
 // Then we do re-upgrade(This operation will pause all DDL jobs by the system).
 func TestUpgradeVersionForSystemPausedJob(t *testing.T) {
-	// Mock a general and a reorg job in boostrap.
+	// Mock a general and a reorg job in bootstrap.
 	mock := true
 	session.WithMockUpgrade = &mock
 	session.MockUpgradeToVerLatestKind = session.MockSimpleUpgradeToVerLatest
diff --git a/pkg/session/session.go b/pkg/session/session.go
index de666390dd2d2..ccfc9fb278017 100644
--- a/pkg/session/session.go
+++ b/pkg/session/session.go
@@ -747,7 +747,7 @@ func (s *session) commitTxnWithTemporaryData(ctx context.Context, txn kv.Transac
 	return nil
 }
 
-// errIsNoisy is used to filter DUPLCATE KEY errors.
+// errIsNoisy is used to filter DUPLICATE KEY errors.
 // These can observed by users in INFORMATION_SCHEMA.CLIENT_ERRORS_SUMMARY_GLOBAL instead.
 //
 // The rationale for filtering these errors is because they are "client generated errors". i.e.
@@ -2340,7 +2340,7 @@ const ExecStmtVarKey ExecStmtVarKeyType = 0
 
 // execStmtResult is the return value of ExecuteStmt and it implements the sqlexec.RecordSet interface.
 // Why we need a struct to wrap a RecordSet and provide another RecordSet?
 // This is because there are so many session state related things that definitely not belongs to the original
-// RecordSet, so this struct exists and RecordSet.Close() is overrided handle that.
+// RecordSet, so this struct exists and RecordSet.Close() is overridden to handle that.
 type execStmtResult struct {
 	sqlexec.RecordSet
 	se *session
diff --git a/pkg/session/test/session_test.go b/pkg/session/test/session_test.go
index ca7d262260b80..db5134cc9fcce 100644
--- a/pkg/session/test/session_test.go
+++ b/pkg/session/test/session_test.go
@@ -287,7 +287,7 @@ func TestParseWithParams(t *testing.T) {
 	se := tk.Session()
 	exec := se.GetRestrictedSQLExecutor()
 
-	// test compatibility with ExcuteInternal
+	// test compatibility with ExecuteInternal
 	_, err := exec.ParseWithParams(context.TODO(), "SELECT 4")
 	require.NoError(t, err)
diff --git a/pkg/session/test/txn/txn_test.go b/pkg/session/test/txn/txn_test.go
index 42732cc26dba3..44844ccbcdcc5 100644
--- a/pkg/session/test/txn/txn_test.go
+++ b/pkg/session/test/txn/txn_test.go
@@ -89,7 +89,7 @@ func TestAutocommit(t *testing.T) {
 	tk.MustQuery("select count(*) from t where id = 1").Check(testkit.Rows("0"))
 	tk.MustQuery("select @@global.autocommit").Check(testkit.Rows("1"))
 
-	// When the transaction is committed because of switching mode, the session set statement shold succeed.
+	// When the transaction is committed because of switching mode, the session set statement should succeed.
 	tk.MustExec("set autocommit = 0")
 	tk.MustExec("begin")
 	tk.MustExec("insert into t values (1)")
@@ -341,7 +341,7 @@ func TestTxnRetryErrMsg(t *testing.T) {
 }
 
 func TestSetTxnScope(t *testing.T) {
-	// Check the default value of @@tidb_enable_local_txn and @@txn_scope whitout configuring the zone label.
+	// Check the default value of @@tidb_enable_local_txn and @@txn_scope without configuring the zone label.
 	store := testkit.CreateMockStore(t)
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
diff --git a/pkg/sessionctx/binloginfo/binloginfo.go b/pkg/sessionctx/binloginfo/binloginfo.go
index 433384fce36f0..3a427bdfc396b 100644
--- a/pkg/sessionctx/binloginfo/binloginfo.go
+++ b/pkg/sessionctx/binloginfo/binloginfo.go
@@ -171,7 +171,7 @@ func WaitBinlogRecover(timeout time.Duration) error {
 	}
 }
 
-// SkippedCommitterCount returns the number of alive committers whick skipped the binlog writing.
+// SkippedCommitterCount returns the number of alive committers which skipped the binlog writing.
 func SkippedCommitterCount() int32 {
 	return atomic.LoadInt32(&skippedCommitterCounter)
 }
diff --git a/pkg/sessionctx/variable/session.go b/pkg/sessionctx/variable/session.go
index a9a6377e33e48..b0e2e8e0f967a 100644
--- a/pkg/sessionctx/variable/session.go
+++ b/pkg/sessionctx/variable/session.go
@@ -1204,7 +1204,7 @@ type SessionVars struct {
 	// PresumeKeyNotExists indicates lazy existence checking is enabled.
 	PresumeKeyNotExists bool
 
-	// EnableParallelApply indicates that thether to use parallel apply.
+	// EnableParallelApply indicates whether to use parallel apply.
 	EnableParallelApply bool
 
 	// EnableRedactLog indicates that whether redact log. Possible values are 'OFF', 'ON', 'MARKER'.
@@ -1291,7 +1291,7 @@ type SessionVars struct {
 	// ReadStaleness indicates the staleness duration for the following query
 	ReadStaleness time.Duration
 
-	// cachedStmtCtx is used to optimze the object allocation.
+	// cachedStmtCtx is used to optimize the object allocation.
 	cachedStmtCtx [2]stmtctx.StatementContext
 
 	// Rng stores the rand_seed1 and rand_seed2 for Rand() function
@@ -1486,7 +1486,7 @@
 	shardRand *rand.Rand
 
 	// Resource group name
-	// NOTE: all statement relate opeartion should use StmtCtx.ResourceGroupName instead.
+	// NOTE: all statement-related operations should use StmtCtx.ResourceGroupName instead.
 	ResourceGroupName string
 
 	// PessimisticTransactionFairLocking controls whether fair locking for pessimistic transaction
@@ -1627,7 +1627,7 @@ func (s *SessionVars) IsPlanReplayerCaptureEnabled() bool {
 	return s.EnablePlanReplayerCapture || s.EnablePlanReplayedContinuesCapture
 }
 
-// GetChunkAllocator returns a vaid chunk allocator.
+// GetChunkAllocator returns a valid chunk allocator.
 func (s *SessionVars) GetChunkAllocator() chunk.Allocator {
 	if s.chunkPool == nil {
 		return chunk.NewEmptyAllocator()
 	}
@@ -3096,7 +3096,7 @@ const (
 	SlowLogCopBackoffPrefix = "Cop_backoff_"
 	// SlowLogMemMax is the max number bytes of memory used in this statement.
 	SlowLogMemMax = "Mem_max"
-	// SlowLogDiskMax is the nax number bytes of disk used in this statement.
+	// SlowLogDiskMax is the max number bytes of disk used in this statement.
 	SlowLogDiskMax = "Disk_max"
 	// SlowLogPrepared is used to indicate whether this sql execute in prepare.
 	SlowLogPrepared = "Prepared"
@@ -3681,7 +3681,7 @@ func (rfType RuntimeFilterType) String() string {
 // RuntimeFilterTypeStringToType convert RuntimeFilterTypeNameString to RuntimeFilterType
 // If name is legal, it will return Runtime Filter Type and true
 // Else, it will return -1 and false
-// The second param means the convert is ok or not. Ture is ok, false means it is illegal name
+// The second param means the convert is ok or not. True is ok, false means it is illegal name
 // At present, we only support two names: "IN" and "MIN_MAX"
 func RuntimeFilterTypeStringToType(name string) (RuntimeFilterType, bool) {
 	switch name {
@@ -3696,7 +3696,7 @@ func RuntimeFilterTypeStringToType(name string) (RuntimeFilterType, bool) {
 
 // ToRuntimeFilterType convert session var value to RuntimeFilterType list
 // If sessionVarValue is legal, it will return RuntimeFilterType list and true
-// The second param means the convert is ok or not. Ture is ok, false means it is illegal value
+// The second param means the convert is ok or not. True is ok, false means it is illegal value
 // The legal value should be comma-separated, eg: "IN,MIN_MAX"
 func ToRuntimeFilterType(sessionVarValue string) ([]RuntimeFilterType, bool) {
 	typeNameList := strings.Split(sessionVarValue, ",")
@@ -3744,7 +3744,7 @@ func (rfMode RuntimeFilterMode) String() string {
 // RuntimeFilterModeStringToMode convert RuntimeFilterModeString to RuntimeFilterMode
 // If name is legal, it will return Runtime Filter Mode and true
 // Else, it will return -1 and false
-// The second param means the convert is ok or not. Ture is ok, false means it is illegal name
+// The second param means the convert is ok or not. True is ok, false means it is illegal name
 // At present, we only support one name: "OFF", "LOCAL"
 func RuntimeFilterModeStringToMode(name string) (RuntimeFilterMode, bool) {
 	switch name {
diff --git a/pkg/sessionctx/variable/session_test.go b/pkg/sessionctx/variable/session_test.go
index 610b86f253346..843af33e70c8a 100644
--- a/pkg/sessionctx/variable/session_test.go
+++ b/pkg/sessionctx/variable/session_test.go
@@ -151,7 +151,7 @@ func TestSlowLogFormat(t *testing.T) {
 	seVar.ConnectionInfo = &variable.ConnectionInfo{ClientIP: "192.168.0.1"}
 	seVar.ConnectionID = 1
 	seVar.SessionAlias = "aliasabc"
-	// the out put of the loged CurrentDB should be 'test', should be to lower cased.
+	// the output of the logged CurrentDB should be 'test', and should be lower cased.
 	seVar.CurrentDB = "TeST"
 	seVar.InRestrictedSQL = true
 	seVar.StmtCtx.WaitLockLeaseTime = 1
diff --git a/pkg/sessionctx/variable/variable.go b/pkg/sessionctx/variable/variable.go
index a05dcd792fe4c..5eb94f379a0f4 100644
--- a/pkg/sessionctx/variable/variable.go
+++ b/pkg/sessionctx/variable/variable.go
@@ -145,7 +145,7 @@ type SysVar struct {
 	// IsHintUpdatableVerified indicate whether we've confirmed that SET_VAR() hint is worked for this hint.
 	IsHintUpdatableVerified bool
 	// Deprecated: Hidden previously meant that the variable still responds to SET but doesn't show up in SHOW VARIABLES
-	// However, this feature is no longer used. All variables are visble.
+	// However, this feature is no longer used. All variables are visible.
 	Hidden bool
 	// Aliases is a list of sysvars that should also be updated when this sysvar is updated.
 	// Updating aliases calls the SET function of the aliases, but does not update their aliases (preventing SET recursion)
diff --git a/pkg/store/copr/batch_coprocessor.go b/pkg/store/copr/batch_coprocessor.go
index 752be5f16d9e4..34a051a01b5b8 100644
--- a/pkg/store/copr/batch_coprocessor.go
+++ b/pkg/store/copr/batch_coprocessor.go
@@ -162,7 +162,7 @@ func selectRegion(storeID uint64, candidateRegionInfos []RegionInfo, selected []
 		selected[idx] = true
 		regionInfos = append(regionInfos, candidateRegionInfos[idx])
 	}
-	// Remove regions that has beed selected.
+	// Remove regions that have been selected.
 	storeID2RegionIndex[storeID] = regionIndexes[i:]
 	return regionInfos
 }
@@ -207,7 +207,7 @@ func checkBatchCopTaskBalance(storeTasks map[uint64]*batchCopTask, balanceContin
 // In fact, not absolutely continuous is required, regions' range are closed to store in a TiFlash segment is enough for internal read optimization.
 //
 // First, sort candidateRegionInfos by their key ranges.
-// Second, build a storeID2RegionIndex data structure to fastly locate regions of a store (avoid scanning candidateRegionInfos repeatly).
+// Second, build a storeID2RegionIndex data structure to quickly locate regions of a store (avoid scanning candidateRegionInfos repeatedly).
 // Third, each store will take balanceContinuousRegionCount from the sorted candidateRegionInfos. These regions are stored very close to each other in TiFlash.
 // Fourth, if the region count is not balance between TiFlash, it may fallback to the original balance logic.
 func balanceBatchCopTaskWithContinuity(storeTaskMap map[uint64]*batchCopTask, candidateRegionInfos []RegionInfo, balanceContinuousRegionCount int64) ([]*batchCopTask, int) {
diff --git a/pkg/store/copr/coprocessor.go b/pkg/store/copr/coprocessor.go
index 14b5b1fea4ac0..2c92300843363 100644
--- a/pkg/store/copr/coprocessor.go
+++ b/pkg/store/copr/coprocessor.go
@@ -356,7 +356,7 @@ func buildCopTasks(bo *Backoffer, ranges *KeyRanges, opt *buildCopTaskOpt) ([]*c
 		}
 	})
 
-	// TODO(youjiali1995): is there any request type that needn't be splitted by buckets?
+	// TODO(youjiali1995): is there any request type that needn't be split by buckets?
 	locs, err := cache.SplitKeyRangesByBuckets(bo, ranges)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
@@ -1530,7 +1530,7 @@ func (worker *copIteratorWorker) handleBatchCopResponse(bo *Backoffer, rpcCtx *t
 	}()
 	appendRemainTasks := func(tasks ...*copTask) {
 		if remainTasks == nil {
-			// allocate size fo remain length
+			// allocate capacity for the remaining tasks
 			remainTasks = make([]*copTask, 0, len(tasks))
 		}
 		remainTasks = append(remainTasks, tasks...)
diff --git a/pkg/store/copr/coprocessor_test.go b/pkg/store/copr/coprocessor_test.go
index a3fef83fd785b..d66437a27633d 100644
--- a/pkg/store/copr/coprocessor_test.go
+++ b/pkg/store/copr/coprocessor_test.go
@@ -226,7 +226,7 @@ func TestBuildTasksByBuckets(t *testing.T) {
 		}
 	}
 
-	// serveral ranges per bucket
+	// several ranges per bucket
 	// region:  nil---------------------------n-----------x-----------nil
 	// buckets: nil-----c-------g-------k-----n----t------x-----------nil
 	// ranges:  nil-a b-c d-e f-g h-i j-k-l m-n
diff --git a/pkg/store/copr/region_cache.go b/pkg/store/copr/region_cache.go
index eb64e29dde464..4b0ceff622df9 100644
--- a/pkg/store/copr/region_cache.go
+++ b/pkg/store/copr/region_cache.go
@@ -60,7 +60,7 @@ func (c *RegionCache) SplitRegionRanges(bo *Backoffer, keyRanges []kv.KeyRange,
 	return ret, nil
 }
 
-// LocationKeyRanges wrapps a real Location in PD and its logical ranges info.
+// LocationKeyRanges wraps a real Location in PD and its logical ranges info.
 type LocationKeyRanges struct {
 	// Location is the real location in PD.
 	Location *tikv.KeyLocation
diff --git a/pkg/store/driver/error/error.go b/pkg/store/driver/error/error.go
index 763d4a1b88517..e2bd2c18cfd24 100644
--- a/pkg/store/driver/error/error.go
+++ b/pkg/store/driver/error/error.go
@@ -43,7 +43,7 @@ var (
 	ErrQueryInterrupted = dbterror.ClassTiKV.NewStd(errno.ErrQueryInterrupted)
 	// ErrTiKVMaxTimestampNotSynced is the error that tikv's max timestamp is not synced.
 	ErrTiKVMaxTimestampNotSynced = dbterror.ClassTiKV.NewStd(errno.ErrTiKVMaxTimestampNotSynced)
-	// ErrLockAcquireFailAndNoWaitSet is the error that acquire the lock failed while no wait is setted.
+	// ErrLockAcquireFailAndNoWaitSet is the error that acquire the lock failed while no wait is set.
 	ErrLockAcquireFailAndNoWaitSet = dbterror.ClassTiKV.NewStd(errno.ErrLockAcquireFailAndNoWaitSet)
 	ErrResolveLockTimeout = dbterror.ClassTiKV.NewStd(errno.ErrResolveLockTimeout)
 	// ErrLockWaitTimeout is the error that wait for the lock is timeout.
diff --git a/pkg/store/gcworker/gc_worker_test.go b/pkg/store/gcworker/gc_worker_test.go
index 70ba2b0f4e428..7961ed695700a 100644
--- a/pkg/store/gcworker/gc_worker_test.go
+++ b/pkg/store/gcworker/gc_worker_test.go
@@ -699,7 +699,7 @@ func TestDeleteRangesFailure(t *testing.T) {
 
 	sendReqCh := make(chan SentReq, 20)
 
-	// The request sent to the specified key and store wil fail.
+	// The request sent to the specified key and store will fail.
 	var (
 		failKey   []byte
 		failStore *metapb.Store
diff --git a/pkg/store/helper/helper.go b/pkg/store/helper/helper.go
index 417218ab87ca2..30eed39953e45 100644
--- a/pkg/store/helper/helper.go
+++ b/pkg/store/helper/helper.go
@@ -640,7 +640,7 @@ func isBehindKeyRange(x withKeyRange, _, endKey string) bool {
 	return endKey != "" && x.GetStartKey() >= endKey
 }
 
-// TableInfoWithKeyRange stores table or index informations with its key range.
+// TableInfoWithKeyRange stores table or index information with its key range.
 type TableInfoWithKeyRange struct {
 	*TableInfo
 	StartKey string
diff --git a/pkg/store/mockstore/unistore/config/config-template.toml b/pkg/store/mockstore/unistore/config/config-template.toml
index 369cf1c63e55b..f760b611c0840 100644
--- a/pkg/store/mockstore/unistore/config/config-template.toml
+++ b/pkg/store/mockstore/unistore/config/config-template.toml
@@ -92,5 +92,5 @@ region-split-keys = 960000
 # transactions encounter locks, in milliseconds
 wait-for-lock-timeout = 1000
 
-# The duration between waking up lock waiter, in miliseconds
+# The duration between waking up lock waiter, in milliseconds
 wake-up-delay-duration = 100
diff --git a/pkg/store/mockstore/unistore/cophandler/mpp.go b/pkg/store/mockstore/unistore/cophandler/mpp.go
index 8cb5116063266..4b0e2b9059e4f 100644
--- a/pkg/store/mockstore/unistore/cophandler/mpp.go
+++ b/pkg/store/mockstore/unistore/cophandler/mpp.go
@@ -680,7 +680,7 @@ type ExchangerTunnel struct {
 	ErrCh chan error
 }
 
-// RecvChunk recive tipb chunk
+// RecvChunk receives a tipb chunk
 func (tunnel *ExchangerTunnel) RecvChunk() (tipbChunk *tipb.Chunk, err error) {
 	tipbChunk = <-tunnel.DataCh
 	select {
diff --git a/pkg/store/mockstore/unistore/tikv/deadlock.go b/pkg/store/mockstore/unistore/tikv/deadlock.go
index fd4e90e35f9e5..70119f05d2942 100644
--- a/pkg/store/mockstore/unistore/tikv/deadlock.go
+++ b/pkg/store/mockstore/unistore/tikv/deadlock.go
@@ -194,7 +194,7 @@ func (dt *DetectorClient) handleRemoteTask(requestType deadlockPb.DeadlockReques
 	dt.sendCh <- detectReq
 }
 
-// CleanUp processes cleaup task on local detector
+// CleanUp processes cleanup task on local detector
 // user interfaces
 func (dt *DetectorClient) CleanUp(startTs uint64) {
 	dt.handleRemoteTask(deadlockPb.DeadlockRequestType_CleanUp, startTs, 0, 0, diagnosticContext{})
diff --git a/pkg/store/mockstore/unistore/tikv/mvcc_test.go b/pkg/store/mockstore/unistore/tikv/mvcc_test.go
index 9cfd00d5a06c7..11303185d5303 100644
--- a/pkg/store/mockstore/unistore/tikv/mvcc_test.go
+++ b/pkg/store/mockstore/unistore/tikv/mvcc_test.go
@@ -1592,7 +1592,7 @@ func TestAccessCommittedLocks(t *testing.T) {
 	v2 := []byte("v2")
 	MustPrewritePut(k2, k2, v2, 70, store)
 
-	// lock for ingore
+	// lock for ignore
 	k3 := []byte("t3")
 	v3 := []byte("v3")
 	MustPrewritePut(k3, k3, v3, 80, store)