Squashed 'br/' changes from cf621d7c0..3b1308e89
3b1308e89 lightning: evaluate all generated columns even if they are virtual (pingcap#1407)
1b0e54c2f lightning: check and restore pd scheduler even if our task failed (pingcap#1336)
073d3596c lightning: fix the bug that calculate unfinished ranges may miss some range (pingcap#1413)
82ff927df backup: refine backup log for tracing backup tasks (pingcap#1335)
e907562e8 lightning: make create table ddl compatible with clustered index (pingcap#1364)
b6cab4b27 lightning: auto configure for parallel import (pingcap#1387)

git-subtree-dir: br
git-subtree-split: 3b1308e89de39325c3f0aa8931aebdf56db663f2
3pointer committed Aug 6, 2021

1 parent f5053bc commit cc25ce9
Showing 36 changed files with 1,130 additions and 420 deletions.
3 changes: 1 addition & 2 deletions cmd/br/debug.go
@@ -11,8 +11,6 @@ import (
 	"path"
 	"reflect"
 
-	"github.com/pingcap/br/pkg/metautil"
-
 	"github.com/gogo/protobuf/proto"
 	"github.com/pingcap/errors"
 	backuppb "github.com/pingcap/kvproto/pkg/backup"
@@ -24,6 +22,7 @@ import (
 
 	berrors "github.com/pingcap/br/pkg/errors"
 	"github.com/pingcap/br/pkg/logutil"
+	"github.com/pingcap/br/pkg/metautil"
 	"github.com/pingcap/br/pkg/mock/mockid"
 	"github.com/pingcap/br/pkg/restore"
 	"github.com/pingcap/br/pkg/rtree"
6 changes: 3 additions & 3 deletions go.mod
@@ -28,16 +28,16 @@ require (
 	github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd
 	github.com/pingcap/kvproto v0.0.0-20210722091755-91a52cd9e8db
 	github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7
-	github.com/pingcap/parser v0.0.0-20210707071004-31c87e37af5c
-	github.com/pingcap/tidb v0.0.0-20210727021217-150fe0c37755
+	github.com/pingcap/parser v0.0.0-20210805052952-eda5e763a66e
+	github.com/pingcap/tidb v1.1.0-beta.0.20210805015907-39aae6a455d5
 	github.com/pingcap/tidb-tools v5.0.3+incompatible
 	github.com/pingcap/tipb v0.0.0-20210708040514-0f154bb0dc0f
 	github.com/prometheus/client_golang v1.5.1
 	github.com/prometheus/client_model v0.2.0
 	github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0
 	github.com/spf13/cobra v1.0.0
 	github.com/spf13/pflag v1.0.5
-	github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210726054004-ce977e34b0dd
+	github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210727120905-55155ad2e543
 	github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d
 	github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457
 	github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0
15 changes: 7 additions & 8 deletions go.sum
@@ -457,7 +457,6 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb
 github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
 github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
 github.com/pingcap/kvproto v0.0.0-20210527074428-73468940541b/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20210712050333-b66fdbd6bfd5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
 github.com/pingcap/kvproto v0.0.0-20210722091755-91a52cd9e8db h1:PSW6P83KZi5WopPBiecU286PWMSl2rvxCBZT94iBX+I=
 github.com/pingcap/kvproto v0.0.0-20210722091755-91a52cd9e8db/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
@@ -466,13 +465,14 @@ github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIf
 github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7 h1:k2BbABz9+TNpYRwsCCFS8pEEnFVOdbgEjL/kTlLuzZQ=
 github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
 github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
-github.com/pingcap/parser v0.0.0-20210707071004-31c87e37af5c h1:FPBMwDTtMW25zyLLBrIMUnM7Zl/WtLxR9LSEiaL5hII=
-github.com/pingcap/parser v0.0.0-20210707071004-31c87e37af5c/go.mod h1:Ek0mLKEqUGnQqBw1JnYrJQxsguU433DU68yUbsoeJ7s=
+github.com/pingcap/parser v0.0.0-20210802034743-dd9b189324ce/go.mod h1:Ek0mLKEqUGnQqBw1JnYrJQxsguU433DU68yUbsoeJ7s=
+github.com/pingcap/parser v0.0.0-20210805052952-eda5e763a66e h1:EymNdGyPn7q/iR4gsa6QFvRbTsrk4p5vGEtJ3JCWE1Q=
+github.com/pingcap/parser v0.0.0-20210805052952-eda5e763a66e/go.mod h1:Ek0mLKEqUGnQqBw1JnYrJQxsguU433DU68yUbsoeJ7s=
 github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 h1:A9KL9R+lWSVPH8IqUuH1QSTRJ5FGoY1bT2IcfPKsWD8=
 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3/go.mod h1:tckvA041UWP+NqYzrJ3fMgC/Hw9wnmQ/tUkp/JaHly8=
-github.com/pingcap/tidb v0.0.0-20210727021217-150fe0c37755 h1:1TxBadczCQGoOtmKh0VPRiT60tTH0qFLFaNRJO45KIk=
-github.com/pingcap/tidb v0.0.0-20210727021217-150fe0c37755/go.mod h1:HdSBIf/qkvB7PSVEzr3vO++xrTKO5E8WjWMr3nTMLtY=
+github.com/pingcap/tidb v1.1.0-beta.0.20210805015907-39aae6a455d5 h1:mADjBffSIHXec5mB+rUS+pfGzA/JRUSnkgYc7v0fSwc=
+github.com/pingcap/tidb v1.1.0-beta.0.20210805015907-39aae6a455d5/go.mod h1:8n0VPQrelzJ09VYFg1eyvh7kGn3CmoWomwKXvDxDd2Y=
 github.com/pingcap/tidb-dashboard v0.0.0-20210512074702-4ee3e3909d5e/go.mod h1:7HnQAeqKOuJwCBUeglCUel7SjW6fPNnoXawuv+6Q6Ek=
 github.com/pingcap/tidb-tools v5.0.3+incompatible h1:vYMrW9ux+3HRMeRZ1fUOjy2nyiodtuVyAyK270EKBEs=
 github.com/pingcap/tidb-tools v5.0.3+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
@@ -586,9 +586,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK
 github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
 github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210723073051-3d747cd203ba/go.mod h1:+bOiuuZZUqIq19EqyhTWQFaB0PeXLOh/il1vnVZx3Tk=
-github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210726054004-ce977e34b0dd h1:Wi2kmhetf3CmflXquPSNNxTaFbpHg0nM4X36BqEG2y0=
-github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210726054004-ce977e34b0dd/go.mod h1:+bOiuuZZUqIq19EqyhTWQFaB0PeXLOh/il1vnVZx3Tk=
+github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210727120905-55155ad2e543 h1:Yi/Xn7NdbxicB/4Ve3myyqtEkqiVaAjWYUDTTETBdFg=
+github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210727120905-55155ad2e543/go.mod h1:LP8O6zZXAWKU781u1nt/v9nC0hxPipPxOxSoZT9Goqg=
 github.com/tikv/pd v1.1.0-beta.0.20210609101029-3ba158cf41a4 h1:QljEYXHc2krYg6f1zol6f7Ut9QBDtK1UVm9UZOT0YFE=
 github.com/tikv/pd v1.1.0-beta.0.20210609101029-3ba158cf41a4/go.mod h1:4AiyUYyIG4cA7P+xDU/Mep0yo4Hb+2IfFstiOSKFbJ4=
 github.com/tklauser/go-sysconf v0.3.4 h1:HT8SVixZd3IzLdfs/xlpq0jeSfTX57g1v6wB1EuzV7M=
64 changes: 31 additions & 33 deletions pkg/backup/client.go
@@ -423,10 +423,12 @@ func (bc *Client) BackupRanges(
 	// we collect all files in a single goroutine to avoid thread safety issues.
 	workerPool := utils.NewWorkerPool(concurrency, "Ranges")
 	eg, ectx := errgroup.WithContext(ctx)
-	for _, r := range ranges {
+	for id, r := range ranges {
+		id := id
 		sk, ek := r.StartKey, r.EndKey
 		workerPool.ApplyOnErrorGroup(eg, func() error {
-			err := bc.BackupRange(ectx, sk, ek, req, metaWriter, progressCallBack)
+			elctx := logutil.ContextWithField(ectx, logutil.RedactAny("range-sn", id))
+			err := bc.BackupRange(elctx, sk, ek, req, metaWriter, progressCallBack)
 			if err != nil {
 				return errors.Trace(err)
 			}
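The hunk above combines two Go idioms: rebinding the loop variable (`id := id`) so each closure handed to the worker pool captures its own copy, and attaching a range serial number to the context so every downstream log line carries it automatically. A minimal runnable sketch of the same pattern, using plain zap rather than br's logutil (`withFields` and `fromCtx` are illustrative stand-ins, not br's API):

package main

import (
	"context"
	"sync"

	"go.uber.org/zap"
)

type ctxLogKey struct{}

// withFields returns a child context whose logger carries extra fields,
// analogous to logutil.ContextWithField in the diff above.
func withFields(ctx context.Context, fields ...zap.Field) context.Context {
	return context.WithValue(ctx, ctxLogKey{}, fromCtx(ctx).With(fields...))
}

// fromCtx extracts the context-scoped logger, analogous to logutil.CL.
func fromCtx(ctx context.Context) *zap.Logger {
	if l, ok := ctx.Value(ctxLogKey{}).(*zap.Logger); ok {
		return l
	}
	return zap.NewExample()
}

func main() {
	var wg sync.WaitGroup
	for id, r := range []string{"[a,b)", "[b,c)"} {
		id, r := id, r // rebind: each goroutine must see its own copy (pre-Go 1.22 semantics)
		ctx := withFields(context.Background(), zap.Int("range-sn", id))
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Every log call through this context is tagged with range-sn.
			fromCtx(ctx).Info("backup range started", zap.String("range", r))
		}()
	}
	wg.Wait()
}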
@@ -448,15 +450,14 @@ func (bc *Client) BackupRange(
 	start := time.Now()
 	defer func() {
 		elapsed := time.Since(start)
-		log.Info("backup range finished", zap.Duration("take", elapsed))
+		logutil.CL(ctx).Info("backup range finished", zap.Duration("take", elapsed))
 		key := "range start:" + hex.EncodeToString(startKey) + " end:" + hex.EncodeToString(endKey)
 		if err != nil {
 			summary.CollectFailureUnit(key, err)
 		}
 	}()
-	log.Info("backup started",
-		logutil.Key("startKey", startKey),
-		logutil.Key("endKey", endKey),
+	logutil.CL(ctx).Info("backup started",
+		logutil.Key("startKey", startKey), logutil.Key("endKey", endKey),
 		zap.Uint64("rateLimit", req.RateLimit),
 		zap.Uint32("concurrency", req.Concurrency))
 
@@ -477,7 +478,7 @@ func (bc *Client) BackupRange(
 	if err != nil {
 		return errors.Trace(err)
 	}
-	log.Info("finish backup push down", zap.Int("Ok", results.Len()))
+	logutil.CL(ctx).Info("finish backup push down", zap.Int("small-range-count", results.Len()))
 
 	// Find and backup remaining ranges.
 	// TODO: test fine grained backup.
@@ -492,12 +493,12 @@ func (bc *Client) BackupRange(
 	progressCallBack(RangeUnit)
 
 	if req.IsRawKv {
-		log.Info("backup raw ranges",
+		logutil.CL(ctx).Info("raw ranges backed up",
 			logutil.Key("startKey", startKey),
 			logutil.Key("endKey", endKey),
 			zap.String("cf", req.Cf))
 	} else {
-		log.Info("backup time range",
+		logutil.CL(ctx).Info("time range backed up",
 			zap.Reflect("StartVersion", req.StartVersion),
 			zap.Reflect("EndVersion", req.EndVersion))
 	}
@@ -592,7 +593,7 @@ func (bc *Client) fineGrainedBackup(
 	if len(incomplete) == 0 {
 		return nil
 	}
-	log.Info("start fine grained backup", zap.Int("incomplete", len(incomplete)))
+	logutil.CL(ctx).Info("start fine grained backup", zap.Int("incomplete", len(incomplete)))
 	// Step2, retry backup on incomplete range
 	respCh := make(chan *backuppb.BackupResponse, 4)
 	errCh := make(chan error, 4)
@@ -649,12 +650,12 @@ func (bc *Client) fineGrainedBackup(
 				break selectLoop
 			}
 			if resp.Error != nil {
-				log.Panic("unexpected backup error",
+				logutil.CL(ctx).Panic("unexpected backup error",
 					zap.Reflect("error", resp.Error))
 			}
-			log.Info("put fine grained range",
-				logutil.Key("startKey", resp.StartKey),
-				logutil.Key("endKey", resp.EndKey),
+			logutil.CL(ctx).Info("put fine grained range",
+				logutil.Key("fine-grained-range-start", resp.StartKey),
+				logutil.Key("fine-grained-range-end", resp.EndKey),
 			)
 			rangeTree.Put(resp.StartKey, resp.EndKey, resp.Files)
 
@@ -782,11 +783,11 @@ func (bc *Client) handleFineGrained(
 		if berrors.Is(err, berrors.ErrFailedToConnect) {
 			// When the leader store is died,
 			// 20s for the default max duration before the raft election timer fires.
-			log.Warn("failed to connect to store, skipping", logutil.ShortError(err), zap.Uint64("storeID", storeID))
+			logutil.CL(ctx).Warn("failed to connect to store, skipping", logutil.ShortError(err), zap.Uint64("storeID", storeID))
 			return 20000, nil
 		}
 
-		log.Error("fail to connect store", zap.Uint64("StoreID", storeID))
+		logutil.CL(ctx).Error("fail to connect store", zap.Uint64("StoreID", storeID))
 		return 0, errors.Annotatef(err, "failed to connect to store %d", storeID)
 	}
 	hasProgress := false
@@ -813,17 +814,17 @@ func (bc *Client) handleFineGrained(
 			return nil
 		},
 		func() (backuppb.BackupClient, error) {
-			log.Warn("reset the connection in handleFineGrained", zap.Uint64("storeID", storeID))
+			logutil.CL(ctx).Warn("reset the connection in handleFineGrained", zap.Uint64("storeID", storeID))
 			return bc.mgr.ResetBackupClient(ctx, storeID)
 		})
 	if err != nil {
 		if berrors.Is(err, berrors.ErrFailedToConnect) {
 			// When the leader store is died,
 			// 20s for the default max duration before the raft election timer fires.
-			log.Warn("failed to connect to store, skipping", logutil.ShortError(err), zap.Uint64("storeID", storeID))
+			logutil.CL(ctx).Warn("failed to connect to store, skipping", logutil.ShortError(err), zap.Uint64("storeID", storeID))
 			return 20000, nil
 		}
-		log.Error("failed to send fine-grained backup", zap.Uint64("storeID", storeID), logutil.ShortError(err))
+		logutil.CL(ctx).Error("failed to send fine-grained backup", zap.Uint64("storeID", storeID), logutil.ShortError(err))
 		return 0, errors.Annotatef(err, "failed to send fine-grained backup [%s, %s)",
 			redact.Key(req.StartKey), redact.Key(req.EndKey))
 	}
@@ -841,6 +842,7 @@ func (bc *Client) handleFineGrained(
 // Stop receiving response if respFn returns error.
 func SendBackup(
 	ctx context.Context,
+	// the `storeID` seems only used for logging now, maybe we can remove it then?
 	storeID uint64,
 	client backuppb.BackupClient,
 	req backuppb.BackupRequest,
@@ -859,14 +861,11 @@ func SendBackup(
 	var errReset error
 backupLoop:
 	for retry := 0; retry < backupRetryTimes; retry++ {
-		log.Info("try backup",
-			logutil.Key("startKey", req.StartKey),
-			logutil.Key("endKey", req.EndKey),
-			zap.Uint64("storeID", storeID),
+		logutil.CL(ctx).Info("try backup",
 			zap.Int("retry time", retry),
 		)
 		failpoint.Inject("hint-backup-start", func(v failpoint.Value) {
-			log.Info("failpoint hint-backup-start injected, " +
+			logutil.CL(ctx).Info("failpoint hint-backup-start injected, " +
 				"process will notify the shell.")
 			if sigFile, ok := v.(string); ok {
 				file, err := os.Create(sigFile)
@@ -882,13 +881,13 @@ backupLoop:
 		bcli, err := client.Backup(ctx, &req)
 		failpoint.Inject("reset-retryable-error", func(val failpoint.Value) {
 			if val.(bool) {
-				log.Debug("failpoint reset-retryable-error injected.")
+				logutil.CL(ctx).Debug("failpoint reset-retryable-error injected.")
 				err = status.Error(codes.Unavailable, "Unavailable error")
 			}
 		})
 		failpoint.Inject("reset-not-retryable-error", func(val failpoint.Value) {
			if val.(bool) {
-				log.Debug("failpoint reset-not-retryable-error injected.")
+				logutil.CL(ctx).Debug("failpoint reset-not-retryable-error injected.")
 				err = status.Error(codes.Unknown, "Your server was haunted hence doesn't work, meow :3")
 			}
 		})
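The failpoint.Inject hooks above are empty markers in a normal build; after the source is rewritten with failpoint-ctl, a test can activate a failpoint by name to force an error path without a real fault. A small self-contained sketch under that assumption, with a hypothetical failpoint name `mock-connect-error` (import path spelled as upstream github.com; this page's mirror renders it as github.com):

package main

import (
	"fmt"

	"github.com/pingcap/failpoint"
)

// connect pretends to open a backup stream; the injected failpoint lets a
// test force the error branch without any real network fault.
func connect() error {
	var err error
	failpoint.Inject("mock-connect-error", func(val failpoint.Value) {
		if val.(bool) {
			err = fmt.Errorf("injected connect error")
		}
	})
	return err
}

func main() {
	// Prints <nil> in a plain build; in a failpoint-ctl-rewritten build,
	// GO_FAILPOINTS='main/mock-connect-error=return(true)' makes it fail.
	fmt.Println(connect())
}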
@@ -902,7 +901,7 @@ backupLoop:
 			}
 			continue
 		}
-		log.Error("fail to backup", zap.Uint64("StoreID", storeID),
+		logutil.CL(ctx).Error("fail to backup", zap.Uint64("StoreID", storeID),
 			zap.Int("retry time", retry))
 		return berrors.ErrFailedToConnect.Wrap(err).GenWithStack("failed to create backup stream to store %d", storeID)
 	}
@@ -912,9 +911,8 @@ backupLoop:
 		resp, err := bcli.Recv()
 		if err != nil {
 			if errors.Cause(err) == io.EOF { // nolint:errorlint
-				log.Info("backup streaming finish",
-					zap.Uint64("StoreID", storeID),
-					zap.Int("retry time", retry))
+				logutil.CL(ctx).Info("backup streaming finish",
+					zap.Int("retry-time", retry))
 				break backupLoop
 			}
 			if isRetryableError(err) {
@@ -931,9 +929,9 @@ backupLoop:
 		}
 
 		// TODO: handle errors in the resp.
-		log.Info("range backuped",
-			logutil.Key("startKey", resp.GetStartKey()),
-			logutil.Key("endKey", resp.GetEndKey()))
+		logutil.CL(ctx).Info("range backed up",
+			logutil.Key("small-range-start-key", resp.GetStartKey()),
+			logutil.Key("small-range-end-key", resp.GetEndKey()))
 		err = respFn(resp)
 		if err != nil {
 			return errors.Trace(err)
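The @@ -912 hunk treats io.EOF from bcli.Recv() as the normal end of the backup stream, not a failure. A stdlib-only sketch of that receive-loop shape (the diff unwraps with pingcap/errors' Cause; errors.Is is the standard-library equivalent):

package main

import (
	"errors"
	"fmt"
	"io"
)

// consume drains a streaming receive function until io.EOF, which gRPC
// streams use to signal a clean end rather than an error.
func consume(recv func() (string, error)) error {
	for {
		msg, err := recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return nil // normal end of stream
			}
			return err // real error: the caller may retry if it is retryable
		}
		fmt.Println("received:", msg)
	}
}

func main() {
	msgs := []string{"range [a,b)", "range [b,c)"}
	i := 0
	err := consume(func() (string, error) {
		if i == len(msgs) {
			return "", io.EOF
		}
		m := msgs[i]
		i++
		return m, nil
	})
	fmt.Println("err:", err)
}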
34 changes: 17 additions & 17 deletions pkg/backup/push.go
@@ -12,7 +12,6 @@ import (
 	"github.com/pingcap/failpoint"
 	backuppb "github.com/pingcap/kvproto/pkg/backup"
 	"github.com/pingcap/kvproto/pkg/metapb"
-	"github.com/pingcap/log"
 	"go.uber.org/zap"
 
 	berrors "github.com/pingcap/br/pkg/errors"
@@ -67,30 +66,31 @@ func (push *pushDown) pushBackup(
 	// Push down backup tasks to all tikv instances.
 	res := rtree.NewRangeTree()
 	failpoint.Inject("noop-backup", func(_ failpoint.Value) {
-		log.Warn("skipping normal backup, jump to fine-grained backup, meow :3", logutil.Key("start-key", req.StartKey), logutil.Key("end-key", req.EndKey))
+		logutil.CL(ctx).Warn("skipping normal backup, jump to fine-grained backup, meow :3", logutil.Key("start-key", req.StartKey), logutil.Key("end-key", req.EndKey))
 		failpoint.Return(res, nil)
 	})
 
 	wg := new(sync.WaitGroup)
 	for _, s := range stores {
 		store := s
 		storeID := s.GetId()
+		lctx := logutil.ContextWithField(ctx, zap.Uint64("store-id", storeID))
 		if s.GetState() != metapb.StoreState_Up {
-			log.Warn("skip store", zap.Uint64("StoreID", storeID), zap.Stringer("State", s.GetState()))
+			logutil.CL(lctx).Warn("skip store", zap.Stringer("State", s.GetState()))
 			continue
 		}
-		client, err := push.mgr.GetBackupClient(ctx, storeID)
+		client, err := push.mgr.GetBackupClient(lctx, storeID)
 		if err != nil {
 			// BR should be able to backup even some of stores disconnected.
 			// The regions managed by this store can be retried at fine-grained backup then.
-			log.Warn("fail to connect store, skipping", zap.Uint64("StoreID", storeID), zap.Error(err))
+			logutil.CL(lctx).Warn("fail to connect store, skipping", zap.Error(err))
 			return res, nil
 		}
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
 			err := SendBackup(
-				ctx, storeID, client, req,
+				lctx, storeID, client, req,
 				func(resp *backuppb.BackupResponse) error {
 					// Forward all responses (including error).
 					push.respCh <- responseAndStore{
@@ -100,8 +100,8 @@ func (push *pushDown) pushBackup(
 					return nil
 				},
 				func() (backuppb.BackupClient, error) {
-					log.Warn("reset the connection in push", zap.Uint64("storeID", storeID))
-					return push.mgr.ResetBackupClient(ctx, storeID)
+					logutil.CL(lctx).Warn("reset the connection in push")
+					return push.mgr.ResetBackupClient(lctx, storeID)
 				})
 			// Disconnected stores can be ignored.
 			if err != nil {
@@ -128,14 +128,14 @@ func (push *pushDown) pushBackup(
 			}
 			failpoint.Inject("backup-storage-error", func(val failpoint.Value) {
 				msg := val.(string)
-				log.Debug("failpoint backup-storage-error injected.", zap.String("msg", msg))
+				logutil.CL(ctx).Debug("failpoint backup-storage-error injected.", zap.String("msg", msg))
 				resp.Error = &backuppb.Error{
 					Msg: msg,
 				}
 			})
 			failpoint.Inject("tikv-rw-error", func(val failpoint.Value) {
 				msg := val.(string)
-				log.Debug("failpoint tikv-rw-error injected.", zap.String("msg", msg))
+				logutil.CL(ctx).Debug("failpoint tikv-rw-error injected.", zap.String("msg", msg))
 				resp.Error = &backuppb.Error{
 					Msg: msg,
 				}
@@ -151,28 +151,28 @@ func (push *pushDown) pushBackup(
 			errPb := resp.GetError()
 			switch v := errPb.Detail.(type) {
 			case *backuppb.Error_KvError:
-				log.Warn("backup occur kv error", zap.Reflect("error", v))
+				logutil.CL(ctx).Warn("backup occur kv error", zap.Reflect("error", v))
 
 			case *backuppb.Error_RegionError:
-				log.Warn("backup occur region error", zap.Reflect("error", v))
+				logutil.CL(ctx).Warn("backup occur region error", zap.Reflect("error", v))
 
 			case *backuppb.Error_ClusterIdError:
-				log.Error("backup occur cluster ID error", zap.Reflect("error", v))
+				logutil.CL(ctx).Error("backup occur cluster ID error", zap.Reflect("error", v))
 				return res, errors.Annotatef(berrors.ErrKVClusterIDMismatch, "%v", errPb)
 			default:
 				if utils.MessageIsRetryableStorageError(errPb.GetMsg()) {
-					log.Warn("backup occur storage error", zap.String("error", errPb.GetMsg()))
+					logutil.CL(ctx).Warn("backup occur storage error", zap.String("error", errPb.GetMsg()))
 					continue
 				}
 				if utils.MessageIsNotFoundStorageError(errPb.GetMsg()) {
 					errMsg := fmt.Sprintf("File or directory not found error occurs on TiKV Node(store id: %v; Address: %s)", store.GetId(), redact.String(store.GetAddress()))
-					log.Error("", zap.String("error", berrors.ErrKVStorage.Error()+": "+errMsg),
+					logutil.CL(ctx).Error("", zap.String("error", berrors.ErrKVStorage.Error()+": "+errMsg),
 						zap.String("work around", "please ensure br and tikv node share a same disk and the user of br and tikv has same uid."))
 				}
 
 				if utils.MessageIsPermissionDeniedStorageError(errPb.GetMsg()) {
 					errMsg := fmt.Sprintf("I/O permission denied error occurs on TiKV Node(store id: %v; Address: %s)", store.GetId(), redact.String(store.GetAddress()))
-					log.Error("", zap.String("error", berrors.ErrKVStorage.Error()+": "+errMsg),
+					logutil.CL(ctx).Error("", zap.String("error", berrors.ErrKVStorage.Error()+": "+errMsg),
 						zap.String("work around", "please ensure tikv has permission to read from & write to the storage."))
 				}
 				return res, berrors.ErrKVStorage
@@ -182,7 +182,7 @@ func (push *pushDown) pushBackup(
 		if !berrors.Is(err, berrors.ErrFailedToConnect) {
 			return res, errors.Annotatef(err, "failed to backup range [%s, %s)", redact.Key(req.StartKey), redact.Key(req.EndKey))
 		}
-		log.Warn("skipping disconnected stores", logutil.ShortError(err))
+		logutil.CL(ctx).Warn("skipping disconnected stores", logutil.ShortError(err))
 		return res, nil
 	}
 }
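Taken together, pushBackup's control flow after this change reduces to: spawn one sender goroutine per store in the Up state, give it a store-scoped logging context, and skip disconnected stores so fine-grained backup can retry their regions later. A schematic sketch with illustrative names only (store and pushToStores are not br's API):

package main

import (
	"fmt"
	"sync"
)

type store struct {
	id uint64
	up bool
}

// pushToStores mirrors pushBackup's fan-out: one goroutine per healthy
// store; stores that are down or unreachable are skipped rather than
// failing the whole task.
func pushToStores(stores []store, send func(id uint64) error) {
	var wg sync.WaitGroup
	for _, s := range stores {
		s := s // capture for the goroutine below
		if !s.up {
			fmt.Printf("skip store %d: not up\n", s.id)
			continue
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := send(s.id); err != nil {
				// Disconnected stores are ignored; fine-grained backup
				// picks up their regions afterwards.
				fmt.Printf("store %d disconnected, ignored: %v\n", s.id, err)
			}
		}()
	}
	wg.Wait()
}

func main() {
	pushToStores([]store{{1, true}, {2, false}}, func(id uint64) error {
		fmt.Printf("backup pushed to store %d\n", id)
		return nil
	})
}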
(The remaining 31 of the 36 changed files are not shown here.)