diff --git a/.golangci.yml b/.golangci.yml index bee10879c..1aeced02b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,17 +5,21 @@ run: linters-settings: lll: line-length: 120 + staticcheck: + checks: + - all + - '-SA1019' # it is okay to use math/rand at times linters: disable-all: true enable: - # - errcheck - # - ineffassign - # - gas + - errcheck + - ineffassign + # - gas - gofmt - # - gosimple - # - govet - # - lll - # - unused - # - staticcheck + - gosimple + - govet + - lll + - unused + - staticcheck - goimports diff --git a/backup.go b/backup.go index 9af409027..fcc8a1086 100644 --- a/backup.go +++ b/backup.go @@ -79,8 +79,7 @@ func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) { var valCopy []byte if !item.IsDeletedOrExpired() { // No need to copy value, if item is deleted or expired. - var err error - err = item.Value(func(val []byte) error { + err := item.Value(func(val []byte) error { valCopy = a.Copy(val) return nil }) diff --git a/badger/cmd/flatten.go b/badger/cmd/flatten.go index 66095520e..7fbfc6386 100644 --- a/badger/cmd/flatten.go +++ b/badger/cmd/flatten.go @@ -67,7 +67,7 @@ func flatten(cmd *cobra.Command, args []string) error { if err != nil { return err } - if fo.compressionType < 0 || fo.compressionType > 2 { + if fo.compressionType > 2 { return errors.Errorf( "compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)") } diff --git a/badger/cmd/rotate_test.go b/badger/cmd/rotate_test.go index 54519bbef..b718f0b7a 100644 --- a/badger/cmd/rotate_test.go +++ b/badger/cmd/rotate_test.go @@ -108,9 +108,9 @@ func TestRotatePlainTextToEncrypted(t *testing.T) { db, err := badger.Open(opts) require.NoError(t, err) - db.Update(func(txn *badger.Txn) error { + require.NoError(t, db.Update(func(txn *badger.Txn) error { return txn.Set([]byte("foo"), []byte("bar")) - }) + })) require.NoError(t, db.Close()) @@ -140,7 +140,7 @@ func TestRotatePlainTextToEncrypted(t *testing.T) { db, err = badger.Open(opts) 
require.NoError(t, err) - db.View(func(txn *badger.Txn) error { + require.NoError(t, db.View(func(txn *badger.Txn) error { iopt := badger.DefaultIteratorOptions it := txn.NewIterator(iopt) defer it.Close() @@ -150,6 +150,6 @@ func TestRotatePlainTextToEncrypted(t *testing.T) { } require.Equal(t, 1, count) return nil - }) + })) require.NoError(t, db.Close()) } diff --git a/badger/cmd/stream.go b/badger/cmd/stream.go index 58e6c4535..a7d64f15f 100644 --- a/badger/cmd/stream.go +++ b/badger/cmd/stream.go @@ -85,7 +85,7 @@ func stream(cmd *cobra.Command, args []string) error { WithEncryptionKey(encKey) // Options for output DB. - if so.compressionType < 0 || so.compressionType > 2 { + if so.compressionType > 2 { return errors.Errorf( "compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)") } @@ -126,6 +126,7 @@ func stream(cmd *cobra.Command, args []string) error { f, err := os.OpenFile(so.outFile, os.O_RDWR|os.O_CREATE, 0666) y.Check(err) _, err = stream.Backup(f, 0) + y.Check(err) } fmt.Println("Done.") return err diff --git a/badger/cmd/write_bench.go b/badger/cmd/write_bench.go index b1865e1b1..d45f82c4e 100644 --- a/badger/cmd/write_bench.go +++ b/badger/cmd/write_bench.go @@ -314,7 +314,7 @@ func writeBench(cmd *cobra.Command, args []string) error { } c.SignalAndWait() - fmt.Printf(db.LevelsToString()) + fmt.Println(db.LevelsToString()) return err } @@ -401,7 +401,7 @@ func reportStats(c *z.Closer, db *badger.DB) { humanize.IBytes(uint64(z.NumAllocBytes()))) if count%10 == 0 { - fmt.Printf(db.LevelsToString()) + fmt.Println(db.LevelsToString()) } } } diff --git a/db.go b/db.go index 3e98be4d0..d4f1e06ae 100644 --- a/db.go +++ b/db.go @@ -49,10 +49,6 @@ var ( bannedNsKey = []byte("!badger!banned") // For storing the banned namespaces. 
) -const ( - maxNumSplits = 128 -) - type closers struct { updateSize *z.Closer compactors *z.Closer @@ -1872,7 +1868,10 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches } c := z.NewCloser(1) - s := db.pub.newSubscriber(c, matches) + s, err := db.pub.newSubscriber(c, matches) + if err != nil { + return y.Wrapf(err, "while creating a new subscriber") + } slurp := func(batch *pb.KVList) error { for { select { @@ -1926,11 +1925,6 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches } } -// shouldEncrypt returns bool, which tells whether to encrypt or not. -func (db *DB) shouldEncrypt() bool { - return len(db.opt.EncryptionKey) > 0 -} - func (db *DB) syncDir(dir string) error { if db.opt.InMemory { return nil @@ -1971,7 +1965,7 @@ func (db *DB) StreamDB(outOptions Options) error { defer outDB.Close() writer := outDB.NewStreamWriter() if err := writer.Prepare(); err != nil { - y.Wrapf(err, "cannot create stream writer in out DB at %s", outDir) + return y.Wrapf(err, "cannot create stream writer in out DB at %s", outDir) } // Stream contents of DB to the output DB. diff --git a/db2_test.go b/db2_test.go index 6cb88aa12..a6e184e6b 100644 --- a/db2_test.go +++ b/db2_test.go @@ -883,7 +883,7 @@ func TestMaxVersion(t *testing.T) { rand.Read(k) // Create multiple version of the same key. for i := 1; i <= N; i++ { - wb.SetEntryAt(&Entry{Key: k}, uint64(i)) + require.NoError(t, wb.SetEntryAt(&Entry{Key: k}, uint64(i))) } require.NoError(t, wb.Flush()) @@ -906,7 +906,7 @@ func TestMaxVersion(t *testing.T) { // This will create commits from 1 to N. 
for i := 1; i <= N; i++ { - wb.SetEntryAt(&Entry{Key: []byte(fmt.Sprintf("%d", i))}, uint64(i)) + require.NoError(t, wb.SetEntryAt(&Entry{Key: []byte(fmt.Sprintf("%d", i))}, uint64(i))) } require.NoError(t, wb.Flush()) @@ -1001,12 +1001,12 @@ func TestKeyCount(t *testing.T) { write := func(kvs *pb.KVList) error { buf := z.NewBuffer(1<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() for _, kv := range kvs.Kv { KVToBuffer(kv, buf) } - writer.Write(buf) + require.NoError(t, writer.Write(buf)) return nil } diff --git a/db_test.go b/db_test.go index 0535adcb2..19d81c5ff 100644 --- a/db_test.go +++ b/db_test.go @@ -610,7 +610,7 @@ func TestGetMore(t *testing.T) { } require.NoError(t, txn.Commit()) } - db.validate() + require.NoError(t, db.validate()) for i := 0; i < n; i++ { if (i % 10000) == 0 { // Display some progress. Right now, it's not very fast with no caching. @@ -643,7 +643,7 @@ func TestExistsMore(t *testing.T) { } require.NoError(t, txn.Commit()) } - db.validate() + require.NoError(t, db.validate()) for i := 0; i < n; i++ { if (i % 1000) == 0 { @@ -673,7 +673,7 @@ func TestExistsMore(t *testing.T) { } require.NoError(t, txn.Commit()) } - db.validate() + require.NoError(t, db.validate()) for i := 0; i < n; i++ { if (i % 10000) == 0 { // Display some progress. Right now, it's not very fast with no caching. @@ -1231,7 +1231,7 @@ func TestDiscardVersionsBelow(t *testing.T) { opts.PrefetchValues = false // Verify that there are 4 versions, and record 3rd version (2nd from top in iteration) - db.View(func(txn *Txn) error { + require.NoError(t, db.View(func(txn *Txn) error { it := txn.NewIterator(opts) defer it.Close() var count int @@ -1245,7 +1245,7 @@ func TestDiscardVersionsBelow(t *testing.T) { } require.Equal(t, 4, count) return nil - }) + })) // Set new version and discard older ones. 
err := db.Update(func(txn *Txn) error { @@ -1255,7 +1255,7 @@ func TestDiscardVersionsBelow(t *testing.T) { // Verify that there are only 2 versions left, and versions // below ts have been deleted. - db.View(func(txn *Txn) error { + require.NoError(t, db.View(func(txn *Txn) error { it := txn.NewIterator(opts) defer it.Close() var count int @@ -1269,7 +1269,7 @@ func TestDiscardVersionsBelow(t *testing.T) { } require.Equal(t, 1, count) return nil - }) + })) }) } @@ -1478,7 +1478,7 @@ func TestGetSetDeadlock(t *testing.T) { timeout, done := time.After(10*time.Second), make(chan bool) go func() { - db.Update(func(txn *Txn) error { + require.NoError(t, db.Update(func(txn *Txn) error { item, err := txn.Get(key) require.NoError(t, err) err = item.Value(nil) // This take a RLock on file @@ -1488,7 +1488,7 @@ func TestGetSetDeadlock(t *testing.T) { require.NoError(t, txn.SetEntry(NewEntry(key, val))) require.NoError(t, txn.SetEntry(NewEntry([]byte("key2"), val))) return nil - }) + })) done <- true }() @@ -1818,9 +1818,9 @@ func TestMinReadTs(t *testing.T) { db.orc.readMark.Done(uint64(20)) // Because we called readTs. for i := 0; i < 10; i++ { - db.View(func(txn *Txn) error { + require.NoError(t, db.View(func(txn *Txn) error { return nil - }) + })) } time.Sleep(time.Millisecond) require.Equal(t, uint64(20), db.orc.readMark.DoneUntil()) @@ -2089,7 +2089,7 @@ func TestVerifyChecksum(t *testing.T) { st := 0 buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() for i := 0; i < 1000; i++ { key := make([]byte, 8) binary.BigEndian.PutUint64(key, uint64(i)) @@ -2153,12 +2153,12 @@ func TestWriteInemory(t *testing.T) { item, err := txn.Get([]byte(fmt.Sprintf("key%d", j))) require.NoError(t, err) expected := []byte(fmt.Sprintf("val%d", j)) - item.Value(func(val []byte) error { + require.NoError(t, item.Value(func(val []byte) error { require.Equal(t, expected, val, "Invalid value for key %q. 
expected: %q, actual: %q", item.Key(), expected, val) return nil - }) + })) } return nil }) @@ -2242,7 +2242,7 @@ func TestOpenDBReadOnly(t *testing.T) { var count int read := func() { count = 0 - db.View(func(txn *Txn) error { + require.NoError(t, db.View(func(txn *Txn) error { it := txn.NewIterator(DefaultIteratorOptions) defer it.Close() for it.Rewind(); it.Valid(); it.Next() { @@ -2254,7 +2254,7 @@ func TestOpenDBReadOnly(t *testing.T) { count++ } return nil - }) + })) } read() require.Equal(t, 10, count) diff --git a/iterator_test.go b/iterator_test.go index b54ec28d3..4eca18595 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -147,7 +147,7 @@ func TestIterateSinceTs(t *testing.T) { iopt := DefaultIteratorOptions iopt.SinceTs = sinceTs - db.View(func(txn *Txn) error { + require.NoError(t, db.View(func(txn *Txn) error { it := txn.NewIterator(iopt) defer it.Close() @@ -156,7 +156,7 @@ func TestIterateSinceTs(t *testing.T) { require.GreaterOrEqual(t, i.Version(), sinceTs) } return nil - }) + })) }) } diff --git a/levels.go b/levels.go index 8ddfae7a9..798b2f660 100644 --- a/levels.go +++ b/levels.go @@ -998,20 +998,14 @@ func containsPrefix(table *table.Table, prefix []byte) bool { // In table iterator's Seek, we assume that key has version in last 8 bytes. We set // version=0 (ts=math.MaxUint64), so that we don't skip the key prefixed with prefix. ti.Seek(y.KeyWithTs(prefix, math.MaxUint64)) - if bytes.HasPrefix(ti.Key(), prefix) { - return true - } - return false + return bytes.HasPrefix(ti.Key(), prefix) } if bytes.Compare(prefix, smallValue) > 0 && bytes.Compare(prefix, largeValue) < 0 { // There may be a case when table contains [0x0000,...., 0xffff]. If we are searching for // k=0x0011, we should not directly infer that k is present. It may not be present. 
- if !isPresent() { - return false - } - return true + return isPresent() } return false @@ -1426,8 +1420,8 @@ func (s *levelsController) runCompactDef(id, l int, cd compactDef) (err error) { cd.splits = append(cd.splits, keyRange{}) } - // Table should never be moved directly between levels, always be rewritten to allow discarding - // invalid versions. + // Table should never be moved directly between levels, + // always be rewritten to allow discarding invalid versions. newTables, decr, err := s.compactBuildTables(l, cd) if err != nil { diff --git a/levels_test.go b/levels_test.go index 5c4ec113c..1e4448837 100644 --- a/levels_test.go +++ b/levels_test.go @@ -131,7 +131,7 @@ func TestCheckOverlap(t *testing.T) { } func getAllAndCheck(t *testing.T, db *DB, expected []keyValVersion) { - db.View(func(txn *Txn) error { + require.NoError(t, db.View(func(txn *Txn) error { opt := DefaultIteratorOptions opt.AllVersions = true opt.InternalAccess = true @@ -157,8 +157,7 @@ func getAllAndCheck(t *testing.T, db *DB, expected []keyValVersion) { } require.Equal(t, len(expected), i, "keys examined should be equal to keys expected") return nil - }) - + })) } func TestCompaction(t *testing.T) { @@ -783,7 +782,7 @@ func TestL1Stall(t *testing.T) { go func() { tab := createEmptyTable(db) require.NoError(t, db.lc.addLevel0Table(tab)) - tab.DecrRef() + require.NoError(t, tab.DecrRef()) done <- true }() time.Sleep(time.Second) @@ -792,7 +791,7 @@ func TestL1Stall(t *testing.T) { // Drop two tables from Level 0 so that addLevel0Table can make progress. Earlier table // count was 4 which is equal to L0 stall count. 
toDrop := db.lc.levels[0].tables[:2] - decrRefs(toDrop) + require.NoError(t, decrRefs(toDrop)) db.lc.levels[0].tables = db.lc.levels[0].tables[2:] db.lc.levels[0].Unlock() @@ -849,7 +848,7 @@ func TestL0Stall(t *testing.T) { go func() { tab := createEmptyTable(db) require.NoError(t, db.lc.addLevel0Table(tab)) - tab.DecrRef() + require.NoError(t, tab.DecrRef()) done <- true }() // Let it stall for a second. @@ -1024,7 +1023,7 @@ func TestKeyVersions(t *testing.T) { runBadgerTest(t, &inMemoryOpt, func(t *testing.T, db *DB) { writer := db.newWriteBatch(false) for i := 0; i < 10; i++ { - writer.Set([]byte(fmt.Sprintf("%05d", i)), []byte("foo")) + require.NoError(t, writer.Set([]byte(fmt.Sprintf("%05d", i)), []byte("foo"))) } require.NoError(t, writer.Flush()) require.Equal(t, 2, len(db.Ranges(nil, 10000))) @@ -1034,7 +1033,7 @@ func TestKeyVersions(t *testing.T) { runBadgerTest(t, &inMemoryOpt, func(t *testing.T, db *DB) { writer := db.newWriteBatch(false) for i := 0; i < 100000; i++ { - writer.Set([]byte(fmt.Sprintf("%05d", i)), []byte("foo")) + require.NoError(t, writer.Set([]byte(fmt.Sprintf("%05d", i)), []byte("foo"))) } require.NoError(t, writer.Flush()) require.Equal(t, 11, len(db.Ranges(nil, 10000))) @@ -1044,7 +1043,7 @@ func TestKeyVersions(t *testing.T) { runBadgerTest(t, &inMemoryOpt, func(t *testing.T, db *DB) { writer := db.newWriteBatch(false) for i := 0; i < 10000; i++ { - writer.Set([]byte(fmt.Sprintf("%05d", i)), []byte("foo")) + require.NoError(t, writer.Set([]byte(fmt.Sprintf("%05d", i)), []byte("foo"))) } require.NoError(t, writer.Flush()) require.Equal(t, 1, len(db.Ranges([]byte("a"), 10000))) @@ -1151,7 +1150,7 @@ func TestTableContainsPrefix(t *testing.T) { } tbl := buildTable([]string{"key1", "key3", "key31", "key32", "key4"}) - defer tbl.DecrRef() + defer func() { require.NoError(t, tbl.DecrRef()) }() require.True(t, containsPrefix(tbl, []byte("key"))) require.True(t, containsPrefix(tbl, []byte("key1"))) diff --git a/managed_db_test.go 
b/managed_db_test.go index 1c7e0e3c0..8b9dedea5 100644 --- a/managed_db_test.go +++ b/managed_db_test.go @@ -280,7 +280,7 @@ func TestDropReadOnly(t *testing.T) { require.Equal(t, err, ErrWindowsNotSupported) } else { require.NoError(t, err) - require.Panics(t, func() { db2.DropAll() }) + require.Panics(t, func() { _ = db2.DropAll() }) require.NoError(t, db2.Close()) } } @@ -527,7 +527,7 @@ func TestDropPrefixReadOnly(t *testing.T) { require.Equal(t, err, ErrWindowsNotSupported) } else { require.NoError(t, err) - require.Panics(t, func() { db2.DropPrefix([]byte("key0")) }) + require.Panics(t, func() { _ = db2.DropPrefix([]byte("key0")) }) require.NoError(t, db2.Close()) } } diff --git a/manifest_test.go b/manifest_test.go index 528029c22..b445ca048 100644 --- a/manifest_test.go +++ b/manifest_test.go @@ -17,7 +17,6 @@ package badger import ( - "context" "fmt" "io/ioutil" "math/rand" @@ -29,9 +28,7 @@ import ( "time" "github.com/stretchr/testify/require" - otrace "go.opencensus.io/trace" - "github.com/dgraph-io/badger/v3/options" "github.com/dgraph-io/badger/v3/pb" "github.com/dgraph-io/badger/v3/table" "github.com/dgraph-io/badger/v3/y" @@ -55,7 +52,7 @@ func TestManifestBasic(t *testing.T) { txnSet(t, kv, k, k, 0x00) } txnSet(t, kv, []byte("testkey"), []byte("testval"), 0x05) - kv.validate() + require.NoError(t, kv.validate()) require.NoError(t, kv.Close()) } @@ -115,17 +112,6 @@ func key(prefix string, i int) string { return prefix + fmt.Sprintf("%04d", i) } -func buildTestTable(t *testing.T, prefix string, n int, opts table.Options) *table.Table { - y.AssertTrue(n <= 10000) - keyValues := make([][]string, n) - for i := 0; i < n; i++ { - k := key(prefix, i) - v := fmt.Sprintf("%d", i) - keyValues[i] = []string{k, v} - } - return buildTable(t, keyValues, opts) -} - // TODO - Move these to somewhere where table package can also use it. // keyValues is n by 2 where n is number of pairs. 
func buildTable(t *testing.T, keyValues [][]string, bopts table.Options) *table.Table { @@ -158,58 +144,6 @@ func buildTable(t *testing.T, keyValues [][]string, bopts table.Options) *table. return tbl } -func TestOverlappingKeyRangeError(t *testing.T) { - dir, err := ioutil.TempDir("", "badger-test") - require.NoError(t, err) - defer removeDir(dir) - kv, err := Open(DefaultOptions(dir)) - require.NoError(t, err) - defer kv.Close() - - lh0 := newLevelHandler(kv, 0) - lh1 := newLevelHandler(kv, 1) - opts := table.Options{ChkMode: options.OnTableAndBlockRead} - t1 := buildTestTable(t, "k", 2, opts) - defer t1.DecrRef() - - done := lh0.tryAddLevel0Table(t1) - require.Equal(t, true, done) - _, span := otrace.StartSpan(context.Background(), "Badger.Compaction") - span.Annotatef(nil, "Compaction level: %v", lh0) - cd := compactDef{ - thisLevel: lh0, - nextLevel: lh1, - span: span, - t: kv.lc.levelTargets(), - } - cd.t.baseLevel = 1 - - manifest := createManifest() - lc, err := newLevelsController(kv, &manifest) - require.NoError(t, err) - done = lc.fillTablesL0(&cd) - require.Equal(t, true, done) - lc.runCompactDef(-1, 0, cd) - span.End() - - _, span = otrace.StartSpan(context.Background(), "Badger.Compaction") - span.Annotatef(nil, "Compaction level: %v", lh0) - t2 := buildTestTable(t, "l", 2, opts) - defer t2.DecrRef() - done = lh0.tryAddLevel0Table(t2) - require.Equal(t, true, done) - - cd = compactDef{ - thisLevel: lh0, - nextLevel: lh1, - span: span, - t: kv.lc.levelTargets(), - } - cd.t.baseLevel = 1 - lc.fillTablesL0(&cd) - lc.runCompactDef(-1, 0, cd) -} - func TestManifestRewrite(t *testing.T) { dir, err := ioutil.TempDir("", "badger-test") require.NoError(t, err) diff --git a/memtable.go b/memtable.go index ccb5fc079..a7c0d0b51 100644 --- a/memtable.go +++ b/memtable.go @@ -148,8 +148,6 @@ func (db *DB) openMemTable(fid, flags int) (*memTable, error) { return mt, y.Wrapf(err, "while updating skiplist") } -var errExpectingNewFile = errors.New("Expecting to create 
a new file, but found an existing file") - func (db *DB) newMemTable() (*memTable, error) { mt, err := db.openMemTable(db.nextMemFid, os.O_CREATE|os.O_RDWR) if err == z.NewFile { diff --git a/merge_test.go b/merge_test.go index 1bfd8139b..90368bdf3 100644 --- a/merge_test.go +++ b/merge_test.go @@ -42,12 +42,9 @@ func TestGetMergeOperator(t *testing.T) { m := db.GetMergeOperator(key, add, 200*time.Millisecond) defer m.Stop() - err := m.Add(uint64ToBytes(1)) - require.NoError(t, err) - m.Add(uint64ToBytes(2)) - require.NoError(t, err) - m.Add(uint64ToBytes(3)) - require.NoError(t, err) + require.NoError(t, m.Add(uint64ToBytes(1))) + require.NoError(t, m.Add(uint64ToBytes(2))) + require.NoError(t, m.Add(uint64ToBytes(3))) res, err := m.Get() require.NoError(t, err) @@ -64,12 +61,12 @@ func TestGetMergeOperator(t *testing.T) { m := db.GetMergeOperator([]byte("fooprefix"), add, 2*time.Millisecond) defer m.Stop() - require.Nil(t, m.Add([]byte("A"))) - require.Nil(t, m.Add([]byte("B"))) - require.Nil(t, m.Add([]byte("C"))) + require.NoError(t, m.Add([]byte("A"))) + require.NoError(t, m.Add([]byte("B"))) + require.NoError(t, m.Add([]byte("C"))) value, err := m.Get() - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, "ABC", string(value)) }) }) @@ -79,12 +76,9 @@ func TestGetMergeOperator(t *testing.T) { m := db.GetMergeOperator(key, add, 500*time.Millisecond) defer m.Stop() - err := m.Add(uint64ToBytes(1)) - require.NoError(t, err) - m.Add(uint64ToBytes(2)) - require.NoError(t, err) - m.Add(uint64ToBytes(3)) - require.NoError(t, err) + require.NoError(t, m.Add(uint64ToBytes(1))) + require.NoError(t, m.Add(uint64ToBytes(2))) + require.NoError(t, m.Add(uint64ToBytes(3))) res, err := m.Get() require.NoError(t, err) @@ -97,25 +91,21 @@ func TestGetMergeOperator(t *testing.T) { runBadgerTest(t, nil, func(t *testing.T, db *DB) { m := db.GetMergeOperator(key, add, 200*time.Millisecond) - err := m.Add(uint64ToBytes(1)) - require.NoError(t, err) - 
m.Add(uint64ToBytes(2)) - require.NoError(t, err) - m.Add(uint64ToBytes(3)) - require.NoError(t, err) + require.NoError(t, m.Add(uint64ToBytes(1))) + require.NoError(t, m.Add(uint64ToBytes(2))) + require.NoError(t, m.Add(uint64ToBytes(3))) m.Stop() res, err := m.Get() require.NoError(t, err) require.Equal(t, uint64(6), bytesToUint64(res)) - db.Update(func(txn *Txn) error { + require.NoError(t, db.Update(func(txn *Txn) error { return txn.Delete(key) - }) + })) m = db.GetMergeOperator(key, add, 200*time.Millisecond) - err = m.Add(uint64ToBytes(1)) - require.NoError(t, err) + require.NoError(t, m.Add(uint64ToBytes(1))) m.Stop() res, err = m.Get() @@ -129,12 +119,9 @@ func TestGetMergeOperator(t *testing.T) { runBadgerTest(t, nil, func(t *testing.T, db *DB) { m := db.GetMergeOperator(key, add, 1*time.Second) - err := m.Add(uint64ToBytes(1)) - require.NoError(t, err) - m.Add(uint64ToBytes(2)) - require.NoError(t, err) - m.Add(uint64ToBytes(3)) - require.NoError(t, err) + require.NoError(t, m.Add(uint64ToBytes(1))) + require.NoError(t, m.Add(uint64ToBytes(2))) + require.NoError(t, m.Add(uint64ToBytes(3))) m.Stop() res, err := m.Get() diff --git a/publisher.go b/publisher.go index 66105fec5..736a13d39 100644 --- a/publisher.go +++ b/publisher.go @@ -117,7 +117,7 @@ func (p *publisher) publishUpdates(reqs requests) { } } -func (p *publisher) newSubscriber(c *z.Closer, matches []pb.Match) subscriber { +func (p *publisher) newSubscriber(c *z.Closer, matches []pb.Match) (subscriber, error) { p.Lock() defer p.Unlock() ch := make(chan *pb.KVList, 1000) @@ -134,9 +134,11 @@ func (p *publisher) newSubscriber(c *z.Closer, matches []pb.Match) subscriber { } p.subscribers[id] = s for _, m := range matches { - p.indexer.AddMatch(m, id) + if err := p.indexer.AddMatch(m, id); err != nil { + return subscriber{}, err + } } - return s + return s, nil } // cleanSubscribers stops all the subscribers. Ideally, It should be called while closing DB. 
@@ -145,7 +147,7 @@ func (p *publisher) cleanSubscribers() { defer p.Unlock() for id, s := range p.subscribers { for _, m := range s.matches { - p.indexer.DeleteMatch(m, id) + _ = p.indexer.DeleteMatch(m, id) } delete(p.subscribers, id) s.subCloser.SignalAndWait() @@ -157,7 +159,7 @@ func (p *publisher) deleteSubscriber(id uint64) { defer p.Unlock() if s, ok := p.subscribers[id]; ok { for _, m := range s.matches { - p.indexer.DeleteMatch(m, id) + _ = p.indexer.DeleteMatch(m, id) } } delete(p.subscribers, id) diff --git a/publisher_test.go b/publisher_test.go index 73b2f28e3..4f0316424 100644 --- a/publisher_test.go +++ b/publisher_test.go @@ -112,10 +112,10 @@ func TestPublisherOrdering(t *testing.T) { }() subWg.Wait() for i := 0; i < 5; i++ { - db.Update(func(txn *Txn) error { + require.NoError(t, db.Update(func(txn *Txn) error { e := NewEntry([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i))) return txn.SetEntry(e) - }) + })) } wg.Wait() for i := 0; i < 5; i++ { @@ -154,12 +154,12 @@ func TestMultiplePrefix(t *testing.T) { } }() subWg.Wait() - db.Update(func(txn *Txn) error { + require.NoError(t, db.Update(func(txn *Txn) error { return txn.SetEntry(NewEntry([]byte("key"), []byte("value"))) - }) - db.Update(func(txn *Txn) error { + })) + require.NoError(t, db.Update(func(txn *Txn) error { return txn.SetEntry(NewEntry([]byte("hello"), []byte("badger"))) - }) + })) wg.Wait() }) } diff --git a/stream.go b/stream.go index b97b116f0..90ed90d5d 100644 --- a/stream.go +++ b/stream.go @@ -181,7 +181,7 @@ func (st *Stream) produceKVs(ctx context.Context, threadId int) error { defer func() { // The outList variable changes. So, we need to evaluate the variable in the defer. DO NOT // call `defer outList.Release()`. 
- outList.Release() + _ = outList.Release() }() iterate := func(kr keyRange) error { @@ -295,7 +295,7 @@ func (st *Stream) streamKVs(ctx context.Context) error { now := time.Now() sendBatch := func(batch *z.Buffer) error { - defer batch.Release() + defer func() { _ = batch.Release() }() sz := uint64(batch.LenNoPadding()) if sz == 0 { return nil @@ -432,7 +432,7 @@ func (st *Stream) Orchestrate(ctx context.Context) error { defer func() { // If due to some error, we have buffers left in kvChan, we should release them. for buf := range st.kvChan { - buf.Release() + _ = buf.Release() } }() diff --git a/stream_writer_test.go b/stream_writer_test.go index c4922be15..2180804a7 100644 --- a/stream_writer_test.go +++ b/stream_writer_test.go @@ -176,7 +176,7 @@ func TestStreamWriter3(t *testing.T) { y.Check2(rand.Read(value)) counter := 0 buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() for i := 0; i < noOfKeys; i++ { key := make([]byte, 8) binary.BigEndian.PutUint64(key, uint64(counter)) @@ -273,7 +273,7 @@ func TestStreamWriter4(t *testing.T) { } buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() KVToBuffer(&pb.KV{ Key: []byte("key-1"), Value: []byte("value-1"), @@ -298,7 +298,7 @@ func TestStreamWriter5(t *testing.T) { copy(right[1:], []byte("break")) buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() KVToBuffer(&pb.KV{ Key: left, Value: []byte("val"), @@ -337,7 +337,7 @@ func TestStreamWriter6(t *testing.T) { // Setting keycount below 32 would cause this test to fail. 
keyCount := 40 buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() for i := range str { for j := 0; j < keyCount; j++ { ver++ @@ -378,7 +378,7 @@ func TestStreamWriterCancel(t *testing.T) { str := []string{"a", "a", "b", "b", "c", "c"} ver := 1 buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() for i := range str { kv := &pb.KV{ Key: bytes.Repeat([]byte(str[i]), int(db.opt.BaseTableSize)), @@ -412,7 +412,7 @@ func TestStreamDone(t *testing.T) { rand.Read(val[:]) for i := 0; i < 10; i++ { buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() kv1 := &pb.KV{ Key: []byte(fmt.Sprintf("%d", i)), Value: val[:], @@ -453,7 +453,7 @@ func TestSendOnClosedStream(t *testing.T) { var val [10]byte rand.Read(val[:]) buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() kv1 := &pb.KV{ Key: []byte(fmt.Sprintf("%d", 1)), Value: val[:], @@ -476,7 +476,7 @@ func TestSendOnClosedStream(t *testing.T) { }() // Send once stream is closed. 
buf1 := z.NewBuffer(10<<20, "test") - defer buf1.Release() + defer func() { require.NoError(t, buf1.Release()) }() kv1 = &pb.KV{ Key: []byte(fmt.Sprintf("%d", 2)), Value: val[:], @@ -484,7 +484,7 @@ func TestSendOnClosedStream(t *testing.T) { StreamId: uint32(1), } KVToBuffer(kv1, buf1) - sw.Write(buf1) + require.NoError(t, sw.Write(buf1)) } func TestSendOnClosedStream2(t *testing.T) { @@ -503,7 +503,7 @@ func TestSendOnClosedStream2(t *testing.T) { var val [10]byte rand.Read(val[:]) buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() kv1 := &pb.KV{ Key: []byte(fmt.Sprintf("%d", 1)), Value: val[:], @@ -550,7 +550,7 @@ func TestStreamWriterEncrypted(t *testing.T) { value := []byte("myvalue") buf := z.NewBuffer(10<<20, "test") - defer buf.Release() + defer func() { require.NoError(t, buf.Release()) }() KVToBuffer(&pb.KV{ Key: key, Value: value, diff --git a/structs.go b/structs.go index c17f818cf..f51c4a655 100644 --- a/structs.go +++ b/structs.go @@ -174,6 +174,7 @@ func (e *Entry) skipVlogAndSetThreshold(threshold int64) bool { return int64(len(e.Value)) < e.valThreshold } +//nolint:unused func (e Entry) print(prefix string) { fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d", prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value)) diff --git a/table/builder.go b/table/builder.go index da5a23a0f..3debe08e2 100644 --- a/table/builder.go +++ b/table/builder.go @@ -84,7 +84,6 @@ type Builder struct { uncompressedSize uint32 lenOffsets uint32 - estimatedSize uint32 keyHashes []uint32 // Used for building the bloomfilter. 
opts *Options maxVersion uint64 @@ -291,7 +290,6 @@ func (b *Builder) finishBlock() { if b.blockChan != nil { b.blockChan <- b.curBlock } - return } func (b *Builder) shouldFinishBlock(key []byte, value y.ValueStruct) bool { diff --git a/table/merge_iterator.go b/table/merge_iterator.go index 789a24fd7..70ccc0858 100644 --- a/table/merge_iterator.go +++ b/table/merge_iterator.go @@ -115,7 +115,7 @@ func (mi *MergeIterator) fix() { case cmp < 0: // Small is less than bigger(). if mi.reverse { mi.swapSmall() - } else { + } else { //nolint:staticcheck // we don't need to do anything. Small already points to the smallest. } return diff --git a/table/table.go b/table/table.go index 1838fe7e6..6c5704feb 100644 --- a/table/table.go +++ b/table/table.go @@ -379,7 +379,7 @@ func (t *Table) initBiggestAndSmallest() error { checksum := &pb.Checksum{} readPos -= checksumLen buf = t.readNoFail(readPos, checksumLen) - proto.Unmarshal(buf, checksum) + _ = proto.Unmarshal(buf, checksum) fmt.Fprintf(&debugBuf, "checksum: %+v ", checksum) // Read index size from the footer. 
@@ -828,7 +828,7 @@ func (t *Table) decompress(b *block) error { return errors.New("Unsupported compression type") } - if b.freeMe == true { + if b.freeMe { z.Free(src) b.freeMe = false } diff --git a/table/table_test.go b/table/table_test.go index e2f0fcb92..183d8b62a 100644 --- a/table/table_test.go +++ b/table/table_test.go @@ -90,7 +90,7 @@ func TestTableIterator(t *testing.T) { t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", n, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() it := table.NewIterator(0) defer it.Close() count := 0 @@ -111,7 +111,7 @@ func TestSeekToFirst(t *testing.T) { t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", n, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() it := table.NewIterator(0) defer it.Close() it.seekToFirst() @@ -128,7 +128,7 @@ func TestSeekToLast(t *testing.T) { t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", n, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() it := table.NewIterator(0) defer it.Close() it.seekToLast() @@ -148,7 +148,7 @@ func TestSeekToLast(t *testing.T) { func TestSeek(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "k", 10000, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() it := table.NewIterator(0) defer it.Close() @@ -182,7 +182,7 @@ func TestSeek(t *testing.T) { func TestSeekForPrev(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "k", 10000, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() it := table.NewIterator(0) defer it.Close() @@ -219,7 +219,7 @@ func TestIterateFromStart(t *testing.T) { t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { opts := 
getTestTableOptions() table := buildTestTable(t, "key", n, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() ti := table.NewIterator(0) defer ti.Close() ti.reset() @@ -245,7 +245,7 @@ func TestIterateFromEnd(t *testing.T) { t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", n, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() ti := table.NewIterator(0) defer ti.Close() ti.reset() @@ -267,7 +267,7 @@ func TestIterateFromEnd(t *testing.T) { func TestTable(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", 10000, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() ti := table.NewIterator(0) defer ti.Close() kid := 1010 @@ -293,7 +293,7 @@ func TestTable(t *testing.T) { func TestIterateBackAndForth(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", 10000, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() seek := y.KeyWithTs([]byte(key("key", 1010)), 0) it := table.NewIterator(0) @@ -333,7 +333,7 @@ func TestIterateBackAndForth(t *testing.T) { func TestUniIterator(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", 10000, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() { it := table.NewIterator(0) defer it.Close() @@ -368,7 +368,7 @@ func TestConcatIteratorOneTable(t *testing.T) { {"k2", "a2"}, }, opts) - defer tbl.DecrRef() + defer func() { require.NoError(t, tbl.DecrRef()) }() it := NewConcatIterator([]*Table{tbl}, 0) defer it.Close() @@ -387,9 +387,9 @@ func TestConcatIterator(t *testing.T) { tbl := buildTestTable(t, "keya", 10000, opts) tbl2 := buildTestTable(t, "keyb", 10000, opts) tbl3 := buildTestTable(t, "keyc", 10000, opts) - defer tbl.DecrRef() - defer tbl2.DecrRef() - defer tbl3.DecrRef() + defer func() { require.NoError(t, 
tbl.DecrRef()) }() + defer func() { require.NoError(t, tbl2.DecrRef()) }() + defer func() { require.NoError(t, tbl3.DecrRef()) }() { it := NewConcatIterator([]*Table{tbl, tbl2, tbl3}, 0) @@ -480,8 +480,8 @@ func TestMergingIterator(t *testing.T) { {"k4", "a4"}, {"k5", "a5"}, } - defer tbl1.DecrRef() - defer tbl2.DecrRef() + defer func() { require.NoError(t, tbl1.DecrRef()) }() + defer func() { require.NoError(t, tbl2.DecrRef()) }() it1 := tbl1.NewIterator(0) it2 := NewConcatIterator([]*Table{tbl2}, 0) it := NewMergeIterator([]y.Iterator{it1, it2}, false) @@ -525,8 +525,8 @@ func TestMergingIteratorReversed(t *testing.T) { {"k2", "a2"}, {"k1", "a1"}, } - defer tbl1.DecrRef() - defer tbl2.DecrRef() + defer func() { require.NoError(t, tbl1.DecrRef()) }() + defer func() { require.NoError(t, tbl2.DecrRef()) }() it1 := tbl1.NewIterator(REVERSED) it2 := NewConcatIterator([]*Table{tbl2}, REVERSED) it := NewMergeIterator([]y.Iterator{it1, it2}, true) @@ -555,8 +555,8 @@ func TestMergingIteratorTakeOne(t *testing.T) { }, opts) t2 := buildTable(t, [][]string{{"l1", "b1"}}, opts) - defer t1.DecrRef() - defer t2.DecrRef() + defer func() { require.NoError(t, t1.DecrRef()) }() + defer func() { require.NoError(t, t2.DecrRef()) }() it1 := NewConcatIterator([]*Table{t1}, 0) it2 := NewConcatIterator([]*Table{t2}, 0) @@ -599,8 +599,8 @@ func TestMergingIteratorTakeTwo(t *testing.T) { {"k2", "a2"}, }, opts) - defer t1.DecrRef() - defer t2.DecrRef() + defer func() { require.NoError(t, t1.DecrRef()) }() + defer func() { require.NoError(t, t2.DecrRef()) }() it1 := NewConcatIterator([]*Table{t1}, 0) it2 := NewConcatIterator([]*Table{t2}, 0) @@ -656,7 +656,7 @@ func TestTableBigValues(t *testing.T) { filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Int63()) tbl, err := CreateTable(filename, builder) require.NoError(t, err, "unable to open table") - defer tbl.DecrRef() + defer func() { require.NoError(t, tbl.DecrRef()) }() itr := tbl.NewIterator(0) 
require.True(t, itr.Valid()) @@ -705,7 +705,7 @@ var cacheConfig = ristretto.Config{ func BenchmarkRead(b *testing.B) { n := int(5 * 1e6) tbl := getTableForBenchmarks(b, n, nil) - defer tbl.DecrRef() + defer func() { _ = tbl.DecrRef() }() b.ResetTimer() // Iterate b.N times over the entire table. @@ -724,7 +724,7 @@ func BenchmarkReadAndBuild(b *testing.B) { var cache, _ = ristretto.NewCache(&cacheConfig) tbl := getTableForBenchmarks(b, n, cache) - defer tbl.DecrRef() + defer func() { _ = tbl.DecrRef() }() b.ResetTimer() // Iterate b.N times over the entire table. @@ -770,7 +770,7 @@ func BenchmarkReadMerged(b *testing.B) { y.Check(err) builder.Close() tables = append(tables, tbl) - defer tbl.DecrRef() + defer func() { _ = tbl.DecrRef() }() } b.ResetTimer() @@ -814,7 +814,7 @@ func BenchmarkChecksum(b *testing.B) { func BenchmarkRandomRead(b *testing.B) { n := int(5 * 1e6) tbl := getTableForBenchmarks(b, n, nil) - defer tbl.DecrRef() + defer func() { _ = tbl.DecrRef() }() r := rand.New(rand.NewSource(time.Now().Unix())) @@ -870,7 +870,7 @@ func TestMain(m *testing.M) { func TestDoesNotHaveRace(t *testing.T) { opts := getTestTableOptions() table := buildTestTable(t, "key", 10000, opts) - defer table.DecrRef() + defer func() { require.NoError(t, table.DecrRef()) }() var wg sync.WaitGroup wg.Add(5) diff --git a/trie/trie.go b/trie/trie.go index 331207b5c..ec2434d4b 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -69,7 +69,7 @@ func parseIgnoreBytes(ig string) ([]bool, error) { if len(r) == 0 || len(r) > 2 { return out, fmt.Errorf("Invalid range: %s", each) } - start, end := -1, -1 + start, end := -1, -1 //nolint:ineffassign if len(r) == 2 { idx, err := strconv.Atoi(strings.TrimSpace(r[1])) if err != nil { diff --git a/trie/trie_test.go b/trie/trie_test.go index 40a1e4574..1f55ecb6d 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -65,20 +65,20 @@ func TestTrieDelete(t *testing.T) { t.Logf("Num nodes: %d", numNodes(trie.root)) - trie.Delete([]byte("hello"), 4) 
+ require.NoError(t, trie.Delete([]byte("hello"), 4)) t.Logf("Num nodes: %d", numNodes(trie.root)) require.Equal(t, map[uint64]struct{}{5: {}, 1: {}, 3: {}}, trie.Get([]byte("hello"))) - trie.Delete(nil, 5) + require.NoError(t, trie.Delete(nil, 5)) t.Logf("Num nodes: %d", numNodes(trie.root)) require.Equal(t, map[uint64]struct{}{1: {}, 3: {}}, trie.Get([]byte("hello"))) - trie.Delete([]byte("hello"), 1) - trie.Delete([]byte("hello"), 3) - trie.Delete([]byte("hello"), 4) - trie.Delete([]byte("hello"), 5) - trie.Delete([]byte("hello"), 6) + require.NoError(t, trie.Delete([]byte("hello"), 1)) + require.NoError(t, trie.Delete([]byte("hello"), 3)) + require.NoError(t, trie.Delete([]byte("hello"), 4)) + require.NoError(t, trie.Delete([]byte("hello"), 5)) + require.NoError(t, trie.Delete([]byte("hello"), 6)) require.Equal(t, 1, numNodes(trie.root)) t.Logf("Num nodes: %d", numNodes(trie.root)) diff --git a/txn_test.go b/txn_test.go index 61d592e2d..12b950391 100644 --- a/txn_test.go +++ b/txn_test.go @@ -50,7 +50,7 @@ func TestTxnSimple(t *testing.T) { return nil })) - require.Panics(t, func() { txn.CommitAt(100, nil) }) + require.Panics(t, func() { _ = txn.CommitAt(100, nil) }) require.NoError(t, txn.Commit()) }) } @@ -368,7 +368,7 @@ func TestTxnIterationEdgeCase(t *testing.T) { // b4 (del) txn = db.NewTransaction(true) - txn.Delete(kb) + require.NoError(t, txn.Delete(kb)) require.NoError(t, txn.Commit()) require.Equal(t, uint64(4), db.orc.readTs()) @@ -452,7 +452,7 @@ func TestTxnIterationEdgeCase2(t *testing.T) { // b4 (del) txn = db.NewTransaction(true) - txn.Delete(kb) + require.NoError(t, txn.Delete(kb)) require.NoError(t, txn.Commit()) require.Equal(t, uint64(4), db.orc.readTs()) @@ -750,7 +750,7 @@ func TestManagedDB(t *testing.T) { } require.Panics(t, func() { - db.Update(func(tx *Txn) error { return nil }) + _ = db.Update(func(tx *Txn) error { return nil }) }) err = db.View(func(tx *Txn) error { return nil }) @@ -821,7 +821,8 @@ func TestManagedDB(t *testing.T) 
{ // Write data to same key, causing a conflict txn = db.NewTransactionAt(10, true) txnb := db.NewTransactionAt(10, true) - txnb.Get(key(0)) + _, err := txnb.Get(key(0)) + require.NoError(t, err) require.NoError(t, txn.SetEntry(NewEntry(key(0), val(0)))) require.NoError(t, txnb.SetEntry(NewEntry(key(0), val(1)))) require.NoError(t, txn.CommitAt(11, nil)) diff --git a/value.go b/value.go index fb2fdca72..87fa1d64c 100644 --- a/value.go +++ b/value.go @@ -55,7 +55,7 @@ const ( bitTxn byte = 1 << 6 // Set if the entry is part of a txn. bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log. - mi int64 = 1 << 20 + mi int64 = 1 << 20 //nolint:unused // size of vlog header. // +----------------+------------------+ @@ -66,7 +66,6 @@ const ( var errStop = errors.New("Stop iteration") var errTruncate = errors.New("Do truncate") -var errDeleteVlogFile = errors.New("Delete vlog file") type logEntry func(e Entry, vp valuePointer) error @@ -251,7 +250,7 @@ func (vlog *valueLog) rewrite(f *logFile) error { } wb = append(wb, ne) size += es - } else { + } else { //nolint:staticcheck // It might be possible that the entry read from LSM Tree points to // an older vlog file. This can happen in the following situation. // Assume DB is opened with @@ -820,7 +819,9 @@ func (vlog *valueLog) write(reqs []*request) error { endOffset := atomic.AddUint32(&vlog.writableLogOffset, n) // Increase the file size if we cannot accommodate this entry. 
if int(endOffset) >= len(curlf.Data) { - curlf.Truncate(int64(endOffset)) + if err := curlf.Truncate(int64(endOffset)); err != nil { + return err + } } start := int(endOffset - n) @@ -1056,12 +1057,6 @@ func discardEntry(e Entry, vs y.ValueStruct, db *DB) bool { return false } -type reason struct { - total float64 - discard float64 - count int -} - func (vlog *valueLog) doRunGC(lf *logFile) error { _, span := otrace.StartSpan(context.Background(), "Badger.GC") span.Annotatef(nil, "GC rewrite for: %v", lf.path) diff --git a/value_test.go b/value_test.go index 69f02e171..e138a6c79 100644 --- a/value_test.go +++ b/value_test.go @@ -18,6 +18,7 @@ package badger import ( "bytes" + "errors" "fmt" "io/ioutil" "math" @@ -53,7 +54,7 @@ func TestDynamicValueThreshold(t *testing.T) { } b := new(request) b.Entries = []*Entry{e1} - log.write([]*request{b}) + require.NoError(t, log.write([]*request{b})) } t.Logf("value threshold is %d \n", log.db.valueThreshold()) } @@ -69,7 +70,7 @@ func TestDynamicValueThreshold(t *testing.T) { } b := new(request) b.Entries = []*Entry{e1} - log.write([]*request{b}) + require.NoError(t, log.write([]*request{b})) } t.Logf("value threshold is %d \n", log.db.valueThreshold()) } @@ -104,7 +105,7 @@ func TestValueBasic(t *testing.T) { b := new(request) b.Entries = []*Entry{e1, e2} - log.write([]*request{b}) + require.NoError(t, log.write([]*request{b})) require.Len(t, b.Ptrs, 2) t.Logf("Pointer written: %+v %+v\n", b.Ptrs[0], b.Ptrs[1]) @@ -192,7 +193,7 @@ func TestValueGCManaged(t *testing.T) { } db.SetDiscardTs(math.MaxUint32) - db.Flatten(3) + require.NoError(t, db.Flatten(3)) for i := 0; i < 100; i++ { // Try at max 100 times to GC even a single value log file. 
@@ -241,7 +242,7 @@ func TestValueGC(t *testing.T) { // return true // }) - kv.vlog.rewrite(lf) + require.NoError(t, kv.vlog.rewrite(lf)) for i := 45; i < 100; i++ { key := []byte(fmt.Sprintf("key%d", i)) @@ -299,7 +300,7 @@ func TestValueGC2(t *testing.T) { // return true // }) - kv.vlog.rewrite(lf) + require.NoError(t, kv.vlog.rewrite(lf)) for i := 0; i < 5; i++ { key := []byte(fmt.Sprintf("key%d", i)) require.NoError(t, kv.View(func(txn *Txn) error { @@ -397,7 +398,7 @@ func TestValueGC3(t *testing.T) { logFile := kv.vlog.filesMap[kv.vlog.sortedFids()[0]] kv.vlog.filesLock.RUnlock() - kv.vlog.rewrite(logFile) + require.NoError(t, kv.vlog.rewrite(logFile)) it.Next() require.True(t, it.Valid()) item = it.Item() @@ -452,8 +453,8 @@ func TestValueGC4(t *testing.T) { // return true // }) - kv.vlog.rewrite(lf0) - kv.vlog.rewrite(lf1) + require.NoError(t, kv.vlog.rewrite(lf0)) + require.NoError(t, kv.vlog.rewrite(lf1)) require.NoError(t, kv.Close()) @@ -812,6 +813,7 @@ func TestPenultimateMemCorruption(t *testing.T) { require.Zero(t, len(db0.imm)) db0.imm = append(db0.imm, db0.mt) db0.mt, err = db0.newMemTable() + require.NoError(t, err) h.writeRange(3, 7) // 00002.mem @@ -826,7 +828,7 @@ func TestPenultimateMemCorruption(t *testing.T) { if i == 1 { // This should corrupt the last entry in the first memtable (that is entry number 2) wal := db0.imm[0].wal - wal.Fd.WriteAt([]byte{0}, int64(wal.writeAt-1)) + _, err = wal.Fd.WriteAt([]byte{0}, int64(wal.writeAt-1)) require.NoError(t, err) // We have corrupted the file. We can remove it. If we don't remove // the imm here, the db.close in defer will crash since db0.mt != @@ -943,7 +945,9 @@ func TestBug578(t *testing.T) { // Run value log GC a few times. 
for i := 0; i < 5; i++ { - db.RunValueLogGC(0.5) + if err := db.RunValueLogGC(0.5); err != nil && !errors.Is(err, ErrNoRewrite) { + require.NoError(t, err) + } } h.readRange(0, 10) } @@ -980,12 +984,12 @@ func BenchmarkReadWrite(b *testing.B) { var ptrs []valuePointer - vl.write([]*request{bl}) + _ = vl.write([]*request{bl}) ptrs = append(ptrs, bl.Ptrs...) f := rand.Float32() if f < rw { - vl.write([]*request{bl}) + _ = vl.write([]*request{bl}) } else { ln := len(ptrs) @@ -1233,10 +1237,11 @@ func TestValueLogMeta(t *testing.T) { require.Equal(t, 1, len(fids)) // vlog entries must not have txn meta. - db.vlog.filesMap[fids[0]].iterate(true, 0, func(e Entry, vp valuePointer) error { + _, err = db.vlog.filesMap[fids[0]].iterate(true, 0, func(e Entry, vp valuePointer) error { require.Zero(t, e.meta&(bitTxn|bitFinTxn)) return nil }) + require.NoError(t, err) // Entries in LSM tree must have txn bit of meta set txn = db.NewTransaction(false) @@ -1264,6 +1269,7 @@ func TestFirstVlogFile(t *testing.T) { opt := DefaultOptions(dir) db, err := Open(opt) + require.NoError(t, err) defer db.Close() fids := db.vlog.sortedFids() diff --git a/y/encrypt_test.go b/y/encrypt_test.go index 91d50acb1..ded05828a 100644 --- a/y/encrypt_test.go +++ b/y/encrypt_test.go @@ -26,19 +26,19 @@ import ( func TestXORBlock(t *testing.T) { key := make([]byte, 32) - rand.Read(key) + _, _ = rand.Read(key) var iv []byte { b, err := aes.NewCipher(key) require.NoError(t, err) iv = make([]byte, b.BlockSize()) - rand.Read(iv) + _, _ = rand.Read(iv) t.Logf("Using %d size IV\n", len(iv)) } src := make([]byte, 1024) - rand.Read(src) + _, _ = rand.Read(src) dst := make([]byte, 1024) err := XORBlock(dst, src, key, iv) diff --git a/y/y_test.go b/y/y_test.go index fc19d27e2..ce4cab2b2 100644 --- a/y/y_test.go +++ b/y/y_test.go @@ -34,7 +34,7 @@ func BenchmarkBuffer(b *testing.B) { b.Run(fmt.Sprintf("page-size-%d", pageSize), func(b *testing.B) { pageBuffer := NewPageBuffer(pageSize) for i := 0; i < b.N; i++ { - 
pageBuffer.Write(btw[:]) + _, _ = pageBuffer.Write(btw[:]) } }) })