From dd82278a6811272f7ab8a5d1adb338d82e8d5bf9 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Thu, 7 Jul 2022 00:12:35 -0400
Subject: [PATCH 01/21] basic box read db perf test
---
ledger/accountdb_test.go | 71 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 71 insertions(+)
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index b4d1c0e636..fd8110a3d3 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -2806,3 +2806,74 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
a.Equal(makeResourcesData(uint64(0)), upd[0].data)
}
}
+
+func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
+ totalBoxes := 100000
+ batchCount := 1000
+ boxSize := 4 * 8096
+ initDatabase := func() (db.Pair, func(), error) {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ dbs, fn := dbOpenTest(b, false)
+ setDbLogging(b, dbs)
+ cleanup := func() {
+ cleanupTestDb(dbs, fn, false)
+ }
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ _, err = accountsInit(tx, make(map[basics.Address]basics.AccountData), proto)
+ require.NoError(b, err)
+ err = tx.Commit()
+ require.NoError(b, err)
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false)
+ require.NoError(b, err)
+
+ // insert 1M boxes, in batches of 1000
+ for batch := 0; batch <= batchCount; batch++ {
+ fmt.Printf("%d / %d boxes written\n", totalBoxes*batch/batchCount, totalBoxes)
+
+ tx, err = dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ writer, err := makeAccountsSQLWriter(tx, false, false, true, false)
+ require.NoError(b, err)
+ for boxIdx := 0; boxIdx < totalBoxes/batchCount; boxIdx++ {
+ err = writer.upsertKvPair(fmt.Sprintf("%d-%d", batch, boxIdx), string(make([]byte, boxSize)))
+ require.NoError(b, err)
+ }
+
+ err = tx.Commit()
+ require.NoError(b, err)
+ writer.close()
+ }
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeFull, true)
+ return dbs, cleanup, err
+ }
+
+ dbs, cleanup, err := initDatabase()
+ require.NoError(b, err)
+ defer cleanup()
+
+ b.ResetTimer()
+ b.StopTimer()
+ cnt := 0
+ for i := 0; i < batchCount; i++ {
+ for j := 0; j < totalBoxes/batchCount; j++ {
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ lookupStmt, err := tx.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+
+ b.StartTimer()
+ _, err = lookupStmt.Exec(fmt.Sprintf("%d-%d", i, j))
+ require.NoError(b, err)
+ err = tx.Commit()
+ require.NoError(b, err)
+ b.StopTimer()
+ cnt++
+ if cnt >= b.N {
+ i = batchCount
+ break
+ }
+ }
+ }
+}
From 55c114a92eedfdd0090abb3a5b780e6a2d0295a8 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Thu, 7 Jul 2022 14:08:22 -0400
Subject: [PATCH 02/21] improve test and add sub benchmark to gauge sqlite
caching performance improvement
---
ledger/accountdb_test.go | 73 +++++++++++++++++++++++++++-------------
1 file changed, 50 insertions(+), 23 deletions(-)
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index fd8110a3d3..2dcd7a0376 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -24,7 +24,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "github.com/algorand/go-algorand/data/transactions/logic"
"math/rand"
"os"
"sort"
@@ -33,6 +32,8 @@ import (
"testing"
"time"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
@@ -2809,7 +2810,7 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
totalBoxes := 100000
- batchCount := 1000
+ batchCount := 100
boxSize := 4 * 8096
initDatabase := func() (db.Pair, func(), error) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
@@ -2828,7 +2829,7 @@ func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false)
require.NoError(b, err)
- // insert 1M boxes, in batches of 1000
+ // insert 100k boxes, in batches of 1000
for batch := 0; batch <= batchCount; batch++ {
fmt.Printf("%d / %d boxes written\n", totalBoxes*batch/batchCount, totalBoxes)
@@ -2853,27 +2854,53 @@ func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
require.NoError(b, err)
defer cleanup()
- b.ResetTimer()
- b.StopTimer()
- cnt := 0
- for i := 0; i < batchCount; i++ {
- for j := 0; j < totalBoxes/batchCount; j++ {
- tx, err := dbs.Wdb.Handle.Begin()
- require.NoError(b, err)
- lookupStmt, err := tx.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
+ b.Run("ReadBoxes", func(b *testing.B) {
+ cnt := 0
+ for i := 0; i < batchCount; i++ {
+ for j := 0; j < totalBoxes/batchCount; j++ {
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
- b.StartTimer()
- _, err = lookupStmt.Exec(fmt.Sprintf("%d-%d", i, j))
- require.NoError(b, err)
- err = tx.Commit()
- require.NoError(b, err)
- b.StopTimer()
- cnt++
- if cnt >= b.N {
- i = batchCount
- break
+ var pv persistedValue
+ var v sql.NullString
+
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d-%d", i, j))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ cnt++
+ if cnt >= b.N {
+ return
+ }
}
}
- }
+ })
+
+ b.Run("ReadBoxesDuplicate", func(b *testing.B) {
+ cnt := 0
+ for i := 0; i < batchCount; i++ {
+ for j := 0; j < totalBoxes/batchCount; j++ {
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+
+ var pv persistedValue
+ var v sql.NullString
+
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d-%d", i, j))).Scan(&pv.round, &v)
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d-%d", i, j))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ cnt++
+ if cnt >= b.N {
+ return
+ }
+ }
+ }
+ })
}
From 6707b912e96f13fa074ee1e5d19b0ba6079b09c4 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Fri, 8 Jul 2022 12:47:07 -0400
Subject: [PATCH 03/21] test
---
ledger/accountdb_test.go | 79 ++++++++++++++++++++++------------------
1 file changed, 43 insertions(+), 36 deletions(-)
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 2dcd7a0376..8163b081cc 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -2830,6 +2830,7 @@ func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
require.NoError(b, err)
// insert 100k boxes, in batches of 1000
+ cnt := 0
for batch := 0; batch <= batchCount; batch++ {
fmt.Printf("%d / %d boxes written\n", totalBoxes*batch/batchCount, totalBoxes)
@@ -2838,8 +2839,9 @@ func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
writer, err := makeAccountsSQLWriter(tx, false, false, true, false)
require.NoError(b, err)
for boxIdx := 0; boxIdx < totalBoxes/batchCount; boxIdx++ {
- err = writer.upsertKvPair(fmt.Sprintf("%d-%d", batch, boxIdx), string(make([]byte, boxSize)))
+ err = writer.upsertKvPair(fmt.Sprintf("%d", cnt), string(make([]byte, boxSize)))
require.NoError(b, err)
+ cnt++
}
err = tx.Commit()
@@ -2855,51 +2857,56 @@ func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
defer cleanup()
b.Run("ReadBoxes", func(b *testing.B) {
- cnt := 0
- for i := 0; i < batchCount; i++ {
- for j := 0; j < totalBoxes/batchCount; j++ {
- lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
+ b.StopTimer()
+ for i := 0; i < totalBoxes; i++ {
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
- var pv persistedValue
- var v sql.NullString
+ var pv persistedValue
+ var v sql.NullString
- b.StartTimer()
- err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d-%d", i, j))).Scan(&pv.round, &v)
- b.StopTimer()
- require.NoError(b, err)
- require.True(b, v.Valid)
- cnt++
- if cnt >= b.N {
- return
- }
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", i))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ if i >= b.N {
+ return
}
}
})
b.Run("ReadBoxesDuplicate", func(b *testing.B) {
- cnt := 0
- for i := 0; i < batchCount; i++ {
- for j := 0; j < totalBoxes/batchCount; j++ {
- lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
+ b.StopTimer()
+ for i := 0; i < totalBoxes; i++ {
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
- var pv persistedValue
- var v sql.NullString
+ var pv persistedValue
+ var v sql.NullString
- err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d-%d", i, j))).Scan(&pv.round, &v)
- require.NoError(b, err)
- require.True(b, v.Valid)
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", i))).Scan(&pv.round, &v)
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ if i >= b.N {
+ return
+ }
+ }
- b.StartTimer()
- err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d-%d", i, j))).Scan(&pv.round, &v)
- b.StopTimer()
- require.NoError(b, err)
- require.True(b, v.Valid)
- cnt++
- if cnt >= b.N {
- return
- }
+ for i := 0; i < totalBoxes; i++ {
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+
+ var pv persistedValue
+ var v sql.NullString
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", i))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+
+ if i >= b.N {
+ return
}
}
})
From 3d4e105233777458a10e7d8c6e9c64e68489da0e Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Wed, 13 Jul 2022 11:32:48 -0400
Subject: [PATCH 04/21] benchmark across different dimensions: boxSize,
boxCount, and lookback
---
ledger/accountdb_test.go | 182 +++++++++++++++++++++------------------
1 file changed, 100 insertions(+), 82 deletions(-)
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 8163b081cc..1f870cbbcb 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -2808,106 +2808,124 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
}
}
-func BenchmarkBoxDatabaseReadWrite(b *testing.B) {
- totalBoxes := 100000
+func initBoxDatabase(b *testing.B, totalBoxes, boxSize int) (db.Pair, func(), error) {
batchCount := 100
- boxSize := 4 * 8096
- initDatabase := func() (db.Pair, func(), error) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- dbs, fn := dbOpenTest(b, false)
- setDbLogging(b, dbs)
- cleanup := func() {
- cleanupTestDb(dbs, fn, false)
- }
+ if batchCount > totalBoxes {
+ batchCount = 1
+ }
- tx, err := dbs.Wdb.Handle.Begin()
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ dbs, fn := dbOpenTest(b, false)
+ setDbLogging(b, dbs)
+ cleanup := func() {
+ cleanupTestDb(dbs, fn, false)
+ }
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ _, err = accountsInit(tx, make(map[basics.Address]basics.AccountData), proto)
+ require.NoError(b, err)
+ err = tx.Commit()
+ require.NoError(b, err)
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false)
+ require.NoError(b, err)
+
+ cnt := 0
+ for batch := 0; batch <= batchCount; batch++ {
+ tx, err = dbs.Wdb.Handle.Begin()
require.NoError(b, err)
- _, err = accountsInit(tx, make(map[basics.Address]basics.AccountData), proto)
+ writer, err := makeAccountsSQLWriter(tx, false, false, true, false)
require.NoError(b, err)
+ for boxIdx := 0; boxIdx < totalBoxes/batchCount; boxIdx++ {
+ err = writer.upsertKvPair(fmt.Sprintf("%d", cnt), string(make([]byte, boxSize)))
+ require.NoError(b, err)
+ cnt++
+ }
+
err = tx.Commit()
require.NoError(b, err)
- err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false)
- require.NoError(b, err)
-
- // insert 100k boxes, in batches of 1000
- cnt := 0
- for batch := 0; batch <= batchCount; batch++ {
- fmt.Printf("%d / %d boxes written\n", totalBoxes*batch/batchCount, totalBoxes)
-
- tx, err = dbs.Wdb.Handle.Begin()
- require.NoError(b, err)
- writer, err := makeAccountsSQLWriter(tx, false, false, true, false)
- require.NoError(b, err)
- for boxIdx := 0; boxIdx < totalBoxes/batchCount; boxIdx++ {
- err = writer.upsertKvPair(fmt.Sprintf("%d", cnt), string(make([]byte, boxSize)))
- require.NoError(b, err)
- cnt++
- }
+ writer.close()
+ }
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeFull, true)
+ return dbs, cleanup, err
+}
- err = tx.Commit()
- require.NoError(b, err)
- writer.close()
+func BenchmarkBoxDatabaseRead(b *testing.B) {
+ getBoxNamePermutation := func(totalBoxes int) []int {
+ rand.Seed(time.Now().UnixNano())
+ boxNames := make([]int, totalBoxes)
+ for i := 0; i < totalBoxes; i++ {
+ boxNames[i] = i
}
- err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeFull, true)
- return dbs, cleanup, err
+ rand.Shuffle(len(boxNames), func(x, y int) { boxNames[x], boxNames[y] = boxNames[y], boxNames[x] })
+ return boxNames
}
- dbs, cleanup, err := initDatabase()
- require.NoError(b, err)
- defer cleanup()
+ boxCnt := []int{10, 1000, 100000}
+ boxSizes := []int{2, 2048, 4 * 8096}
+ for _, totalBoxes := range boxCnt {
+ for _, boxSize := range boxSizes {
+ b.Run(fmt.Sprintf("totalBoxes=%d/boxSize=%d", totalBoxes, boxSize), func(b *testing.B) {
+ b.StopTimer()
- b.Run("ReadBoxes", func(b *testing.B) {
- b.StopTimer()
- for i := 0; i < totalBoxes; i++ {
- lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
+ dbs, cleanup, err := initBoxDatabase(b, totalBoxes, boxSize)
+ require.NoError(b, err)
- var pv persistedValue
- var v sql.NullString
+ boxNames := getBoxNamePermutation(totalBoxes)
+ var v sql.NullString
+ for i := 0; i < b.N; i++ {
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+
+ var pv persistedValue
+ boxName := boxNames[i%totalBoxes]
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ }
- b.StartTimer()
- err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", i))).Scan(&pv.round, &v)
- b.StopTimer()
- require.NoError(b, err)
- require.True(b, v.Valid)
- if i >= b.N {
- return
- }
+ cleanup()
+ })
}
- })
-
- b.Run("ReadBoxesDuplicate", func(b *testing.B) {
- b.StopTimer()
- for i := 0; i < totalBoxes; i++ {
- lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
+ }
- var pv persistedValue
- var v sql.NullString
+ lookbacks := []int{2, 32, 256, 2048}
+ for _, lookback := range lookbacks {
+ for _, boxSize := range boxSizes {
+ totalBoxes := 100000
- err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", i))).Scan(&pv.round, &v)
- require.NoError(b, err)
- require.True(b, v.Valid)
- if i >= b.N {
- return
- }
- }
+ b.Run(fmt.Sprintf("lookback=%d/boxSize=%d", lookback, boxSize), func(b *testing.B) {
+ b.StopTimer()
- for i := 0; i < totalBoxes; i++ {
- lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
+ dbs, cleanup, err := initBoxDatabase(b, totalBoxes, boxSize)
+ require.NoError(b, err)
- var pv persistedValue
- var v sql.NullString
- b.StartTimer()
- err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", i))).Scan(&pv.round, &v)
- b.StopTimer()
- require.NoError(b, err)
- require.True(b, v.Valid)
+ boxNames := getBoxNamePermutation(totalBoxes)
+ var v sql.NullString
+ for i := 0; i < b.N+lookback; i++ {
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+
+ var pv persistedValue
+ boxName := boxNames[i%totalBoxes]
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+
+ if i >= lookback {
+ boxName = boxNames[(i-lookback)%totalBoxes]
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ }
+ }
- if i >= b.N {
- return
- }
+ cleanup()
+ })
}
- })
+ }
}
From 68c66cb08499cdb0a258829664ccc027e530c5b2 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Thu, 14 Jul 2022 16:27:54 -0400
Subject: [PATCH 05/21] improve cache performance
---
ledger/accountdb_test.go | 40 +++++++++++++++++++++++++++++++++-------
1 file changed, 33 insertions(+), 7 deletions(-)
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 1f870cbbcb..a9e7901cb4 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -2808,6 +2808,34 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
}
}
+func BenchmarkLRUResources(b *testing.B) {
+ b.StopTimer()
+ var baseResources lruResources
+ baseResources.init(nil, 1000, 850)
+
+ var data persistedResourcesData
+ var has bool
+ addrs := make([]basics.Address, 850)
+ for i := 0; i < 850; i++ {
+ data.data.ApprovalProgram = make([]byte, 8096*4)
+ data.aidx = basics.CreatableIndex(1)
+ addrBytes := ([]byte(fmt.Sprintf("%d", i)))[:32]
+ var addr basics.Address
+ for i, b := range addrBytes {
+ addr[i] = b
+ }
+ addrs[i] = addr
+ baseResources.write(data, addr)
+ }
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ pos := i % 850
+ data, has = baseResources.read(addrs[pos], basics.CreatableIndex(1))
+ require.True(b, has)
+ }
+}
+
func initBoxDatabase(b *testing.B, totalBoxes, boxSize int) (db.Pair, func(), error) {
batchCount := 100
if batchCount > totalBoxes {
@@ -2872,11 +2900,10 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
require.NoError(b, err)
boxNames := getBoxNamePermutation(totalBoxes)
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
var v sql.NullString
for i := 0; i < b.N; i++ {
- lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
-
var pv persistedValue
boxName := boxNames[i%totalBoxes]
b.StartTimer()
@@ -2891,7 +2918,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
}
}
- lookbacks := []int{2, 32, 256, 2048}
+ lookbacks := []int{1, 32, 256, 2048}
for _, lookback := range lookbacks {
for _, boxSize := range boxSizes {
totalBoxes := 100000
@@ -2903,11 +2930,10 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
require.NoError(b, err)
boxNames := getBoxNamePermutation(totalBoxes)
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
var v sql.NullString
for i := 0; i < b.N+lookback; i++ {
- lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
- require.NoError(b, err)
-
var pv persistedValue
boxName := boxNames[i%totalBoxes]
err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
From e64f659c83ba2da46fa7117f4393b7f4644aa339 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Fri, 15 Jul 2022 11:52:08 -0400
Subject: [PATCH 06/21] comments
---
ledger/accountdb_test.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index a9e7901cb4..61e93d6e5b 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -2918,6 +2918,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
}
}
+ // test caching performance
lookbacks := []int{1, 32, 256, 2048}
for _, lookback := range lookbacks {
for _, boxSize := range boxSizes {
@@ -2940,6 +2941,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
require.NoError(b, err)
require.True(b, v.Valid)
+ // benchmark reading the potentially cached value that was read lookback boxes ago
if i >= lookback {
boxName = boxNames[(i-lookback)%totalBoxes]
b.StartTimer()
From ffe15850a887faef0ec6c578efd50a93d6675ef6 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Wed, 20 Jul 2022 12:15:21 -0400
Subject: [PATCH 07/21] implement box cache and corresponding unit tests
---
ledger/accountdb.go | 22 ++-
ledger/accountdb_test.go | 4 +-
ledger/acctupdates.go | 26 +++-
ledger/lruboxes.go | 132 ++++++++++++++++
ledger/lruboxes_test.go | 240 +++++++++++++++++++++++++++++
ledger/persistedboxes_list.go | 143 +++++++++++++++++
ledger/persistedboxes_list_test.go | 175 +++++++++++++++++++++
7 files changed, 732 insertions(+), 10 deletions(-)
create mode 100644 ledger/lruboxes.go
create mode 100644 ledger/lruboxes_test.go
create mode 100644 ledger/persistedboxes_list.go
create mode 100644 ledger/persistedboxes_list_test.go
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 480b4cf133..5c2ec9bb9e 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -207,6 +207,15 @@ func (prd *persistedResourcesData) AccountResource() ledgercore.AccountResource
return ret
}
+//msgp:ignore persistedBoxData
+type persistedBoxData struct {
+ // box value
+ value *string
+ // the round number that is associated with the box data. This field is the corresponding one to the round field
+ // in persistedAccountData, and serves the same purpose.
+ round basics.Round
+}
+
// resourceDelta is used as part of the compactResourcesDeltas to describe a change to a single resource.
type resourceDelta struct {
oldResource persistedResourcesData
@@ -1968,12 +1977,7 @@ func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxRes
return
}
-type persistedValue struct {
- value *string
- round basics.Round
-}
-
-func (qs *accountsDbQueries) lookupKeyValue(key string) (pv persistedValue, err error) {
+func (qs *accountsDbQueries) lookupKeyValue(key string) (pv persistedBoxData, err error) {
err = db.Retry(func() error {
var v sql.NullString
// Cast to []byte to avoid interpretation as character string, see note in upsertKvPair
@@ -3708,3 +3712,9 @@ func (pac *persistedAccountData) before(other *persistedAccountData) bool {
func (prd *persistedResourcesData) before(other *persistedResourcesData) bool {
return prd.round < other.round
}
+
+// before compares the round numbers of two persistedBoxData and determines if the current persistedBoxData
+// happened before the other.
+func (prd *persistedBoxData) before(other *persistedBoxData) bool {
+ return prd.round < other.round
+}
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 61e93d6e5b..d598caabbf 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -2904,7 +2904,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
require.NoError(b, err)
var v sql.NullString
for i := 0; i < b.N; i++ {
- var pv persistedValue
+ var pv persistedBoxData
boxName := boxNames[i%totalBoxes]
b.StartTimer()
err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
@@ -2935,7 +2935,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
require.NoError(b, err)
var v sql.NullString
for i := 0; i < b.N+lookback; i++ {
- var pv persistedValue
+ var pv persistedBoxData
boxName := boxNames[i%totalBoxes]
err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
require.NoError(b, err)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 4c2d60c6fe..f77454ae06 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -210,6 +210,9 @@ type accountUpdates struct {
// baseResources stores the most recently used resources, at exactly dbRound
baseResources lruResources
+ // baseBoxes stores the most recently used box, at exactly dbRound
+ baseBoxes lruBoxes
+
// logAccountUpdatesMetrics is a flag for enable/disable metrics logging
logAccountUpdatesMetrics bool
@@ -309,6 +312,7 @@ func (au *accountUpdates) close() {
au.baseAccounts.prune(0)
au.baseResources.prune(0)
+ au.baseBoxes.prune(0)
}
// LookupOnlineAccountData returns the online account data for a given address at a given round.
@@ -378,7 +382,13 @@ func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bo
rnd = currentDbRound + basics.Round(currentDeltaLen)
}
- // OTHER LOOKUPS USE "base" caches here.
+ // check the baseBoxes cache
+ if pbd, has := au.baseBoxes.read(key); has {
+ // we don't technically need this, since it's already in the baseBoxes, however, writing this over
+ // would ensure that we promote this field.
+ au.baseBoxes.writePending(pbd, key)
+ return pbd.value, nil
+ }
if synchronized {
au.accountsMu.RUnlock()
@@ -391,7 +401,13 @@ func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bo
persistedData, err := au.accountsq.lookupKeyValue(key)
if persistedData.round == currentDbRound {
- return persistedData.value, nil
+ if persistedData.value != nil {
+ // if we read actual data return it
+ au.baseBoxes.writePending(persistedData, key)
+ return persistedData.value, err
+ }
+ // otherwise return empty
+ return nil, err
}
// The db round is unexpected...
@@ -1040,6 +1056,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
+ au.baseBoxes.init(au.log, 0, 0) // todo
return
}
@@ -1064,6 +1081,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.flushPendingWrites()
au.baseResources.flushPendingWrites()
+ au.baseBoxes.flushPendingWrites()
for i := 0; i < delta.Accts.Len(); i++ {
addr, data := delta.Accts.GetByIdx(i)
@@ -1118,6 +1136,9 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.prune(newBaseAccountSize)
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
+ // todo:
+ newBaseBoxesSize := (len(au.kvStore) + 1)
+ au.baseBoxes.prune(newBaseBoxesSize)
if au.voters != nil {
au.voters.newBlock(blk.BlockHeader)
@@ -2046,6 +2067,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
// rowid are flushed.
au.baseAccounts.prune(0)
au.baseResources.prune(0)
+ au.baseBoxes.prune(0)
startTime := time.Now()
vacuumExitCh := make(chan struct{}, 1)
diff --git a/ledger/lruboxes.go b/ledger/lruboxes.go
new file mode 100644
index 0000000000..effa1528a7
--- /dev/null
+++ b/ledger/lruboxes.go
@@ -0,0 +1,132 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "github.com/algorand/go-algorand/logging"
+)
+
+//msgp:ignore cachedBoxData
+type cachedBoxData struct {
+ persistedBoxData
+
+ // box name
+ key string
+}
+
+// lruBoxes provides a storage class for the most recently used box data.
+// It doesn't have any synchronization primitive on its own and requires to be
+// synchronized by the caller.
+type lruBoxes struct {
+ // boxList contains the list of persistedBoxData, where the front ones are the most "fresh"
+ // and the ones on the back are the oldest.
+ boxList *persistedBoxesDataList
+
+ // boxes provides fast access to the various elements in the list by using the key
+ boxes map[string]*persistedBoxesDataListNode
+
+ // pendingBoxes are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these,
+ // it would call flushPendingWrites and these would be merged into the boxes/boxesList
+ pendingBoxes chan cachedBoxData
+
+ // log interface; used for logging the threshold event.
+ log logging.Logger
+
+ // pendingWritesWarnThreshold is the threshold beyond which we would write a warning for exceeding the number of pendingBoxes entries
+ pendingWritesWarnThreshold int
+}
+
+// init initializes the lruBoxes for use.
+// thread locking semantics : write lock
+func (m *lruBoxes) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
+ m.boxList = newPersistedBoxList().allocateFreeNodes(pendingWrites)
+ m.boxes = make(map[string]*persistedBoxesDataListNode, pendingWrites)
+ m.pendingBoxes = make(chan cachedBoxData, pendingWrites)
+ m.log = log
+ m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
+}
+
+// read the persistedBoxData object that the lruBoxes has for the given key.
+// thread locking semantics : read lock
+func (m *lruBoxes) read(key string) (data persistedBoxData, has bool) {
+ if el := m.boxes[key]; el != nil {
+ return el.Value.persistedBoxData, true
+ }
+ return persistedBoxData{}, false
+}
+
+// flushPendingWrites flushes the pending writes to the main lruBoxes cache.
+// thread locking semantics : write lock
+func (m *lruBoxes) flushPendingWrites() {
+ pendingEntriesCount := len(m.pendingBoxes)
+ if pendingEntriesCount >= m.pendingWritesWarnThreshold {
+ m.log.Warnf("lruBoxes: number of entries in pendingBoxes(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
+ }
+ for ; pendingEntriesCount > 0; pendingEntriesCount-- {
+ select {
+ case pendingBoxData := <-m.pendingBoxes:
+ m.write(pendingBoxData.persistedBoxData, pendingBoxData.key)
+ default:
+ return
+ }
+ }
+}
+
+// writePending write a single persistedBoxData entry to the pendingBoxes buffer.
+// the function doesn't block, and in case of a buffer overflow the entry would not be added.
+// thread locking semantics : no lock is required.
+func (m *lruBoxes) writePending(box persistedBoxData, key string) {
+ select {
+ case m.pendingBoxes <- cachedBoxData{persistedBoxData: box, key: key}:
+ default:
+ }
+}
+
+// write a single persistedBoxData to the lruBoxes cache.
+// when writing the entry, the round number would be used to determine if it's a newer
+// version of what's already on the cache or not. In all cases, the entry is going
+// to be promoted to the front of the list.
+// thread locking semantics : write lock
+func (m *lruBoxes) write(boxData persistedBoxData, key string) {
+ if el := m.boxes[key]; el != nil {
+ // already exists; is it a newer ?
+ if el.Value.before(&boxData) {
+ // we update with a newer version.
+ el.Value = &cachedBoxData{persistedBoxData: boxData, key: key}
+ }
+ m.boxList.moveToFront(el)
+ } else {
+ // new entry.
+ m.boxes[key] = m.boxList.pushFront(&cachedBoxData{persistedBoxData: boxData, key: key})
+ }
+}
+
+// prune adjusts the current size of the lruBoxes cache, by dropping the least
+// recently used entries.
+// thread locking semantics : write lock
+func (m *lruBoxes) prune(newSize int) (removed int) {
+ for {
+ if len(m.boxes) <= newSize {
+ break
+ }
+ back := m.boxList.back()
+ delete(m.boxes, back.Value.key)
+ m.boxList.remove(back)
+ removed++
+ }
+ return
+}
diff --git a/ledger/lruboxes_test.go b/ledger/lruboxes_test.go
new file mode 100644
index 0000000000..6bf3e38a01
--- /dev/null
+++ b/ledger/lruboxes_test.go
@@ -0,0 +1,240 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+// TestLRUBasicBoxes exercises the basic write/read/prune cycle of the lruBoxes cache.
+func TestLRUBasicBoxes(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	var baseBoxes lruBoxes
+	baseBoxes.init(logging.TestingLog(t), 10, 5)
+
+	boxNum := 50
+	// write 50 boxes
+	for i := 0; i < boxNum; i++ {
+		boxValue := fmt.Sprintf("box %d value", i)
+		box := persistedBoxData{
+			value: &boxValue,
+			round: basics.Round(i),
+		}
+		baseBoxes.write(box, fmt.Sprintf("key%d", i))
+	}
+
+	// verify that all these boxes are truly there.
+	for i := 0; i < boxNum; i++ {
+		box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
+		require.True(t, has)
+		require.Equal(t, basics.Round(i), box.round)
+		require.Equal(t, fmt.Sprintf("box %d value", i), *(box.value))
+	}
+
+	// verify expected missing entries
+	for i := boxNum; i < boxNum*2; i++ {
+		box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
+		require.False(t, has)
+		require.Equal(t, persistedBoxData{}, box)
+	}
+
+	// drop the 25 least-recently-used entries; the most recently written
+	// half (keys 25..49) is expected to survive.
+	baseBoxes.prune(boxNum / 2)
+
+	// verify expected (missing/existing) entries
+	for i := 0; i < boxNum*2; i++ {
+		box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
+
+		if i >= boxNum/2 && i < boxNum {
+			// expected to have it.
+			require.True(t, has)
+			require.Equal(t, basics.Round(i), box.round)
+			require.Equal(t, fmt.Sprintf("box %d value", i), *(box.value))
+		} else {
+			require.False(t, has)
+			require.Equal(t, persistedBoxData{}, box)
+		}
+	}
+}
+
+// TestLRUBoxesPendingWrites verifies that entries written concurrently via
+// writePending eventually land in the main cache once flushPendingWrites is called.
+func TestLRUBoxesPendingWrites(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	var baseBoxes lruBoxes
+	boxesNum := 250
+	baseBoxes.init(logging.TestingLog(t), boxesNum*2, boxesNum)
+
+	for i := 0; i < boxesNum; i++ {
+		go func(i int) {
+			// random delay to shuffle the arrival order of the pending writes.
+			time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond)
+			boxValue := fmt.Sprintf("box %d value", i)
+			box := persistedBoxData{
+				value: &boxValue,
+				round: basics.Round(i),
+			}
+			baseBoxes.writePending(box, fmt.Sprintf("key%d", i))
+		}(i)
+	}
+	testStarted := time.Now()
+	for {
+		baseBoxes.flushPendingWrites()
+
+		// check if all boxes were loaded into "main" cache.
+		allBoxesLoaded := true
+		for i := 0; i < boxesNum; i++ {
+			_, has := baseBoxes.read(fmt.Sprintf("key%d", i))
+			if !has {
+				allBoxesLoaded = false
+				break
+			}
+		}
+		if allBoxesLoaded {
+			break
+		}
+		if time.Since(testStarted).Seconds() > 20 {
+			require.Fail(t, "failed after waiting for 20 second")
+		}
+		// not yet, keep looping.
+	}
+}
+
+// lruBoxesTestLogger wraps a logging.Logger and counts Warnf invocations,
+// allowing tests to assert on the number of warnings emitted by lruBoxes.
+type lruBoxesTestLogger struct {
+	logging.Logger
+	// NOTE(review): WarnfCallback is never invoked by Warnf below — confirm whether it can be removed.
+	WarnfCallback func(string, ...interface{})
+	warnMsgCount int
+}
+
+// Warnf counts warning messages; the message text and arguments are discarded.
+func (cl *lruBoxesTestLogger) Warnf(s string, args ...interface{}) {
+	cl.warnMsgCount++
+}
+
+// TestLRUBoxesPendingWritesWarning verifies that flushPendingWrites emits one
+// warning each time the pending-entries count meets or exceeds the threshold.
+func TestLRUBoxesPendingWritesWarning(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	var baseBoxes lruBoxes
+	pendingWritesBuffer := 50
+	pendingWritesThreshold := 40
+	log := &lruBoxesTestLogger{Logger: logging.TestingLog(t)}
+	baseBoxes.init(log, pendingWritesBuffer, pendingWritesThreshold)
+	// iteration j enqueues j pending writes before flushing.
+	for j := 0; j < 50; j++ {
+		for i := 0; i < j; i++ {
+			boxValue := fmt.Sprintf("box %d value", i)
+			box := persistedBoxData{
+				value: &boxValue,
+				round: basics.Round(i),
+			}
+			baseBoxes.writePending(box, fmt.Sprintf("key%d", i))
+		}
+		baseBoxes.flushPendingWrites()
+		if j >= pendingWritesThreshold {
+			// expect a warning in the log
+			require.Equal(t, 1+j-pendingWritesThreshold, log.warnMsgCount)
+		}
+	}
+}
+
+// TestLRUBoxesOmittedPendingWrites verifies that writes beyond the pending buffer
+// capacity are dropped: only the first pendingWritesBuffer entries survive the flush.
+func TestLRUBoxesOmittedPendingWrites(t *testing.T) {
+	partitiontest.PartitionTest(t)
+
+	var baseBoxes lruBoxes
+	pendingWritesBuffer := 50
+	pendingWritesThreshold := 40
+	log := &lruBoxesTestLogger{Logger: logging.TestingLog(t)}
+	baseBoxes.init(log, pendingWritesBuffer, pendingWritesThreshold)
+
+	// enqueue twice the buffer capacity; the second half should be dropped.
+	for i := 0; i < pendingWritesBuffer*2; i++ {
+		boxValue := fmt.Sprintf("box %d value", i)
+		box := persistedBoxData{
+			value: &boxValue,
+			round: basics.Round(i),
+		}
+		baseBoxes.writePending(box, fmt.Sprintf("key%d", i))
+	}
+
+	baseBoxes.flushPendingWrites()
+
+	// verify that all these boxes are truly there.
+	for i := 0; i < pendingWritesBuffer; i++ {
+		box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
+		require.True(t, has)
+		require.Equal(t, basics.Round(i), box.round)
+		require.Equal(t, fmt.Sprintf("box %d value", i), *(box.value))
+	}
+
+	// verify expected missing entries
+	for i := pendingWritesBuffer; i < pendingWritesBuffer*2; i++ {
+		box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
+		require.False(t, has)
+		require.Equal(t, persistedBoxData{}, box)
+	}
+}
+
+// BenchmarkLRUBoxesWrite measures writing a partially-overlapping set of boxes
+// into an already-populated lruBoxes cache.
+func BenchmarkLRUBoxesWrite(b *testing.B) {
+	numTestBoxes := 5000
+	// there are 2500 boxes that overlap
+	fillerBoxes := generatePersistedBoxesData(0, 97500)
+	boxes := generatePersistedBoxesData(97500-numTestBoxes/2, 97500+numTestBoxes/2)
+
+	benchLruWriteBoxes(b, fillerBoxes, boxes)
+}
+
+// benchLruWriteBoxes benchmarks writing the boxes entries into a cache that was
+// pre-populated with fillerBoxes; only the second write phase is timed.
+func benchLruWriteBoxes(b *testing.B, fillerBoxes []cachedBoxData, boxes []cachedBoxData) {
+	b.ResetTimer()
+	b.StopTimer()
+	var baseBoxes lruBoxes
+	// set up the cache under test, reusing the resources-cache sizing constants.
+	baseBoxes.init(logging.TestingLog(b), baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
+	for n := 0; n < b.N; n++ {
+		// populate the cache outside of the timed section.
+		baseBoxes = fillLRUBoxes(baseBoxes, fillerBoxes)
+
+		b.StartTimer()
+		fillLRUBoxes(baseBoxes, boxes)
+		b.StopTimer()
+		// empty the cache so the next iteration starts from a clean slate.
+		baseBoxes.prune(0)
+	}
+}
+
+// fillLRUBoxes writes every entry into baseBoxes and returns the cache.
+func fillLRUBoxes(baseBoxes lruBoxes, entries []cachedBoxData) lruBoxes {
+	for i := range entries {
+		baseBoxes.write(entries[i].persistedBoxData, entries[i].key)
+	}
+	return baseBoxes
+}
+
+// generatePersistedBoxesData creates one cachedBoxData entry per round in the
+// half-open range [startRound, endRound), each keyed "key<round>" with a
+// matching "box <round> value" payload.
+func generatePersistedBoxesData(startRound, endRound int) []cachedBoxData {
+	boxes := make([]cachedBoxData, endRound-startRound)
+	for i := startRound; i < endRound; i++ {
+		boxValue := fmt.Sprintf("box %d value", i)
+
+		boxes[i-startRound] = cachedBoxData{
+			persistedBoxData: persistedBoxData{
+				value: &boxValue,
+				// i already starts at startRound; the previous
+				// "basics.Round(i + startRound)" double-counted the
+				// offset whenever startRound > 0, producing rounds
+				// that did not match the keys/values.
+				round: basics.Round(i),
+			},
+			key: fmt.Sprintf("key%d", i),
+		}
+	}
+	return boxes
+}
diff --git a/ledger/persistedboxes_list.go b/ledger/persistedboxes_list.go
new file mode 100644
index 0000000000..153e06d7d1
--- /dev/null
+++ b/ledger/persistedboxes_list.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+// persistedBoxesDataList represents a doubly linked list.
+// Must be initiated with newPersistedBoxList.
+type persistedBoxesDataList struct {
+	root persistedBoxesDataListNode // sentinel list element, only &root, root.prev, and root.next are used
+	freeList *persistedBoxesDataListNode // preallocated nodes location
+}
+
+// persistedBoxesDataListNode is a single node of persistedBoxesDataList.
+type persistedBoxesDataListNode struct {
+	// Next and previous pointers in the doubly-linked list of elements.
+	// To simplify the implementation, internally a list l is implemented
+	// as a ring, such that &l.root is both the next element of the last
+	// list element (l.Back()) and the previous element of the first list
+	// element (l.Front()).
+	next, prev *persistedBoxesDataListNode
+
+	// Value is the cached box entry carried by this node.
+	Value *cachedBoxData
+}
+
+// newPersistedBoxList constructs an empty persistedBoxesDataList whose ring
+// sentinel points at itself and whose free list holds a value-less helper node.
+func newPersistedBoxList() *persistedBoxesDataList {
+	l := &persistedBoxesDataList{
+		// the free list head is a helper node that never stores a value.
+		freeList: new(persistedBoxesDataListNode),
+	}
+	l.root.next = &l.root
+	l.root.prev = &l.root
+	return l
+}
+
+// insertNodeToFreeList clears otherNode and pushes it onto the singly linked
+// free list for later reuse by getNewNode.
+func (l *persistedBoxesDataList) insertNodeToFreeList(otherNode *persistedBoxesDataListNode) {
+	otherNode.prev = nil
+	otherNode.Value = nil
+	otherNode.next = l.freeList.next
+	l.freeList.next = otherNode
+}
+
+// getNewNode returns a node ready for use, popping one off the free list when
+// available and falling back to a fresh allocation otherwise.
+func (l *persistedBoxesDataList) getNewNode() *persistedBoxesDataListNode {
+	if node := l.freeList.next; node != nil {
+		l.freeList.next = node.next
+		return node
+	}
+	return new(persistedBoxesDataListNode)
+}
+
+// allocateFreeNodes preallocates numAllocs spare nodes on the free list so that
+// subsequent pushFront calls avoid heap allocations. It returns l for chaining.
+func (l *persistedBoxesDataList) allocateFreeNodes(numAllocs int) *persistedBoxesDataList {
+	if l.freeList == nil {
+		return l
+	}
+	for i := numAllocs; i > 0; i-- {
+		l.insertNodeToFreeList(new(persistedBoxesDataListNode))
+	}
+	return l
+}
+
+// back returns the last element of list l, or nil if the list is empty.
+func (l *persistedBoxesDataList) back() *persistedBoxesDataListNode {
+	// the list is empty exactly when the sentinel root points at itself
+	// (assumes entries are inserted correctly - via pushFront).
+	if l.root.next == &l.root {
+		return nil
+	}
+	return l.root.prev
+}
+
+// remove removes e from l if e is an element of list l,
+// and recycles the removed node onto the free list.
+// The element must not be nil.
+func (l *persistedBoxesDataList) remove(e *persistedBoxesDataListNode) {
+	e.prev.next = e.next
+	e.next.prev = e.prev
+	e.next = nil // avoid memory leaks
+	e.prev = nil // avoid memory leaks
+
+	l.insertNodeToFreeList(e)
+}
+
+// pushFront inserts a new element holding v at the front of list l and returns it.
+func (l *persistedBoxesDataList) pushFront(v *cachedBoxData) *persistedBoxesDataListNode {
+	node := l.getNewNode()
+	node.Value = v
+	return l.insertValue(node, &l.root)
+}
+
+// insertValue links newNode into the ring immediately after at and returns newNode.
+// (Unlike container/list, this list keeps no length counter.)
+func (l *persistedBoxesDataList) insertValue(newNode *persistedBoxesDataListNode, at *persistedBoxesDataListNode) *persistedBoxesDataListNode {
+	n := at.next
+	at.next = newNode
+	newNode.prev = at
+	newNode.next = n
+	n.prev = newNode
+
+	return newNode
+}
+
+// moveToFront moves element e to the front of list l.
+// If e is already the first element, the list is left untouched.
+// The element must not be nil.
+func (l *persistedBoxesDataList) moveToFront(e *persistedBoxesDataListNode) {
+	if l.root.next != e {
+		l.move(e, &l.root)
+	}
+}
+
+// move detaches e from its current position and re-links it immediately after
+// at, returning e. Calling it with e == at is a no-op.
+func (l *persistedBoxesDataList) move(e, at *persistedBoxesDataListNode) *persistedBoxesDataListNode {
+	if e == at {
+		return e
+	}
+	// unlink e from its current neighbors.
+	e.prev.next = e.next
+	e.next.prev = e.prev
+
+	// splice e in between at and at's old successor.
+	n := at.next
+	at.next = e
+	e.prev = at
+	e.next = n
+	n.prev = e
+
+	return e
+}
diff --git a/ledger/persistedboxes_list_test.go b/ledger/persistedboxes_list_test.go
new file mode 100644
index 0000000000..021e290a54
--- /dev/null
+++ b/ledger/persistedboxes_list_test.go
@@ -0,0 +1,175 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+// getRoot returns the list sentinel, allowing the shared list-checking test
+// helpers to walk the ring through the dataListNode interface.
+func (l *persistedBoxesDataList) getRoot() dataListNode {
+	return &l.root
+}
+
+// getNext returns the successor of this node wrapped in the dataListNode interface.
+func (l *persistedBoxesDataListNode) getNext() dataListNode {
+	// get rid of returning nil wrapped into an interface to let i = x.getNext(); i != nil work.
+	if l.next == nil {
+		return nil
+	}
+	return l.next
+}
+
+// getPrev returns the predecessor of this node wrapped in the dataListNode interface.
+func (l *persistedBoxesDataListNode) getPrev() dataListNode {
+	// return an untyped nil rather than a nil pointer boxed in the interface.
+	if l.prev == nil {
+		return nil
+	}
+	return l.prev
+}
+
+// checkListPointersBD asserts that list l contains exactly the nodes in es, in
+// order, by delegating to the shared checkListPointers helper.
+func checkListPointersBD(t *testing.T, l *persistedBoxesDataList, es []*persistedBoxesDataListNode) {
+	nodes := make([]dataListNode, len(es))
+	for i := range es {
+		nodes[i] = es[i]
+	}
+
+	checkListPointers(t, l, nodes)
+}
+
+// TestRemoveFromListBD verifies element removal from the middle and front of the list.
+func TestRemoveFromListBD(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	l := newPersistedBoxList()
+	e1 := l.pushFront(&cachedBoxData{key: "key1"})
+	e2 := l.pushFront(&cachedBoxData{key: "key2"})
+	e3 := l.pushFront(&cachedBoxData{key: "key3"})
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e2, e1})
+
+	l.remove(e2)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e1})
+	l.remove(e3)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1})
+}
+
+// TestAddingNewNodeWithAllocatedFreeListBD verifies that pushFront consumes
+// nodes from the preallocated free list instead of allocating new ones.
+func TestAddingNewNodeWithAllocatedFreeListBD(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	l := newPersistedBoxList().allocateFreeNodes(10)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
+	if countListSize(l.freeList) != 10 {
+		t.Errorf("free list did not allocate nodes")
+		return
+	}
+	// test elements
+	e1 := l.pushFront(&cachedBoxData{key: "key1"})
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1})
+
+	if countListSize(l.freeList) != 9 {
+		t.Errorf("free list did not provide a node on new list entry")
+		return
+	}
+}
+
+// TestMultielementListPositioningBD drives a sequence of pushFront/move/remove/
+// moveToFront operations and checks the resulting ordering after every step.
+func TestMultielementListPositioningBD(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	l := newPersistedBoxList()
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
+	// test elements
+	e2 := l.pushFront(&cachedBoxData{key: "key1"})
+	e1 := l.pushFront(&cachedBoxData{key: "key2"})
+	e3 := l.pushFront(&cachedBoxData{key: "key3"})
+	e4 := l.pushFront(&cachedBoxData{key: "key4"})
+	e5 := l.pushFront(&cachedBoxData{key: "key5"})
+
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e5, e4, e3, e1, e2})
+
+	l.move(e4, e1)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e5, e3, e1, e4, e2})
+
+	l.remove(e5)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e1, e4, e2})
+
+	l.move(e1, e4) // swap in middle
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e4, e1, e2})
+
+	l.moveToFront(e4)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e4, e3, e1, e2})
+
+	l.remove(e2)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e4, e3, e1})
+
+	l.moveToFront(e3) // move from middle
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e4, e1})
+
+	l.moveToFront(e1) // move from end
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1, e3, e4})
+
+	l.moveToFront(e1) // no movement
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1, e3, e4})
+
+	e2 = l.pushFront(&cachedBoxData{key: "key2"})
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1, e3, e4})
+
+	l.remove(e3) // removing from middle
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1, e4})
+
+	l.remove(e4) // removing from end
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1})
+
+	l.move(e2, e1) // swapping between two elements
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1, e2})
+
+	l.remove(e1) // removing front
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2})
+
+	l.move(e2, l.back()) // swapping element with itself.
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2})
+
+	l.remove(e2) // remove last one
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
+}
+
+// TestSingleElementListPositioningBD checks insert/moveToFront/remove on a one-element list.
+func TestSingleElementListPositioningBD(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	l := newPersistedBoxList()
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
+	e := l.pushFront(&cachedBoxData{key: "key1"})
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e})
+	l.moveToFront(e)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e})
+	l.remove(e)
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
+}
+
+// TestRemovedNodeShouldBeMovedToFreeListBD verifies that a removed node is
+// recycled onto the free list rather than discarded.
+func TestRemovedNodeShouldBeMovedToFreeListBD(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	l := newPersistedBoxList()
+	e1 := l.pushFront(&cachedBoxData{key: "key1"})
+	e2 := l.pushFront(&cachedBoxData{key: "key2"})
+
+	checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1})
+
+	e := l.back()
+	l.remove(e)
+
+	// scan the free list looking for the removed node.
+	for i := l.freeList.next; i != nil; i = i.next {
+		if i == e {
+			// stopping the test with good results:
+			return
+		}
+	}
+	t.Error("expected the removed node to appear at the freelist")
+}
From fcfd07535549bdd2290dc3bc86b6b900d3509f09 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Wed, 20 Jul 2022 13:00:50 -0400
Subject: [PATCH 08/21] resolve todos
---
ledger/accountdb.go | 2 +-
ledger/acctupdates.go | 14 +++++++++++---
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index e8c467ec1b..181b1e79ab 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -271,7 +271,7 @@ func (prd *persistedResourcesData) AccountResource() ledgercore.AccountResource
type persistedBoxData struct {
// box value
value *string
- // the round number that is associated with the resourcesData. This field is the corresponding one to the round field
+ // the round number that is associated with the box value. This field is the corresponding one to the round field
// in persistedAccountData, and serves the same purpose.
round basics.Round
}
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 26e963044b..ccea6e0e90 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -68,6 +68,15 @@ const baseResourcesPendingAccountsBufferSize = 100000
// is being flushed into the main base resources cache.
const baseResourcesPendingAccountsWarnThreshold = 85000
+// baseBoxesPendingAccountsBufferSize defines the size of the base boxes pending buffer.
+// At the beginning of a new round, the entries from this buffer are being flushed into the base boxes map.
+// NOTE(review): the "Accounts" fragment in the name mirrors the sibling account/resource constants — confirm the naming is intended.
+const baseBoxesPendingAccountsBufferSize = 50000
+
+// baseBoxesPendingAccountsWarnThreshold defines the threshold at which the lruBoxes would generate a warning
+// after we've surpassed a given pending boxes size. The warning is being generated when the pending box data
+// is being flushed into the main base boxes cache.
+const baseBoxesPendingAccountsWarnThreshold = 42500
+
// initializeCachesReadaheadBlocksStream defines how many block we're going to attempt to queue for the
// initializeCaches method before it can process and store the account changes to disk.
const initializeCachesReadaheadBlocksStream = 4
@@ -927,7 +936,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
- au.baseBoxes.init(au.log, 0, 0) // todo
+ au.baseBoxes.init(au.log, baseBoxesPendingAccountsBufferSize, baseBoxesPendingAccountsWarnThreshold)
return
}
@@ -1007,8 +1016,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.prune(newBaseAccountSize)
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
-
- newBaseBoxesSize := (len(au.kvStore) + 1)
+ newBaseBoxesSize := (len(au.kvStore) + 1) + baseBoxesPendingAccountsBufferSize
au.baseBoxes.prune(newBaseBoxesSize)
}
From abaf6ca575a9589e3791ebed53cddf8ccc5f62a3 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Wed, 27 Jul 2022 13:14:24 -0400
Subject: [PATCH 09/21] remove todo
---
ledger/lruboxes_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ledger/lruboxes_test.go b/ledger/lruboxes_test.go
index 6bf3e38a01..9a318ce916 100644
--- a/ledger/lruboxes_test.go
+++ b/ledger/lruboxes_test.go
@@ -205,7 +205,7 @@ func benchLruWriteBoxes(b *testing.B, fillerBoxes []cachedBoxData, boxes []cache
b.StopTimer()
var baseBoxes lruBoxes
// setting up the baseBoxes with a predefined cache size
- baseBoxes.init(logging.TestingLog(b), baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold) // todo
+ baseBoxes.init(logging.TestingLog(b), baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
for i := 0; i < b.N; i++ {
baseBoxes = fillLRUBoxes(baseBoxes, fillerBoxes)
From e15b6da4512a1acb250dec0fcb1baa01520c01e4 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Fri, 29 Jul 2022 10:48:43 -0400
Subject: [PATCH 10/21] fill in missing pieces of cache implementation
---
ledger/accountdb.go | 7 +++++--
ledger/accountdb_test.go | 15 ++++++++++-----
ledger/acctupdates.go | 6 +++++-
ledger/acctupdates_test.go | 6 +++---
ledger/catchpointtracker_test.go | 2 +-
ledger/tracker.go | 1 +
6 files changed, 25 insertions(+), 12 deletions(-)
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 181b1e79ab..a0612be0f5 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -3401,7 +3401,7 @@ func accountsNewRound(
tx *sql.Tx,
updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, err error) {
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedBoxes map[string]persistedBoxData, err error) {
hasAccounts := updates.len() > 0
hasResources := resources.len() > 0
hasKvPairs := len(kvPairs) > 0
@@ -3439,7 +3439,7 @@ func accountsNewRoundImpl(
writer accountsWriter,
updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, err error) {
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedBoxes map[string]persistedBoxData, err error) {
updatedAccounts = make([]persistedAccountData, updates.len())
updatedAccountIdx := 0
newAddressesRowIDs := make(map[basics.Address]int64)
@@ -3637,11 +3637,14 @@ func accountsNewRoundImpl(
}
}
+ updatedBoxes = make(map[string]persistedBoxData)
for key, value := range kvPairs {
if value.data != nil {
err = writer.upsertKvPair(key, *value.data)
+ updatedBoxes[key] = persistedBoxData{value: value.data, round: lastUpdateRound}
} else {
err = writer.deleteKvPair(key)
+ updatedBoxes[key] = persistedBoxData{value: nil, round: lastUpdateRound}
}
if err != nil {
return
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index bd8c4f6d36..15a34bd9a9 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -311,7 +311,7 @@ func TestAccountDBRound(t *testing.T) {
require.NoError(t, err)
expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams)
- updatedAccts, updatesResources, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i))
+ updatedAccts, updatesResources, updatedBoxes, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
numResUpdates := 0
@@ -319,6 +319,7 @@ func TestAccountDBRound(t *testing.T) {
numResUpdates += len(rs)
}
require.Equal(t, resourceUpdatesCnt.len(), numResUpdates)
+ require.Equal(t, 0, len(updatedBoxes))
updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i))
require.NoError(t, err)
@@ -448,7 +449,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
err = outResourcesDeltas.resourcesLoadOld(tx, knownAddresses)
require.NoError(t, err)
- updatedAccts, updatesResources, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound))
+ updatedAccts, updatesResources, updatedBoxes, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound))
require.NoError(t, err)
require.Equal(t, 1, len(updatedAccts)) // we store empty even for deleted accounts
require.Equal(t,
@@ -461,6 +462,8 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
persistedResourcesData{addrid: 0, aidx: 100, data: makeResourcesData(0), round: basics.Round(lastRound)},
updatesResources[addr][0],
)
+
+ require.Equal(t, 0, len(updatedBoxes))
})
}
}
@@ -2885,12 +2888,13 @@ func TestAccountUnorderedUpdates(t *testing.T) {
t.Run(fmt.Sprintf("acct-perm-%d|res-perm-%d", i, j), func(t *testing.T) {
a := require.New(t)
mock2 := mock.clone()
- updatedAccounts, updatedResources, err := accountsNewRoundImpl(
+ updatedAccounts, updatedResources, updatedBoxes, err := accountsNewRoundImpl(
&mock2, acctVariant, resVariant, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
a.Equal(3, len(updatedAccounts))
a.Equal(3, len(updatedResources))
+ a.Equal(0, len(updatedBoxes))
})
}
}
@@ -2967,12 +2971,13 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
a.Equal(1, len(resDeltas.misses)) // (addr2, aidx) does not exist
a.Equal(2, resDeltas.len()) // (addr1, aidx) found
- updatedAccounts, updatedResources, err := accountsNewRoundImpl(
+ updatedAccounts, updatedResources, updatedBoxes, err := accountsNewRoundImpl(
&mock, acctDeltas, resDeltas, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
a.Equal(3, len(updatedAccounts))
a.Equal(2, len(updatedResources))
+ a.Equal(0, len(updatedBoxes))
// one deletion entry for pre-existing account addr1, and one entry for in-memory account addr2
// in base accounts updates and in resources updates
@@ -3266,7 +3271,7 @@ func TestAccountOnlineQueries(t *testing.T) {
err = accountsPutTotals(tx, totals, false)
require.NoError(t, err)
- updatedAccts, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, nil, nil, proto, rnd)
+ updatedAccts, _, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, nil, nil, proto, rnd)
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index ccea6e0e90..31f7eaa811 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -1643,7 +1643,7 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe
// the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
// so that we can update the base account back.
- dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
+ dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, dcc.updatedPersistedBoxes, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
if err != nil {
return err
}
@@ -1734,6 +1734,10 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
}
}
+ for key, persistedBox := range dcc.updatedPersistedBoxes {
+ au.baseBoxes.write(persistedBox, key)
+ }
+
for cidx, modCrt := range dcc.compactCreatableDeltas {
cnt := modCrt.Ndeltas
mcreat, ok := au.creatables[cidx]
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 298b03e527..a01ceab9f7 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -1204,7 +1204,7 @@ func TestListCreatables(t *testing.T) {
// sync with the database
var updates compactAccountDeltas
var resUpdates compactResourcesDeltas
- _, _, err = accountsNewRound(tx, updates, resUpdates, nil, ctbsWithDeletes, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, ctbsWithDeletes, proto, basics.Round(1))
require.NoError(t, err)
// nothing left in cache
au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
@@ -1220,7 +1220,7 @@ func TestListCreatables(t *testing.T) {
// ******* Results are obtained from the database and from the cache *******
// ******* Deletes are in the database and in the cache *******
// sync with the database. This has deletes synced to the database.
- _, _, err = accountsNewRound(tx, updates, resUpdates, nil, au.creatables, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, au.creatables, proto, basics.Round(1))
require.NoError(t, err)
// get new creatables in the cache. There will be deletes in the cache from the previous batch.
au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs,
@@ -1431,7 +1431,7 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
}
err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
return
})
require.NoError(b, err)
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 3195fa8f64..59b2f4d07a 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -385,7 +385,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
i++
}
- _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
if err != nil {
return
}
diff --git a/ledger/tracker.go b/ledger/tracker.go
index dc4603eb48..ae16b61e31 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -247,6 +247,7 @@ type deferredCommitContext struct {
updatedPersistedAccounts []persistedAccountData
updatedPersistedResources map[basics.Address][]persistedResourcesData
+ updatedPersistedBoxes map[string]persistedBoxData
compactOnlineAccountDeltas compactOnlineAccountDeltas
updatedPersistedOnlineAccounts []persistedOnlineAccountData
From 3bbee10f25b43fde2f95910d3a79ab82da222fa0 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Mon, 1 Aug 2022 15:25:47 -0400
Subject: [PATCH 11/21] exercise box cache and enforce invariants through
acctupdates test
---
ledger/acctupdates_test.go | 185 +++++++++++++++++++++++++++++++++++++
1 file changed, 185 insertions(+)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index a01ceab9f7..89f1fc737d 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -1352,6 +1352,191 @@ func TestBoxNamesByAppIDs(t *testing.T) {
}
}
+func TestBoxCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ initialBlocksCount := 1
+ accts := make(map[basics.Address]basics.AccountData)
+
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = 8
+ protoParams.SeedLookback = 1
+ protoParams.SeedRefreshInterval = 1
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestBoxNamesByAppIDs")
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, testProtocolVersion,
+ []map[basics.Address]basics.AccountData{accts},
+ )
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, _ := newAcctUpdates(t, ml, conf, ".")
+ defer au.close()
+
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+ opts := auNewBlockOpts{ledgercore.AccountDeltas{}, testProtocolVersion, protoParams, knownCreatables}
+
+ boxCnt := 1000
+ boxesPerBlock := 100
+ curBox := 0
+ var currentRound basics.Round
+ currentDBRound := basics.Round(1)
+
+ boxMap := make(map[string]string)
+ for i := 0; i < boxCnt; i++ {
+ boxMap[fmt.Sprintf("%d", i)] = fmt.Sprintf("value%d", i)
+ }
+
+ // add boxesPerBlock boxes on each iteration
+ for i := 0; i < boxCnt/boxesPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+
+ kvMods := make(map[string]*string)
+ if i < boxCnt/boxesPerBlock {
+ for j := 0; j < boxesPerBlock; j++ {
+ name := fmt.Sprintf("%d", curBox)
+ curBox++
+ val := boxMap[name]
+ kvMods[name] = &val
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ if uint64(currentRound) > protoParams.MaxBalLookback {
+ require.Equal(t, basics.Round(uint64(currentRound)-protoParams.MaxBalLookback), au.cachedDBRound)
+ } else {
+ require.Equal(t, basics.Round(0), au.cachedDBRound)
+ }
+
+ // verify cache doesn't contain the new boxes until commited to DB
+ if i < boxCnt/boxesPerBlock {
+ for name := range kvMods {
+ _, has := au.baseBoxes.read(name)
+ require.False(t, has)
+ }
+ }
+
+ // verify commited boxes appear in the box cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ startBox := (currentDBRound - 1) * basics.Round(boxesPerBlock)
+ for j := 0; j < boxesPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startBox)+uint64(j))
+ persistedValue, has := au.baseBoxes.read(name)
+ require.True(t, has)
+ require.Equal(t, boxMap[name], *persistedValue.value)
+ }
+ }
+ }
+
+ // updating inserted boxes
+ curBox = 0
+ for i := 0; i < boxCnt/boxesPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+
+ kvMods := make(map[string]*string)
+ if i < boxCnt/boxesPerBlock {
+ for j := 0; j < boxesPerBlock; j++ {
+ name := fmt.Sprintf("%d", curBox)
+ val := fmt.Sprintf("modified value%d", curBox)
+ kvMods[name] = &val
+ curBox++
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ require.Equal(t, basics.Round(uint64(currentRound)-protoParams.MaxBalLookback), au.cachedDBRound)
+
+ // verify cache doesn't contain updated box values that haven't been committed to db
+ if i < boxCnt/boxesPerBlock {
+ for name := range kvMods {
+ persistedValue, has := au.baseBoxes.read(name)
+ require.True(t, has)
+ require.Equal(t, boxMap[name], *persistedValue.value)
+ }
+ }
+
+ // verify commited updated box values appear in the box cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ lookback := basics.Round(boxCnt/boxesPerBlock + int(conf.MaxAcctLookback) + 1)
+ if currentDBRound < lookback {
+ continue
+ }
+
+ startBox := (currentDBRound - lookback) * basics.Round(boxesPerBlock)
+ for j := 0; j < boxesPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startBox)+uint64(j))
+ persistedValue, has := au.baseBoxes.read(name)
+ require.True(t, has)
+ expectedValue := fmt.Sprintf("modified value%s", name)
+ require.Equal(t, expectedValue, *persistedValue.value)
+ }
+ }
+ }
+
+ // deleting boxes
+ curBox = 0
+ for i := 0; i < boxCnt/boxesPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+
+ kvMods := make(map[string]*string)
+ if i < boxCnt/boxesPerBlock {
+ for j := 0; j < boxesPerBlock; j++ {
+ name := fmt.Sprintf("%d", curBox)
+ kvMods[name] = nil
+ curBox++
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ require.Equal(t, basics.Round(uint64(currentRound)-protoParams.MaxBalLookback), au.cachedDBRound)
+
+ // verify cache doesn't contain updated box values that haven't been committed to db
+ if i < boxCnt/boxesPerBlock {
+ for name := range kvMods {
+ persistedValue, has := au.baseBoxes.read(name)
+ require.True(t, has)
+ value := fmt.Sprintf("modified value%s", name)
+ require.Equal(t, value, *persistedValue.value)
+ }
+ }
+
+ // verify commited updated box values appear in the box cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ lookback := basics.Round(2*(boxCnt/boxesPerBlock+int(conf.MaxAcctLookback)) + 1)
+ if currentDBRound < lookback {
+ continue
+ }
+
+ startBox := (currentDBRound - lookback) * basics.Round(boxesPerBlock)
+ for j := 0; j < boxesPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startBox)+uint64(j))
+ persistedValue, has := au.baseBoxes.read(name)
+ require.True(t, has)
+ require.True(t, persistedValue.value == nil)
+ }
+ }
+ }
+}
+
func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) {
rows, err := tx.Query("SELECT rowid, address, data FROM accountbase")
if err != nil {
From f9f50b0e1214c938ee36e92c22a8e82ef6153346 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Mon, 1 Aug 2022 15:36:23 -0400
Subject: [PATCH 12/21] typo
---
ledger/lruboxes.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ledger/lruboxes.go b/ledger/lruboxes.go
index effa1528a7..eaf63256cf 100644
--- a/ledger/lruboxes.go
+++ b/ledger/lruboxes.go
@@ -20,7 +20,7 @@ import (
"github.com/algorand/go-algorand/logging"
)
-//msgp:ignore cachedResourceData
+//msgp:ignore cachedBoxData
type cachedBoxData struct {
persistedBoxData
From 6d7aca5a2ed215b090d36a8c02256d9899dc9684 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Tue, 2 Aug 2022 13:17:45 -0400
Subject: [PATCH 13/21] update old test
---
ledger/lruboxes_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ledger/lruboxes_test.go b/ledger/lruboxes_test.go
index 9a318ce916..8f3eb91068 100644
--- a/ledger/lruboxes_test.go
+++ b/ledger/lruboxes_test.go
@@ -205,7 +205,7 @@ func benchLruWriteBoxes(b *testing.B, fillerBoxes []cachedBoxData, boxes []cache
b.StopTimer()
var baseBoxes lruBoxes
// setting up the baseBoxes with a predefined cache size
- baseBoxes.init(logging.TestingLog(b), baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
+ baseBoxes.init(logging.TestingLog(b), baseBoxesPendingAccountsBufferSize, baseBoxesPendingAccountsWarnThreshold)
for i := 0; i < b.N; i++ {
baseBoxes = fillLRUBoxes(baseBoxes, fillerBoxes)
From f40283e66555baa0835b5aca70b1782c87493c3a Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Tue, 2 Aug 2022 13:20:39 -0400
Subject: [PATCH 14/21] rename threshold variable and reduce cache size
---
ledger/acctupdates.go | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 31f7eaa811..4e9d3485f8 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -68,14 +68,14 @@ const baseResourcesPendingAccountsBufferSize = 100000
// is being flushed into the main base resources cache.
const baseResourcesPendingAccountsWarnThreshold = 85000
-// baseBoxesPendingAccountsBufferSize defines the size of the base boxes pending buffer size.
+// baseBoxesPendingBufferSize defines the size of the base boxes pending buffer size.
// At the beginning of a new round, the entries from this buffer are being flushed into the base boxes map.
-const baseBoxesPendingAccountsBufferSize = 50000
+const baseBoxesPendingBufferSize = 5000
-// baseBoxesPendingAccountsWarnThreshold defines the threshold at which the lruBoxes would generate a warning
+// baseBoxesPendingWarnThreshold defines the threshold at which the lruBoxes would generate a warning
// after we've surpassed a given pending boxes size. The warning is being generated when the pending box data
// is being flushed into the main base boxes cache.
-const baseBoxesPendingAccountsWarnThreshold = 42500
+const baseBoxesPendingWarnThreshold = 4250
// initializeCachesReadaheadBlocksStream defines how many block we're going to attempt to queue for the
// initializeCaches method before it can process and store the account changes to disk.
@@ -936,7 +936,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
- au.baseBoxes.init(au.log, baseBoxesPendingAccountsBufferSize, baseBoxesPendingAccountsWarnThreshold)
+ au.baseBoxes.init(au.log, baseBoxesPendingBufferSize, baseBoxesPendingWarnThreshold)
return
}
@@ -1016,7 +1016,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.prune(newBaseAccountSize)
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
- newBaseBoxesSize := (len(au.kvStore) + 1) + baseBoxesPendingAccountsBufferSize
+ newBaseBoxesSize := (len(au.kvStore) + 1) + baseBoxesPendingBufferSize
au.baseBoxes.prune(newBaseBoxesSize)
}
From 4fb59f5c9f31714bead739de3e02209a7dff36b9 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Tue, 2 Aug 2022 13:38:13 -0400
Subject: [PATCH 15/21] unsaved changes
---
ledger/lruboxes_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ledger/lruboxes_test.go b/ledger/lruboxes_test.go
index 8f3eb91068..552c57a031 100644
--- a/ledger/lruboxes_test.go
+++ b/ledger/lruboxes_test.go
@@ -205,7 +205,7 @@ func benchLruWriteBoxes(b *testing.B, fillerBoxes []cachedBoxData, boxes []cache
b.StopTimer()
var baseBoxes lruBoxes
// setting up the baseBoxes with a predefined cache size
- baseBoxes.init(logging.TestingLog(b), baseBoxesPendingAccountsBufferSize, baseBoxesPendingAccountsWarnThreshold)
+ baseBoxes.init(logging.TestingLog(b), baseBoxesPendingBufferSize, baseBoxesPendingWarnThreshold)
for i := 0; i < b.N; i++ {
baseBoxes = fillLRUBoxes(baseBoxes, fillerBoxes)
From 25946b42b8c511286a9341ebb42657538adcfe30 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:57:06 -0400
Subject: [PATCH 16/21] rename boxes to KV
---
ledger/accountdb.go | 24 +-
ledger/accountdb_test.go | 17 +-
ledger/acctupdates.go | 58 +++--
ledger/acctupdates_test.go | 144 +++++------
ledger/lruboxes.go | 132 ----------
ledger/lruboxes_test.go | 240 ------------------
ledger/lrukv.go | 132 ++++++++++
ledger/lrukv_test.go | 240 ++++++++++++++++++
ledger/persistedboxes_list_test.go | 175 -------------
...persistedboxes_list.go => persistedkvs.go} | 46 ++--
ledger/persistedkvs_test.go | 175 +++++++++++++
ledger/persistedresources_list.go | 2 +-
ledger/tracker.go | 2 +-
13 files changed, 692 insertions(+), 695 deletions(-)
delete mode 100644 ledger/lruboxes.go
delete mode 100644 ledger/lruboxes_test.go
create mode 100644 ledger/lrukv.go
create mode 100644 ledger/lrukv_test.go
delete mode 100644 ledger/persistedboxes_list_test.go
rename ledger/{persistedboxes_list.go => persistedkvs.go} (64%)
create mode 100644 ledger/persistedkvs_test.go
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index a0612be0f5..450d3a1c15 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -267,11 +267,11 @@ func (prd *persistedResourcesData) AccountResource() ledgercore.AccountResource
return ret
}
-//msgp:ignore persistedBoxData
-type persistedBoxData struct {
- // box value
+//msgp:ignore persistedKVData
+type persistedKVData struct {
+ // kv value
value *string
- // the round number that is associated with the box value. This field is the corresponding one to the round field
+ // the round number that is associated with the kv value. This field is the corresponding one to the round field
// in persistedAccountData, and serves the same purpose.
round basics.Round
}
@@ -2576,7 +2576,7 @@ func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxRes
return
}
-func (qs *accountsDbQueries) lookupKeyValue(key string) (pv persistedBoxData, err error) {
+func (qs *accountsDbQueries) lookupKeyValue(key string) (pv persistedKVData, err error) {
err = db.Retry(func() error {
var v sql.NullString
// Cast to []byte to avoid interpretation as character string, see note in upsertKvPair
@@ -3401,7 +3401,7 @@ func accountsNewRound(
tx *sql.Tx,
updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedBoxes map[string]persistedBoxData, err error) {
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedKVs map[string]persistedKVData, err error) {
hasAccounts := updates.len() > 0
hasResources := resources.len() > 0
hasKvPairs := len(kvPairs) > 0
@@ -3439,7 +3439,7 @@ func accountsNewRoundImpl(
writer accountsWriter,
updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedBoxes map[string]persistedBoxData, err error) {
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedKVs map[string]persistedKVData, err error) {
updatedAccounts = make([]persistedAccountData, updates.len())
updatedAccountIdx := 0
newAddressesRowIDs := make(map[basics.Address]int64)
@@ -3637,14 +3637,14 @@ func accountsNewRoundImpl(
}
}
- updatedBoxes = make(map[string]persistedBoxData)
+ updatedKVs = make(map[string]persistedKVData, len(kvPairs))
for key, value := range kvPairs {
if value.data != nil {
err = writer.upsertKvPair(key, *value.data)
- updatedBoxes[key] = persistedBoxData{value: value.data, round: lastUpdateRound}
+ updatedKVs[key] = persistedKVData{value: value.data, round: lastUpdateRound}
} else {
err = writer.deleteKvPair(key)
- updatedBoxes[key] = persistedBoxData{value: nil, round: lastUpdateRound}
+ updatedKVs[key] = persistedKVData{value: nil, round: lastUpdateRound}
}
if err != nil {
return
@@ -4769,9 +4769,9 @@ func (prd *persistedResourcesData) before(other *persistedResourcesData) bool {
return prd.round < other.round
}
-// before compares the round numbers of two persistedBoxData and determines if the current persistedBoxData
+// before compares the round numbers of two persistedKVData and determines if the current persistedKVData
// happened before the other.
-func (prd *persistedBoxData) before(other *persistedBoxData) bool {
+func (prd *persistedKVData) before(other *persistedKVData) bool {
return prd.round < other.round
}
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 15a34bd9a9..cf71dd5d59 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -319,7 +319,7 @@ func TestAccountDBRound(t *testing.T) {
numResUpdates += len(rs)
}
require.Equal(t, resourceUpdatesCnt.len(), numResUpdates)
- require.Equal(t, 0, len(updatedBoxes))
+ require.Empty(t, updatedBoxes)
updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i))
require.NoError(t, err)
@@ -463,7 +463,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
updatesResources[addr][0],
)
- require.Equal(t, 0, len(updatedBoxes))
+ require.Empty(t, updatedBoxes)
})
}
}
@@ -2892,9 +2892,9 @@ func TestAccountUnorderedUpdates(t *testing.T) {
&mock2, acctVariant, resVariant, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
- a.Equal(3, len(updatedAccounts))
- a.Equal(3, len(updatedResources))
- a.Equal(0, len(updatedBoxes))
+ a.Len(updatedAccounts, 3)
+ a.Len(updatedResources, 3)
+ a.Empty(updatedBoxes)
})
}
}
@@ -3002,7 +3002,6 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
}
func BenchmarkLRUResources(b *testing.B) {
- b.StopTimer()
var baseResources lruResources
baseResources.init(nil, 1000, 850)
@@ -3021,7 +3020,7 @@ func BenchmarkLRUResources(b *testing.B) {
baseResources.write(data, addr)
}
- b.StartTimer()
+ b.ResetTimer()
for i := 0; i < b.N; i++ {
pos := i % 850
data, has = baseResources.read(addrs[pos], basics.CreatableIndex(1))
@@ -3097,7 +3096,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
require.NoError(b, err)
var v sql.NullString
for i := 0; i < b.N; i++ {
- var pv persistedBoxData
+ var pv persistedKVData
boxName := boxNames[i%totalBoxes]
b.StartTimer()
err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
@@ -3128,7 +3127,7 @@ func BenchmarkBoxDatabaseRead(b *testing.B) {
require.NoError(b, err)
var v sql.NullString
for i := 0; i < b.N+lookback; i++ {
- var pv persistedBoxData
+ var pv persistedKVData
boxName := boxNames[i%totalBoxes]
err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
require.NoError(b, err)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 4e9d3485f8..38a16cbc54 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -68,14 +68,14 @@ const baseResourcesPendingAccountsBufferSize = 100000
// is being flushed into the main base resources cache.
const baseResourcesPendingAccountsWarnThreshold = 85000
-// baseBoxesPendingBufferSize defines the size of the base boxes pending buffer size.
-// At the beginning of a new round, the entries from this buffer are being flushed into the base boxes map.
-const baseBoxesPendingBufferSize = 5000
+// baseKVPendingBufferSize defines the size of the base KVs pending buffer size.
+// At the beginning of a new round, the entries from this buffer are being flushed into the base KVs map.
+const baseKVPendingBufferSize = 5000
-// baseBoxesPendingWarnThreshold defines the threshold at which the lruBoxes would generate a warning
-// after we've surpassed a given pending boxes size. The warning is being generated when the pending box data
-// is being flushed into the main base boxes cache.
-const baseBoxesPendingWarnThreshold = 4250
+// baseKVPendingWarnThreshold defines the threshold at which the lruKV would generate a warning
+// after we've surpassed a given pending kv size. The warning is being generated when the pending kv data
+// is being flushed into the main base kv cache.
+const baseKVPendingWarnThreshold = 4250
// initializeCachesReadaheadBlocksStream defines how many block we're going to attempt to queue for the
// initializeCaches method before it can process and store the account changes to disk.
@@ -215,8 +215,8 @@ type accountUpdates struct {
// baseResources stores the most recently used resources, at exactly dbRound
baseResources lruResources
- // baseBoxes stores the most recently used box, at exactly dbRound
- baseBoxes lruBoxes
+ // baseKV stores the most recently used KV, at exactly dbRound
+ baseKV lruKV
// logAccountUpdatesMetrics is a flag for enable/disable metrics logging
logAccountUpdatesMetrics bool
@@ -322,7 +322,7 @@ func (au *accountUpdates) close() {
}
au.baseAccounts.prune(0)
au.baseResources.prune(0)
- au.baseBoxes.prune(0)
+ au.baseKV.prune(0)
}
func (au *accountUpdates) LookupResource(rnd basics.Round, addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.AccountResource, basics.Round, error) {
@@ -387,11 +387,11 @@ func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bo
rnd = currentDbRound + basics.Round(currentDeltaLen)
}
- // check the baseBoxes cache
- if pbd, has := au.baseBoxes.read(key); has {
- // we don't technically need this, since it's already in the baseBoxes, however, writing this over
+ // check the baseKV cache
+ if pbd, has := au.baseKV.read(key); has {
+ // we don't technically need this, since it's already in the baseKV, however, writing this over
// would ensure that we promote this field.
- au.baseBoxes.writePending(pbd, key)
+ au.baseKV.writePending(pbd, key)
return pbd.value, nil
}
@@ -406,9 +406,11 @@ func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bo
persistedData, err := au.accountsq.lookupKeyValue(key)
if persistedData.round == currentDbRound {
- if persistedData.value != nil {
- // if we read actual data return it
- au.baseBoxes.writePending(persistedData, key)
+ if err == nil {
+ // if we read actual data return it. This includes deleted values
+ // where persistedData.value == nil to avoid unnecessary db lookups
+ // for deleted KVs.
+ au.baseKV.writePending(persistedData, key)
return persistedData.value, err
}
// otherwise return empty
@@ -529,6 +531,10 @@ func (au *accountUpdates) lookupKeysByPrefix(round basics.Round, keyPrefix strin
needUnlock = false
}
+ // NOTE: the kv cache isn't used here because the data structure doesn't support range
+ // queries. It may be preferable to increase the SQLite cache size if these reads become
+ // too slow.
+
// Finishing searching updates of this account in kvDeltas, keep going: use on-disk DB
// to find the rest matching keys in DB.
dbRound, _err := au.accountsq.lookupKeysByPrefix(keyPrefix, maxKeyNum, results, resultCount)
@@ -936,7 +942,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
- au.baseBoxes.init(au.log, baseBoxesPendingBufferSize, baseBoxesPendingWarnThreshold)
+ au.baseKV.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold)
return
}
@@ -961,7 +967,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.flushPendingWrites()
au.baseResources.flushPendingWrites()
- au.baseBoxes.flushPendingWrites()
+ au.baseKV.flushPendingWrites()
for i := 0; i < delta.Accts.Len(); i++ {
addr, data := delta.Accts.GetByIdx(i)
@@ -1016,8 +1022,8 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.prune(newBaseAccountSize)
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
- newBaseBoxesSize := (len(au.kvStore) + 1) + baseBoxesPendingBufferSize
- au.baseBoxes.prune(newBaseBoxesSize)
+ newBaseKVSize := (len(au.kvStore) + 1) + baseKVPendingBufferSize
+ au.baseKV.prune(newBaseKVSize)
}
// lookupLatest returns the account data for a given address for the latest round.
@@ -1317,7 +1323,7 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
// against the database.
persistedData, err = au.accountsq.lookupResources(addr, aidx, ctype)
if persistedData.round == currentDbRound {
- if persistedData.addrid != 0 {
+ if err == nil {
// if we read actual data return it
au.baseResources.writePending(persistedData, addr)
return persistedData.AccountResource(), rnd, err
@@ -1643,7 +1649,7 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe
// the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
// so that we can update the base account back.
- dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, dcc.updatedPersistedBoxes, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
+ dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, dcc.updatedPersistedKVs, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
if err != nil {
return err
}
@@ -1734,8 +1740,8 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
}
}
- for key, persistedBox := range dcc.updatedPersistedBoxes {
- au.baseBoxes.write(persistedBox, key)
+ for key, persistedKV := range dcc.updatedPersistedKVs {
+ au.baseKV.write(persistedKV, key)
}
for cidx, modCrt := range dcc.compactCreatableDeltas {
@@ -1867,7 +1873,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
// rowid are flushed.
au.baseAccounts.prune(0)
au.baseResources.prune(0)
- au.baseBoxes.prune(0)
+ au.baseKV.prune(0)
startTime := time.Now()
vacuumExitCh := make(chan struct{}, 1)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 89f1fc737d..be5218daf4 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -1352,23 +1352,15 @@ func TestBoxNamesByAppIDs(t *testing.T) {
}
}
-func TestBoxCache(t *testing.T) {
+func TestKVCache(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
initialBlocksCount := 1
accts := make(map[basics.Address]basics.AccountData)
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 8
- protoParams.SeedLookback = 1
- protoParams.SeedRefreshInterval = 1
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestBoxNamesByAppIDs")
- config.Consensus[testProtocolVersion] = protoParams
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
-
- ml := makeMockLedgerForTracker(t, true, initialBlocksCount, testProtocolVersion,
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion,
[]map[basics.Address]basics.AccountData{accts},
)
defer ml.Close()
@@ -1378,29 +1370,31 @@ func TestBoxCache(t *testing.T) {
defer au.close()
knownCreatables := make(map[basics.CreatableIndex]bool)
- opts := auNewBlockOpts{ledgercore.AccountDeltas{}, testProtocolVersion, protoParams, knownCreatables}
+ opts := auNewBlockOpts{ledgercore.AccountDeltas{}, protocol.ConsensusCurrentVersion, protoParams, knownCreatables}
- boxCnt := 1000
- boxesPerBlock := 100
- curBox := 0
+ kvCnt := 1000
+ kvsPerBlock := 100
+ curKV := 0
var currentRound basics.Round
currentDBRound := basics.Round(1)
- boxMap := make(map[string]string)
- for i := 0; i < boxCnt; i++ {
- boxMap[fmt.Sprintf("%d", i)] = fmt.Sprintf("value%d", i)
+ kvMap := make(map[string]string)
+ for i := 0; i < kvCnt; i++ {
+ kvMap[fmt.Sprintf("%d", i)] = fmt.Sprintf("value%d", i)
}
- // add boxesPerBlock boxes on each iteration
- for i := 0; i < boxCnt/boxesPerBlock+int(conf.MaxAcctLookback); i++ {
+ // add kvsPerBlock KVs on each iteration. The first kvCnt/kvsPerBlock
+ // iterations produce a block with kvCnt kv manipulations. The last
+ // conf.MaxAcctLookback iterations are meant to verify the contents of the cache
+ // are correct after every kv containing block has been committed.
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
currentRound = currentRound + 1
-
kvMods := make(map[string]*string)
- if i < boxCnt/boxesPerBlock {
- for j := 0; j < boxesPerBlock; j++ {
- name := fmt.Sprintf("%d", curBox)
- curBox++
- val := boxMap[name]
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ curKV++
+ val := kvMap[name]
kvMods[name] = &val
}
}
@@ -1411,44 +1405,42 @@ func TestBoxCache(t *testing.T) {
// ensure rounds
rnd := au.latest()
require.Equal(t, currentRound, rnd)
- if uint64(currentRound) > protoParams.MaxBalLookback {
- require.Equal(t, basics.Round(uint64(currentRound)-protoParams.MaxBalLookback), au.cachedDBRound)
+ if uint64(currentRound) > conf.MaxAcctLookback {
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
} else {
require.Equal(t, basics.Round(0), au.cachedDBRound)
}
- // verify cache doesn't contain the new boxes until commited to DB
- if i < boxCnt/boxesPerBlock {
- for name := range kvMods {
- _, has := au.baseBoxes.read(name)
- require.False(t, has)
- }
+ // verify cache doesn't contain the new kvs until committed to DB.
+ for name := range kvMods {
+ _, has := au.baseKV.read(name)
+ require.False(t, has)
}
- // verify commited boxes appear in the box cache
+ // verify committed kvs appear in the kv cache
for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
- startBox := (currentDBRound - 1) * basics.Round(boxesPerBlock)
- for j := 0; j < boxesPerBlock; j++ {
- name := fmt.Sprintf("%d", uint64(startBox)+uint64(j))
- persistedValue, has := au.baseBoxes.read(name)
+ startKV := (currentDBRound - 1) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKV.read(name)
require.True(t, has)
- require.Equal(t, boxMap[name], *persistedValue.value)
+ require.Equal(t, kvMap[name], *persistedValue.value)
}
}
}
- // updating inserted boxes
- curBox = 0
- for i := 0; i < boxCnt/boxesPerBlock+int(conf.MaxAcctLookback); i++ {
+ // updating inserted KVs
+ curKV = 0
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
currentRound = currentRound + 1
kvMods := make(map[string]*string)
- if i < boxCnt/boxesPerBlock {
- for j := 0; j < boxesPerBlock; j++ {
- name := fmt.Sprintf("%d", curBox)
- val := fmt.Sprintf("modified value%d", curBox)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ val := fmt.Sprintf("modified value%d", curKV)
kvMods[name] = &val
- curBox++
+ curKV++
}
}
@@ -1458,28 +1450,28 @@ func TestBoxCache(t *testing.T) {
// ensure rounds
rnd := au.latest()
require.Equal(t, currentRound, rnd)
- require.Equal(t, basics.Round(uint64(currentRound)-protoParams.MaxBalLookback), au.cachedDBRound)
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
- // verify cache doesn't contain updated box values that haven't been committed to db
- if i < boxCnt/boxesPerBlock {
+ // verify cache doesn't contain updated kv values that haven't been committed to db
+ if i < kvCnt/kvsPerBlock {
for name := range kvMods {
- persistedValue, has := au.baseBoxes.read(name)
+ persistedValue, has := au.baseKV.read(name)
require.True(t, has)
- require.Equal(t, boxMap[name], *persistedValue.value)
+ require.Equal(t, kvMap[name], *persistedValue.value)
}
}
- // verify commited updated box values appear in the box cache
+ // verify committed updated kv values appear in the kv cache
for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
- lookback := basics.Round(boxCnt/boxesPerBlock + int(conf.MaxAcctLookback) + 1)
+ lookback := basics.Round(kvCnt/kvsPerBlock + int(conf.MaxAcctLookback) + 1)
if currentDBRound < lookback {
continue
}
- startBox := (currentDBRound - lookback) * basics.Round(boxesPerBlock)
- for j := 0; j < boxesPerBlock; j++ {
- name := fmt.Sprintf("%d", uint64(startBox)+uint64(j))
- persistedValue, has := au.baseBoxes.read(name)
+ startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKV.read(name)
require.True(t, has)
expectedValue := fmt.Sprintf("modified value%s", name)
require.Equal(t, expectedValue, *persistedValue.value)
@@ -1487,17 +1479,17 @@ func TestBoxCache(t *testing.T) {
}
}
- // deleting boxes
- curBox = 0
- for i := 0; i < boxCnt/boxesPerBlock+int(conf.MaxAcctLookback); i++ {
+ // deleting KVs
+ curKV = 0
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
currentRound = currentRound + 1
kvMods := make(map[string]*string)
- if i < boxCnt/boxesPerBlock {
- for j := 0; j < boxesPerBlock; j++ {
- name := fmt.Sprintf("%d", curBox)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
kvMods[name] = nil
- curBox++
+ curKV++
}
}
@@ -1507,29 +1499,29 @@ func TestBoxCache(t *testing.T) {
// ensure rounds
rnd := au.latest()
require.Equal(t, currentRound, rnd)
- require.Equal(t, basics.Round(uint64(currentRound)-protoParams.MaxBalLookback), au.cachedDBRound)
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
- // verify cache doesn't contain updated box values that haven't been committed to db
- if i < boxCnt/boxesPerBlock {
+ // verify cache doesn't contain updated kv values that haven't been committed to db
+ if i < kvCnt/kvsPerBlock {
for name := range kvMods {
- persistedValue, has := au.baseBoxes.read(name)
+ persistedValue, has := au.baseKV.read(name)
require.True(t, has)
value := fmt.Sprintf("modified value%s", name)
require.Equal(t, value, *persistedValue.value)
}
}
- // verify commited updated box values appear in the box cache
+ // verify committed updated kv values appear in the kv cache
for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
- lookback := basics.Round(2*(boxCnt/boxesPerBlock+int(conf.MaxAcctLookback)) + 1)
+ lookback := basics.Round(2*(kvCnt/kvsPerBlock+int(conf.MaxAcctLookback)) + 1)
if currentDBRound < lookback {
continue
}
- startBox := (currentDBRound - lookback) * basics.Round(boxesPerBlock)
- for j := 0; j < boxesPerBlock; j++ {
- name := fmt.Sprintf("%d", uint64(startBox)+uint64(j))
- persistedValue, has := au.baseBoxes.read(name)
+ startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKV.read(name)
require.True(t, has)
require.True(t, persistedValue.value == nil)
}
diff --git a/ledger/lruboxes.go b/ledger/lruboxes.go
deleted file mode 100644
index eaf63256cf..0000000000
--- a/ledger/lruboxes.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see .
-
-package ledger
-
-import (
- "github.com/algorand/go-algorand/logging"
-)
-
-//msgp:ignore cachedBoxData
-type cachedBoxData struct {
- persistedBoxData
-
- // box name
- key string
-}
-
-// lruBoxes provides a storage class for the most recently used box data.
-// It doesn't have any synchronization primitive on it's own and require to be
-// syncronized by the caller.
-type lruBoxes struct {
- // boxList contain the list of persistedBoxData, where the front ones are the most "fresh"
- // and the ones on the back are the oldest.
- boxList *persistedBoxesDataList
-
- // boxes provides fast access to the various elements in the list by using the key
- boxes map[string]*persistedBoxesDataListNode
-
- // pendingBoxes are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these,
- // it would call flushPendingWrites and these would be merged into the boxes/boxesList
- pendingBoxes chan cachedBoxData
-
- // log interface; used for logging the threshold event.
- log logging.Logger
-
- // pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingBoxes entries
- pendingWritesWarnThreshold int
-}
-
-// init initializes the lruBoxes for use.
-// thread locking semantics : write lock
-func (m *lruBoxes) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
- m.boxList = newPersistedBoxList().allocateFreeNodes(pendingWrites)
- m.boxes = make(map[string]*persistedBoxesDataListNode, pendingWrites)
- m.pendingBoxes = make(chan cachedBoxData, pendingWrites)
- m.log = log
- m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
-}
-
-// read the persistedBoxesData object that the lruBoxes has for the given key.
-// thread locking semantics : read lock
-func (m *lruBoxes) read(key string) (data persistedBoxData, has bool) {
- if el := m.boxes[key]; el != nil {
- return el.Value.persistedBoxData, true
- }
- return persistedBoxData{}, false
-}
-
-// flushPendingWrites flushes the pending writes to the main lruBoxes cache.
-// thread locking semantics : write lock
-func (m *lruBoxes) flushPendingWrites() {
- pendingEntriesCount := len(m.pendingBoxes)
- if pendingEntriesCount >= m.pendingWritesWarnThreshold {
- m.log.Warnf("lruBoxes: number of entries in pendingBoxes(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
- }
- for ; pendingEntriesCount > 0; pendingEntriesCount-- {
- select {
- case pendingBoxData := <-m.pendingBoxes:
- m.write(pendingBoxData.persistedBoxData, pendingBoxData.key)
- default:
- return
- }
- }
-}
-
-// writePending write a single persistedBoxData entry to the pendingBoxes buffer.
-// the function doesn't block, and in case of a buffer overflow the entry would not be added.
-// thread locking semantics : no lock is required.
-func (m *lruBoxes) writePending(box persistedBoxData, key string) {
- select {
- case m.pendingBoxes <- cachedBoxData{persistedBoxData: box, key: key}:
- default:
- }
-}
-
-// write a single persistedBoxData to the lruBoxes cache.
-// when writing the entry, the round number would be used to determine if it's a newer
-// version of what's already on the cache or not. In all cases, the entry is going
-// to be promoted to the front of the list.
-// thread locking semantics : write lock
-func (m *lruBoxes) write(boxData persistedBoxData, key string) {
- if el := m.boxes[key]; el != nil {
- // already exists; is it a newer ?
- if el.Value.before(&boxData) {
- // we update with a newer version.
- el.Value = &cachedBoxData{persistedBoxData: boxData, key: key}
- }
- m.boxList.moveToFront(el)
- } else {
- // new entry.
- m.boxes[key] = m.boxList.pushFront(&cachedBoxData{persistedBoxData: boxData, key: key})
- }
-}
-
-// prune adjust the current size of the lruBoxes cache, by dropping the least
-// recently used entries.
-// thread locking semantics : write lock
-func (m *lruBoxes) prune(newSize int) (removed int) {
- for {
- if len(m.boxes) <= newSize {
- break
- }
- back := m.boxList.back()
- delete(m.boxes, back.Value.key)
- m.boxList.remove(back)
- removed++
- }
- return
-}
diff --git a/ledger/lruboxes_test.go b/ledger/lruboxes_test.go
deleted file mode 100644
index 552c57a031..0000000000
--- a/ledger/lruboxes_test.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see .
-
-package ledger
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestLRUBasicBoxes(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var baseBoxes lruBoxes
- baseBoxes.init(logging.TestingLog(t), 10, 5)
-
- boxNum := 50
- // write 50 boxes
- for i := 0; i < boxNum; i++ {
- boxValue := fmt.Sprintf("box %d value", i)
- box := persistedBoxData{
- value: &boxValue,
- round: basics.Round(i),
- }
- baseBoxes.write(box, fmt.Sprintf("key%d", i))
- }
-
- // verify that all these boxes are truly there.
- for i := 0; i < boxNum; i++ {
- box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
- require.True(t, has)
- require.Equal(t, basics.Round(i), box.round)
- require.Equal(t, fmt.Sprintf("box %d value", i), *(box.value))
- }
-
- // verify expected missing entries
- for i := boxNum; i < boxNum*2; i++ {
- box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
- require.False(t, has)
- require.Equal(t, persistedBoxData{}, box)
- }
-
- baseBoxes.prune(boxNum / 2)
-
- // verify expected (missing/existing) entries
- for i := 0; i < boxNum*2; i++ {
- box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
-
- if i >= boxNum/2 && i < boxNum {
- // expected to have it.
- require.True(t, has)
- require.Equal(t, basics.Round(i), box.round)
- require.Equal(t, fmt.Sprintf("box %d value", i), *(box.value))
- } else {
- require.False(t, has)
- require.Equal(t, persistedBoxData{}, box)
- }
- }
-}
-
-func TestLRUBoxesPendingWrites(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var baseBoxes lruBoxes
- boxesNum := 250
- baseBoxes.init(logging.TestingLog(t), boxesNum*2, boxesNum)
-
- for i := 0; i < boxesNum; i++ {
- go func(i int) {
- time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond)
- boxValue := fmt.Sprintf("box %d value", i)
- box := persistedBoxData{
- value: &boxValue,
- round: basics.Round(i),
- }
- baseBoxes.writePending(box, fmt.Sprintf("key%d", i))
- }(i)
- }
- testStarted := time.Now()
- for {
- baseBoxes.flushPendingWrites()
-
- // check if all boxes were loaded into "main" cache.
- allBoxesLoaded := true
- for i := 0; i < boxesNum; i++ {
- _, has := baseBoxes.read(fmt.Sprintf("key%d", i))
- if !has {
- allBoxesLoaded = false
- break
- }
- }
- if allBoxesLoaded {
- break
- }
- if time.Since(testStarted).Seconds() > 20 {
- require.Fail(t, "failed after waiting for 20 second")
- }
- // not yet, keep looping.
- }
-}
-
-type lruBoxesTestLogger struct {
- logging.Logger
- WarnfCallback func(string, ...interface{})
- warnMsgCount int
-}
-
-func (cl *lruBoxesTestLogger) Warnf(s string, args ...interface{}) {
- cl.warnMsgCount++
-}
-
-func TestLRUBoxesPendingWritesWarning(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var baseBoxes lruBoxes
- pendingWritesBuffer := 50
- pendingWritesThreshold := 40
- log := &lruBoxesTestLogger{Logger: logging.TestingLog(t)}
- baseBoxes.init(log, pendingWritesBuffer, pendingWritesThreshold)
- for j := 0; j < 50; j++ {
- for i := 0; i < j; i++ {
- boxValue := fmt.Sprintf("box %d value", i)
- box := persistedBoxData{
- value: &boxValue,
- round: basics.Round(i),
- }
- baseBoxes.writePending(box, fmt.Sprintf("key%d", i))
- }
- baseBoxes.flushPendingWrites()
- if j >= pendingWritesThreshold {
- // expect a warning in the log
- require.Equal(t, 1+j-pendingWritesThreshold, log.warnMsgCount)
- }
- }
-}
-
-func TestLRUBoxesOmittedPendingWrites(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var baseBoxes lruBoxes
- pendingWritesBuffer := 50
- pendingWritesThreshold := 40
- log := &lruBoxesTestLogger{Logger: logging.TestingLog(t)}
- baseBoxes.init(log, pendingWritesBuffer, pendingWritesThreshold)
-
- for i := 0; i < pendingWritesBuffer*2; i++ {
- boxValue := fmt.Sprintf("box %d value", i)
- box := persistedBoxData{
- value: &boxValue,
- round: basics.Round(i),
- }
- baseBoxes.writePending(box, fmt.Sprintf("key%d", i))
- }
-
- baseBoxes.flushPendingWrites()
-
- // verify that all these boxes are truly there.
- for i := 0; i < pendingWritesBuffer; i++ {
- box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
- require.True(t, has)
- require.Equal(t, basics.Round(i), box.round)
- require.Equal(t, fmt.Sprintf("box %d value", i), *(box.value))
- }
-
- // verify expected missing entries
- for i := pendingWritesBuffer; i < pendingWritesBuffer*2; i++ {
- box, has := baseBoxes.read(fmt.Sprintf("key%d", i))
- require.False(t, has)
- require.Equal(t, persistedBoxData{}, box)
- }
-}
-
-func BenchmarkLRUBoxesWrite(b *testing.B) {
- numTestBoxes := 5000
- // there are 2500 boxes that overlap
- fillerBoxes := generatePersistedBoxesData(0, 97500)
- boxes := generatePersistedBoxesData(97500-numTestBoxes/2, 97500+numTestBoxes/2)
-
- benchLruWriteBoxes(b, fillerBoxes, boxes)
-}
-
-func benchLruWriteBoxes(b *testing.B, fillerBoxes []cachedBoxData, boxes []cachedBoxData) {
- b.ResetTimer()
- b.StopTimer()
- var baseBoxes lruBoxes
- // setting up the baseBoxes with a predefined cache size
- baseBoxes.init(logging.TestingLog(b), baseBoxesPendingBufferSize, baseBoxesPendingWarnThreshold)
- for i := 0; i < b.N; i++ {
- baseBoxes = fillLRUBoxes(baseBoxes, fillerBoxes)
-
- b.StartTimer()
- fillLRUBoxes(baseBoxes, boxes)
- b.StopTimer()
- baseBoxes.prune(0)
- }
-}
-
-func fillLRUBoxes(baseBoxes lruBoxes, fillerBoxes []cachedBoxData) lruBoxes {
- for _, entry := range fillerBoxes {
- baseBoxes.write(entry.persistedBoxData, entry.key)
- }
- return baseBoxes
-}
-
-func generatePersistedBoxesData(startRound, endRound int) []cachedBoxData {
- boxes := make([]cachedBoxData, endRound-startRound)
- for i := startRound; i < endRound; i++ {
- boxValue := fmt.Sprintf("box %d value", i)
-
- boxes[i-startRound] = cachedBoxData{
- persistedBoxData: persistedBoxData{
- value: &boxValue,
- round: basics.Round(i + startRound),
- },
- key: fmt.Sprintf("key%d", i),
- }
- }
- return boxes
-}
diff --git a/ledger/lrukv.go b/ledger/lrukv.go
new file mode 100644
index 0000000000..45f4f5027d
--- /dev/null
+++ b/ledger/lrukv.go
@@ -0,0 +1,132 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package ledger
+
+import (
+ "github.com/algorand/go-algorand/logging"
+)
+
+//msgp:ignore cachedKVData
+type cachedKVData struct {
+ persistedKVData
+
+ // kv key
+ key string
+}
+
+// lruKV provides a storage class for the most recently used kv data.
+// It doesn't have any synchronization primitive on its own and requires to be
+// synchronized by the caller.
+type lruKV struct {
+ // kvList contain the list of persistedKVData, where the front ones are the most "fresh"
+ // and the ones on the back are the oldest.
+ kvList *persistedKVDataList
+
+ // kvs provides fast access to the various elements in the list by using the key
+ kvs map[string]*persistedKVDataListNode
+
+ // pendingKVs are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these,
+ // it would call flushPendingWrites and these would be merged into the kvs/kvList
+ pendingKVs chan cachedKVData
+
+ // log interface; used for logging the threshold event.
+ log logging.Logger
+
+ // pendingWritesWarnThreshold is the threshold beyond which we would write a warning for exceeding the number of pendingKVs entries
+ pendingWritesWarnThreshold int
+}
+
+// init initializes the lruKV for use.
+// thread locking semantics : write lock
+func (m *lruKV) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
+ m.kvList = newPersistedKVList().allocateFreeNodes(pendingWrites)
+ m.kvs = make(map[string]*persistedKVDataListNode, pendingWrites)
+ m.pendingKVs = make(chan cachedKVData, pendingWrites)
+ m.log = log
+ m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
+}
+
+// read the persistedKVData object that the lruKV has for the given key.
+// thread locking semantics : read lock
+func (m *lruKV) read(key string) (data persistedKVData, has bool) {
+ if el := m.kvs[key]; el != nil {
+ return el.Value.persistedKVData, true
+ }
+ return persistedKVData{}, false
+}
+
+// flushPendingWrites flushes the pending writes to the main lruKV cache.
+// thread locking semantics : write lock
+func (m *lruKV) flushPendingWrites() {
+ pendingEntriesCount := len(m.pendingKVs)
+ if pendingEntriesCount >= m.pendingWritesWarnThreshold {
+ m.log.Warnf("lruKV: number of entries in pendingKVs(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
+ }
+ for ; pendingEntriesCount > 0; pendingEntriesCount-- {
+ select {
+ case pendingKVData := <-m.pendingKVs:
+ m.write(pendingKVData.persistedKVData, pendingKVData.key)
+ default:
+ return
+ }
+ }
+}
+
+// writePending writes a single persistedKVData entry to the pendingKVs buffer.
+// the function doesn't block, and in case of a buffer overflow the entry would not be added.
+// thread locking semantics : no lock is required.
+func (m *lruKV) writePending(kv persistedKVData, key string) {
+ select {
+ case m.pendingKVs <- cachedKVData{persistedKVData: kv, key: key}:
+ default:
+ }
+}
+
+// write a single persistedKVData to the lruKV cache.
+// when writing the entry, the round number would be used to determine if it's a newer
+// version of what's already on the cache or not. In all cases, the entry is going
+// to be promoted to the front of the list.
+// thread locking semantics : write lock
+func (m *lruKV) write(kvData persistedKVData, key string) {
+ if el := m.kvs[key]; el != nil {
+ // already exists; is it newer?
+ if el.Value.before(&kvData) {
+ // we update with a newer version.
+ el.Value = &cachedKVData{persistedKVData: kvData, key: key}
+ }
+ m.kvList.moveToFront(el)
+ } else {
+ // new entry.
+ m.kvs[key] = m.kvList.pushFront(&cachedKVData{persistedKVData: kvData, key: key})
+ }
+}
+
+// prune adjusts the current size of the lruKV cache, by dropping the least
+// recently used entries.
+// thread locking semantics : write lock
+func (m *lruKV) prune(newSize int) (removed int) {
+ for {
+ if len(m.kvs) <= newSize {
+ break
+ }
+ back := m.kvList.back()
+ delete(m.kvs, back.Value.key)
+ m.kvList.remove(back)
+ removed++
+ }
+ return
+}
diff --git a/ledger/lrukv_test.go b/ledger/lrukv_test.go
new file mode 100644
index 0000000000..8eebb1459c
--- /dev/null
+++ b/ledger/lrukv_test.go
@@ -0,0 +1,240 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package ledger
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestLRUBasicKV(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ baseKV.init(logging.TestingLog(t), 10, 5)
+
+ kvNum := 50
+ // write 50 KVs
+ for i := 0; i < kvNum; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: &kvValue,
+ round: basics.Round(i),
+ }
+ baseKV.write(kv, fmt.Sprintf("key%d", i))
+ }
+
+ // verify that all these KVs are truly there.
+ for i := 0; i < kvNum; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), *(kv.value))
+ }
+
+ // verify expected missing entries
+ for i := kvNum; i < kvNum*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+
+ baseKV.prune(kvNum / 2)
+
+ // verify expected (missing/existing) entries
+ for i := 0; i < kvNum*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+
+ if i >= kvNum/2 && i < kvNum {
+ // expected to have it.
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), *(kv.value))
+ } else {
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+ }
+}
+
+func TestLRUKVPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ kvNum := 250
+ baseKV.init(logging.TestingLog(t), kvNum*2, kvNum)
+
+ for i := 0; i < kvNum; i++ {
+ go func(i int) {
+ time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond)
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: &kvValue,
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }(i)
+ }
+ testStarted := time.Now()
+ for {
+ baseKV.flushPendingWrites()
+
+ // check if all kvs were loaded into "main" cache.
+ allKVsLoaded := true
+ for i := 0; i < kvNum; i++ {
+ _, has := baseKV.read(fmt.Sprintf("key%d", i))
+ if !has {
+ allKVsLoaded = false
+ break
+ }
+ }
+ if allKVsLoaded {
+ break
+ }
+ if time.Since(testStarted).Seconds() > 20 {
+ require.Fail(t, "failed after waiting for 20 second")
+ }
+ // not yet, keep looping.
+ }
+}
+
+type lruKVTestLogger struct {
+ logging.Logger
+ WarnfCallback func(string, ...interface{})
+ warnMsgCount int
+}
+
+func (cl *lruKVTestLogger) Warnf(s string, args ...interface{}) {
+ cl.warnMsgCount++
+}
+
+func TestLRUKVPendingWritesWarning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruKVTestLogger{Logger: logging.TestingLog(t)}
+ baseKV.init(log, pendingWritesBuffer, pendingWritesThreshold)
+ for j := 0; j < 50; j++ {
+ for i := 0; i < j; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: &kvValue,
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }
+ baseKV.flushPendingWrites()
+ if j >= pendingWritesThreshold {
+ // expect a warning in the log
+ require.Equal(t, 1+j-pendingWritesThreshold, log.warnMsgCount)
+ }
+ }
+}
+
+func TestLRUKVOmittedPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruKVTestLogger{Logger: logging.TestingLog(t)}
+ baseKV.init(log, pendingWritesBuffer, pendingWritesThreshold)
+
+ for i := 0; i < pendingWritesBuffer*2; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: &kvValue,
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }
+
+ baseKV.flushPendingWrites()
+
+ // verify that all these kvs are truly there.
+ for i := 0; i < pendingWritesBuffer; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), *(kv.value))
+ }
+
+ // verify expected missing entries
+ for i := pendingWritesBuffer; i < pendingWritesBuffer*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+}
+
+func BenchmarkLRUKVWrite(b *testing.B) {
+ numTestKV := 5000
+ // there are 2500 kvs that overlap
+ fillerKVs := generatePersistedKVData(0, 97500)
+ kvs := generatePersistedKVData(97500-numTestKV/2, 97500+numTestKV/2)
+
+ benchLruWriteKVs(b, fillerKVs, kvs)
+}
+
+func benchLruWriteKVs(b *testing.B, fillerKVs []cachedKVData, kvs []cachedKVData) {
+ b.ResetTimer()
+ b.StopTimer()
+ var baseKV lruKV
+ // setting up the baseKV with a predefined cache size
+ baseKV.init(logging.TestingLog(b), baseKVPendingBufferSize, baseKVPendingWarnThreshold)
+ for i := 0; i < b.N; i++ {
+ baseKV = fillLRUKV(baseKV, fillerKVs)
+
+ b.StartTimer()
+ fillLRUKV(baseKV, kvs)
+ b.StopTimer()
+ baseKV.prune(0)
+ }
+}
+
+func fillLRUKV(baseKV lruKV, fillerKVs []cachedKVData) lruKV {
+ for _, entry := range fillerKVs {
+ baseKV.write(entry.persistedKVData, entry.key)
+ }
+ return baseKV
+}
+
+func generatePersistedKVData(startRound, endRound int) []cachedKVData {
+ kvs := make([]cachedKVData, endRound-startRound)
+ for i := startRound; i < endRound; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+
+ kvs[i-startRound] = cachedKVData{
+ persistedKVData: persistedKVData{
+ value: &kvValue,
+ round: basics.Round(i + startRound),
+ },
+ key: fmt.Sprintf("key%d", i),
+ }
+ }
+ return kvs
+}
diff --git a/ledger/persistedboxes_list_test.go b/ledger/persistedboxes_list_test.go
deleted file mode 100644
index 021e290a54..0000000000
--- a/ledger/persistedboxes_list_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see .
-
-package ledger
-
-import (
- "testing"
-
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func (l *persistedBoxesDataList) getRoot() dataListNode {
- return &l.root
-}
-
-func (l *persistedBoxesDataListNode) getNext() dataListNode {
- // get rid of returning nil wrapped into an interface to let i = x.getNext(); i != nil work.
- if l.next == nil {
- return nil
- }
- return l.next
-}
-
-func (l *persistedBoxesDataListNode) getPrev() dataListNode {
- if l.prev == nil {
- return nil
- }
- return l.prev
-}
-
-// inspect that the list seems like the array
-func checkListPointersBD(t *testing.T, l *persistedBoxesDataList, es []*persistedBoxesDataListNode) {
- es2 := make([]dataListNode, len(es))
- for i, el := range es {
- es2[i] = el
- }
-
- checkListPointers(t, l, es2)
-}
-
-func TestRemoveFromListBD(t *testing.T) {
- partitiontest.PartitionTest(t)
- l := newPersistedBoxList()
- e1 := l.pushFront(&cachedBoxData{key: "key1"})
- e2 := l.pushFront(&cachedBoxData{key: "key2"})
- e3 := l.pushFront(&cachedBoxData{key: "key3"})
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e2, e1})
-
- l.remove(e2)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e1})
- l.remove(e3)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1})
-}
-
-func TestAddingNewNodeWithAllocatedFreeListBD(t *testing.T) {
- partitiontest.PartitionTest(t)
- l := newPersistedBoxList().allocateFreeNodes(10)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
- if countListSize(l.freeList) != 10 {
- t.Errorf("free list did not allocate nodes")
- return
- }
- // test elements
- e1 := l.pushFront(&cachedBoxData{key: "key1"})
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1})
-
- if countListSize(l.freeList) != 9 {
- t.Errorf("free list did not provide a node on new list entry")
- return
- }
-}
-
-func TestMultielementListPositioningBD(t *testing.T) {
- partitiontest.PartitionTest(t)
- l := newPersistedBoxList()
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
- // test elements
- e2 := l.pushFront(&cachedBoxData{key: "key1"})
- e1 := l.pushFront(&cachedBoxData{key: "key2"})
- e3 := l.pushFront(&cachedBoxData{key: "key3"})
- e4 := l.pushFront(&cachedBoxData{key: "key4"})
- e5 := l.pushFront(&cachedBoxData{key: "key5"})
-
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e5, e4, e3, e1, e2})
-
- l.move(e4, e1)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e5, e3, e1, e4, e2})
-
- l.remove(e5)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e1, e4, e2})
-
- l.move(e1, e4) // swap in middle
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e4, e1, e2})
-
- l.moveToFront(e4)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e4, e3, e1, e2})
-
- l.remove(e2)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e4, e3, e1})
-
- l.moveToFront(e3) // move from middle
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e3, e4, e1})
-
- l.moveToFront(e1) // move from end
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1, e3, e4})
-
- l.moveToFront(e1) // no movement
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1, e3, e4})
-
- e2 = l.pushFront(&cachedBoxData{key: "key2"})
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1, e3, e4})
-
- l.remove(e3) // removing from middle
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1, e4})
-
- l.remove(e4) // removing from end
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1})
-
- l.move(e2, e1) // swapping between two elements
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e1, e2})
-
- l.remove(e1) // removing front
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2})
-
- l.move(e2, l.back()) // swapping element with itself.
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2})
-
- l.remove(e2) // remove last one
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
-}
-
-func TestSingleElementListPositioningBD(t *testing.T) {
- partitiontest.PartitionTest(t)
- l := newPersistedBoxList()
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
- e := l.pushFront(&cachedBoxData{key: "key1"})
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e})
- l.moveToFront(e)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e})
- l.remove(e)
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{})
-}
-
-func TestRemovedNodeShouldBeMovedToFreeListBD(t *testing.T) {
- partitiontest.PartitionTest(t)
- l := newPersistedBoxList()
- e1 := l.pushFront(&cachedBoxData{key: "key1"})
- e2 := l.pushFront(&cachedBoxData{key: "key2"})
-
- checkListPointersBD(t, l, []*persistedBoxesDataListNode{e2, e1})
-
- e := l.back()
- l.remove(e)
-
- for i := l.freeList.next; i != nil; i = i.next {
- if i == e {
- // stopping the tst with good results:
- return
- }
- }
- t.Error("expected the removed node to appear at the freelist")
-}
diff --git a/ledger/persistedboxes_list.go b/ledger/persistedkvs.go
similarity index 64%
rename from ledger/persistedboxes_list.go
rename to ledger/persistedkvs.go
index 153e06d7d1..34f3c36ecb 100644
--- a/ledger/persistedboxes_list.go
+++ b/ledger/persistedkvs.go
@@ -16,35 +16,35 @@
package ledger
-// persistedBoxesDataList represents a doubly linked list.
-// TODO: must initiate with newPersistedAccountList.
-type persistedBoxesDataList struct {
- root persistedBoxesDataListNode // sentinel list element, only &root, root.prev, and root.next are used
- freeList *persistedBoxesDataListNode // preallocated nodes location
+// persistedKVDataList represents a doubly linked list.
+// must initiate with newPersistedKVList.
+type persistedKVDataList struct {
+ root persistedKVDataListNode // sentinel list element, only &root, root.prev, and root.next are used
+ freeList *persistedKVDataListNode // preallocated nodes location
}
-type persistedBoxesDataListNode struct {
+type persistedKVDataListNode struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
- next, prev *persistedBoxesDataListNode
+ next, prev *persistedKVDataListNode
- Value *cachedBoxData
+ Value *cachedKVData
}
-func newPersistedBoxList() *persistedBoxesDataList {
- l := new(persistedBoxesDataList)
+func newPersistedKVList() *persistedKVDataList {
+ l := new(persistedKVDataList)
l.root.next = &l.root
l.root.prev = &l.root
// used as a helper but does not store value
- l.freeList = new(persistedBoxesDataListNode)
+ l.freeList = new(persistedKVDataListNode)
return l
}
-func (l *persistedBoxesDataList) insertNodeToFreeList(otherNode *persistedBoxesDataListNode) {
+func (l *persistedKVDataList) insertNodeToFreeList(otherNode *persistedKVDataListNode) {
otherNode.next = l.freeList.next
otherNode.prev = nil
otherNode.Value = nil
@@ -52,9 +52,9 @@ func (l *persistedBoxesDataList) insertNodeToFreeList(otherNode *persistedBoxesD
l.freeList.next = otherNode
}
-func (l *persistedBoxesDataList) getNewNode() *persistedBoxesDataListNode {
+func (l *persistedKVDataList) getNewNode() *persistedKVDataListNode {
if l.freeList.next == nil {
- return new(persistedBoxesDataListNode)
+ return new(persistedKVDataListNode)
}
newNode := l.freeList.next
l.freeList.next = newNode.next
@@ -62,20 +62,20 @@ func (l *persistedBoxesDataList) getNewNode() *persistedBoxesDataListNode {
return newNode
}
-func (l *persistedBoxesDataList) allocateFreeNodes(numAllocs int) *persistedBoxesDataList {
+func (l *persistedKVDataList) allocateFreeNodes(numAllocs int) *persistedKVDataList {
if l.freeList == nil {
return l
}
for i := 0; i < numAllocs; i++ {
- l.insertNodeToFreeList(new(persistedBoxesDataListNode))
+ l.insertNodeToFreeList(new(persistedKVDataListNode))
}
return l
}
// Back returns the last element of list l or nil if the list is empty.
-func (l *persistedBoxesDataList) back() *persistedBoxesDataListNode {
- isEmpty := func(list *persistedBoxesDataList) bool {
+func (l *persistedKVDataList) back() *persistedKVDataListNode {
+ isEmpty := func(list *persistedKVDataList) bool {
// assumes we are inserting correctly to the list - using pushFront.
return list.root.next == &list.root
}
@@ -88,7 +88,7 @@ func (l *persistedBoxesDataList) back() *persistedBoxesDataListNode {
// remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
-func (l *persistedBoxesDataList) remove(e *persistedBoxesDataListNode) {
+func (l *persistedKVDataList) remove(e *persistedKVDataListNode) {
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
@@ -98,14 +98,14 @@ func (l *persistedBoxesDataList) remove(e *persistedBoxesDataListNode) {
}
// pushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *persistedBoxesDataList) pushFront(v *cachedBoxData) *persistedBoxesDataListNode {
+func (l *persistedKVDataList) pushFront(v *cachedKVData) *persistedKVDataListNode {
newNode := l.getNewNode()
newNode.Value = v
return l.insertValue(newNode, &l.root)
}
// insertValue inserts e after at, increments l.len, and returns e.
-func (l *persistedBoxesDataList) insertValue(newNode *persistedBoxesDataListNode, at *persistedBoxesDataListNode) *persistedBoxesDataListNode {
+func (l *persistedKVDataList) insertValue(newNode *persistedKVDataListNode, at *persistedKVDataListNode) *persistedKVDataListNode {
n := at.next
at.next = newNode
newNode.prev = at
@@ -118,7 +118,7 @@ func (l *persistedBoxesDataList) insertValue(newNode *persistedBoxesDataListNode
// moveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
-func (l *persistedBoxesDataList) moveToFront(e *persistedBoxesDataListNode) {
+func (l *persistedKVDataList) moveToFront(e *persistedKVDataListNode) {
if l.root.next == e {
return
}
@@ -126,7 +126,7 @@ func (l *persistedBoxesDataList) moveToFront(e *persistedBoxesDataListNode) {
}
// move moves e to next to at and returns e.
-func (l *persistedBoxesDataList) move(e, at *persistedBoxesDataListNode) *persistedBoxesDataListNode {
+func (l *persistedKVDataList) move(e, at *persistedKVDataListNode) *persistedKVDataListNode {
if e == at {
return e
}
diff --git a/ledger/persistedkvs_test.go b/ledger/persistedkvs_test.go
new file mode 100644
index 0000000000..eb5ed9dff7
--- /dev/null
+++ b/ledger/persistedkvs_test.go
@@ -0,0 +1,175 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func (l *persistedKVDataList) getRoot() dataListNode {
+ return &l.root
+}
+
+func (l *persistedKVDataListNode) getNext() dataListNode {
+ // get rid of returning nil wrapped into an interface to let i = x.getNext(); i != nil work.
+ if l.next == nil {
+ return nil
+ }
+ return l.next
+}
+
+func (l *persistedKVDataListNode) getPrev() dataListNode {
+ if l.prev == nil {
+ return nil
+ }
+ return l.prev
+}
+
+// inspect that the list seems like the array
+func checkListPointersBD(t *testing.T, l *persistedKVDataList, es []*persistedKVDataListNode) {
+ es2 := make([]dataListNode, len(es))
+ for i, el := range es {
+ es2[i] = el
+ }
+
+ checkListPointers(t, l, es2)
+}
+
+func TestRemoveFromListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ e2 := l.pushFront(&cachedKVData{key: "key2"})
+ e3 := l.pushFront(&cachedKVData{key: "key3"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e2, e1})
+
+ l.remove(e2)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e1})
+ l.remove(e3)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1})
+}
+
+func TestAddingNewNodeWithAllocatedFreeListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList().allocateFreeNodes(10)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ if countListSize(l.freeList) != 10 {
+ t.Errorf("free list did not allocate nodes")
+ return
+ }
+ // test elements
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1})
+
+ if countListSize(l.freeList) != 9 {
+ t.Errorf("free list did not provide a node on new list entry")
+ return
+ }
+}
+
+func TestMultielementListPositioningBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ // test elements
+ e2 := l.pushFront(&cachedKVData{key: "key1"})
+ e1 := l.pushFront(&cachedKVData{key: "key2"})
+ e3 := l.pushFront(&cachedKVData{key: "key3"})
+ e4 := l.pushFront(&cachedKVData{key: "key4"})
+ e5 := l.pushFront(&cachedKVData{key: "key5"})
+
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e5, e4, e3, e1, e2})
+
+ l.move(e4, e1)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e5, e3, e1, e4, e2})
+
+ l.remove(e5)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e1, e4, e2})
+
+ l.move(e1, e4) // swap in middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e4, e1, e2})
+
+ l.moveToFront(e4)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e4, e3, e1, e2})
+
+ l.remove(e2)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e4, e3, e1})
+
+ l.moveToFront(e3) // move from middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e4, e1})
+
+ l.moveToFront(e1) // move from end
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e3, e4})
+
+ l.moveToFront(e1) // no movement
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e3, e4})
+
+ e2 = l.pushFront(&cachedKVData{key: "key2"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1, e3, e4})
+
+ l.remove(e3) // removing from middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1, e4})
+
+ l.remove(e4) // removing from end
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1})
+
+ l.move(e2, e1) // swapping between two elements
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e2})
+
+ l.remove(e1) // removing front
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2})
+
+ l.move(e2, l.back()) // swapping element with itself.
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2})
+
+ l.remove(e2) // remove last one
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+}
+
+func TestSingleElementListPositioningBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ e := l.pushFront(&cachedKVData{key: "key1"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e})
+ l.moveToFront(e)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e})
+ l.remove(e)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+}
+
+func TestRemovedNodeShouldBeMovedToFreeListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ e2 := l.pushFront(&cachedKVData{key: "key2"})
+
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1})
+
+ e := l.back()
+ l.remove(e)
+
+ for i := l.freeList.next; i != nil; i = i.next {
+ if i == e {
+ // stopping the test with good results:
+ return
+ }
+ }
+ t.Error("expected the removed node to appear at the freelist")
+}
diff --git a/ledger/persistedresources_list.go b/ledger/persistedresources_list.go
index 57b0cdc44c..baa7ac351a 100644
--- a/ledger/persistedresources_list.go
+++ b/ledger/persistedresources_list.go
@@ -17,7 +17,7 @@
package ledger
// persistedResourcesDataList represents a doubly linked list.
-// must initiate with newPersistedAccountList.
+// must initiate with newPersistedResourcesList.
type persistedResourcesDataList struct {
root persistedResourcesDataListNode // sentinel list element, only &root, root.prev, and root.next are used
freeList *persistedResourcesDataListNode // preallocated nodes location
diff --git a/ledger/tracker.go b/ledger/tracker.go
index ae16b61e31..c5c4baedb0 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -247,7 +247,7 @@ type deferredCommitContext struct {
updatedPersistedAccounts []persistedAccountData
updatedPersistedResources map[basics.Address][]persistedResourcesData
- updatedPersistedBoxes map[string]persistedBoxData
+ updatedPersistedKVs map[string]persistedKVData
compactOnlineAccountDeltas compactOnlineAccountDeltas
updatedPersistedOnlineAccounts []persistedOnlineAccountData
From 856b59c7615c650f532efd6e734534a787e05cce Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Wed, 10 Aug 2022 13:00:19 -0400
Subject: [PATCH 17/21] rename updatedBoxes
---
ledger/accountdb_test.go | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index cf71dd5d59..a5f15fa495 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -311,7 +311,7 @@ func TestAccountDBRound(t *testing.T) {
require.NoError(t, err)
expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams)
- updatedAccts, updatesResources, updatedBoxes, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i))
+ updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
numResUpdates := 0
@@ -319,7 +319,7 @@ func TestAccountDBRound(t *testing.T) {
numResUpdates += len(rs)
}
require.Equal(t, resourceUpdatesCnt.len(), numResUpdates)
- require.Empty(t, updatedBoxes)
+ require.Empty(t, updatedKVs)
updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i))
require.NoError(t, err)
@@ -449,7 +449,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
err = outResourcesDeltas.resourcesLoadOld(tx, knownAddresses)
require.NoError(t, err)
- updatedAccts, updatesResources, updatedBoxes, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound))
+ updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound))
require.NoError(t, err)
require.Equal(t, 1, len(updatedAccts)) // we store empty even for deleted accounts
require.Equal(t,
@@ -463,7 +463,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
updatesResources[addr][0],
)
- require.Empty(t, updatedBoxes)
+ require.Empty(t, updatedKVs)
})
}
}
@@ -2888,13 +2888,13 @@ func TestAccountUnorderedUpdates(t *testing.T) {
t.Run(fmt.Sprintf("acct-perm-%d|res-perm-%d", i, j), func(t *testing.T) {
a := require.New(t)
mock2 := mock.clone()
- updatedAccounts, updatedResources, updatedBoxes, err := accountsNewRoundImpl(
+ updatedAccounts, updatedResources, updatedKVs, err := accountsNewRoundImpl(
&mock2, acctVariant, resVariant, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
a.Len(updatedAccounts, 3)
a.Len(updatedResources, 3)
- a.Empty(updatedBoxes)
+ a.Empty(updatedKVs)
})
}
}
@@ -2971,13 +2971,13 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
a.Equal(1, len(resDeltas.misses)) // (addr2, aidx) does not exist
a.Equal(2, resDeltas.len()) // (addr1, aidx) found
- updatedAccounts, updatedResources, updatedBoxes, err := accountsNewRoundImpl(
+ updatedAccounts, updatedResources, updatedKVs, err := accountsNewRoundImpl(
&mock, acctDeltas, resDeltas, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
a.Equal(3, len(updatedAccounts))
a.Equal(2, len(updatedResources))
- a.Equal(0, len(updatedBoxes))
+ a.Equal(0, len(updatedKVs))
// one deletion entry for pre-existing account addr1, and one entry for in-memory account addr2
// in base accounts updates and in resources updates
From fb0cb6c51a9e758aa6fb439e38cc2b0255db2bf4 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Tue, 16 Aug 2022 15:04:19 -0400
Subject: [PATCH 18/21] fix compile error
---
ledger/acctupdates_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 7a0da94e3c..347aa2d410 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -1371,7 +1371,7 @@ func TestKVCache(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
- au, _ := newAcctUpdates(t, ml, conf, ".")
+ au, _ := newAcctUpdates(t, ml, conf)
defer au.close()
knownCreatables := make(map[basics.CreatableIndex]bool)
From 6458d41d416b2d6f2500dabd64ff3cbaffe7f35e Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Thu, 18 Aug 2022 11:19:24 -0400
Subject: [PATCH 19/21] rename and minor refactor for error handling
---
ledger/accountdb.go | 2 +-
ledger/acctupdates.go | 40 ++++++++++++++++++++--------------------
2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 357e290a8e..1450d37557 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -4818,7 +4818,7 @@ func (prd *persistedResourcesData) before(other *persistedResourcesData) bool {
// before compares the round numbers of two persistedKVData and determines if the current persistedKVData
// happened before the other.
-func (prd *persistedKVData) before(other *persistedKVData) bool {
+func (prd persistedKVData) before(other *persistedKVData) bool {
return prd.round < other.round
}
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index bf47c42cc7..273506894a 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -215,8 +215,8 @@ type accountUpdates struct {
// baseResources stores the most recently used resources, at exactly dbRound
baseResources lruResources
- // baseKV stores the most recently used KV, at exactly dbRound
- baseKV lruKV
+ // baseKVs stores the most recently used KV, at exactly dbRound
+ baseKVs lruKV
// logAccountUpdatesMetrics is a flag for enable/disable metrics logging
logAccountUpdatesMetrics bool
@@ -322,7 +322,7 @@ func (au *accountUpdates) close() {
}
au.baseAccounts.prune(0)
au.baseResources.prune(0)
- au.baseKV.prune(0)
+ au.baseKVs.prune(0)
}
func (au *accountUpdates) LookupResource(rnd basics.Round, addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.AccountResource, basics.Round, error) {
@@ -388,10 +388,10 @@ func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bo
}
// check the baseKV cache
- if pbd, has := au.baseKV.read(key); has {
+ if pbd, has := au.baseKVs.read(key); has {
// we don't technically need this, since it's already in the baseKV, however, writing this over
// would ensure that we promote this field.
- au.baseKV.writePending(pbd, key)
+ au.baseKVs.writePending(pbd, key)
return pbd.value, nil
}
@@ -405,18 +405,18 @@ func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bo
// on-disk DB.
persistedData, err := au.accountsq.lookupKeyValue(key)
- if persistedData.round == currentDbRound {
- if err == nil {
- // if we read actual data return it. This includes deleted values
- // where persistedData.value == nil to avoid unnecessary db lookups
- // for deleted KVs.
- au.baseKV.writePending(persistedData, key)
- return persistedData.value, err
- }
- // otherwise return empty
+ if err != nil {
return nil, err
}
+ if persistedData.round == currentDbRound {
+ // if we read actual data return it. This includes deleted values
+ // where persistedData.value == nil to avoid unnecessary db lookups
+ // for deleted KVs.
+ au.baseKVs.writePending(persistedData, key)
+ return persistedData.value, err
+ }
+
// The db round is unexpected...
if synchronized {
if persistedData.round < currentDbRound {
@@ -954,7 +954,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
- au.baseKV.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold)
+ au.baseKVs.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold)
return
}
@@ -979,7 +979,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.flushPendingWrites()
au.baseResources.flushPendingWrites()
- au.baseKV.flushPendingWrites()
+ au.baseKVs.flushPendingWrites()
for i := 0; i < delta.Accts.Len(); i++ {
addr, data := delta.Accts.GetByIdx(i)
@@ -1035,7 +1035,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
newBaseKVSize := (len(au.kvStore) + 1) + baseKVPendingBufferSize
- au.baseKV.prune(newBaseKVSize)
+ au.baseKVs.prune(newBaseKVSize)
}
// lookupLatest returns the account data for a given address for the latest round.
@@ -1335,7 +1335,7 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
// against the database.
persistedData, err = au.accountsq.lookupResources(addr, aidx, ctype)
if persistedData.round == currentDbRound {
- if err == nil {
+ if persistedData.addrid != 0 {
// if we read actual data return it
au.baseResources.writePending(persistedData, addr)
return persistedData.AccountResource(), rnd, err
@@ -1753,7 +1753,7 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
}
for key, persistedKV := range dcc.updatedPersistedKVs {
- au.baseKV.write(persistedKV, key)
+ au.baseKVs.write(persistedKV, key)
}
for cidx, modCrt := range dcc.compactCreatableDeltas {
@@ -1885,7 +1885,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
// rowid are flushed.
au.baseAccounts.prune(0)
au.baseResources.prune(0)
- au.baseKV.prune(0)
+ au.baseKVs.prune(0)
startTime := time.Now()
vacuumExitCh := make(chan struct{}, 1)
From 2b3a0aebdeccd59fc7fb52595beedfb8550a5e91 Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Thu, 18 Aug 2022 11:49:14 -0400
Subject: [PATCH 20/21] fix compile error
---
ledger/acctupdates_test.go | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 347aa2d410..7ac098ebea 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -1418,7 +1418,7 @@ func TestKVCache(t *testing.T) {
// verify cache doesn't contain the new kvs until committed to DB.
for name := range kvMods {
- _, has := au.baseKV.read(name)
+ _, has := au.baseKVs.read(name)
require.False(t, has)
}
@@ -1427,7 +1427,7 @@ func TestKVCache(t *testing.T) {
startKV := (currentDBRound - 1) * basics.Round(kvsPerBlock)
for j := 0; j < kvsPerBlock; j++ {
name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
- persistedValue, has := au.baseKV.read(name)
+ persistedValue, has := au.baseKVs.read(name)
require.True(t, has)
require.Equal(t, kvMap[name], *persistedValue.value)
}
@@ -1460,7 +1460,7 @@ func TestKVCache(t *testing.T) {
// verify cache doesn't contain updated kv values that haven't been committed to db
if i < kvCnt/kvsPerBlock {
for name := range kvMods {
- persistedValue, has := au.baseKV.read(name)
+ persistedValue, has := au.baseKVs.read(name)
require.True(t, has)
require.Equal(t, kvMap[name], *persistedValue.value)
}
@@ -1476,7 +1476,7 @@ func TestKVCache(t *testing.T) {
startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
for j := 0; j < kvsPerBlock; j++ {
name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
- persistedValue, has := au.baseKV.read(name)
+ persistedValue, has := au.baseKVs.read(name)
require.True(t, has)
expectedValue := fmt.Sprintf("modified value%s", name)
require.Equal(t, expectedValue, *persistedValue.value)
@@ -1509,7 +1509,7 @@ func TestKVCache(t *testing.T) {
// verify cache doesn't contain updated kv values that haven't been committed to db
if i < kvCnt/kvsPerBlock {
for name := range kvMods {
- persistedValue, has := au.baseKV.read(name)
+ persistedValue, has := au.baseKVs.read(name)
require.True(t, has)
value := fmt.Sprintf("modified value%s", name)
require.Equal(t, value, *persistedValue.value)
@@ -1526,7 +1526,7 @@ func TestKVCache(t *testing.T) {
startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
for j := 0; j < kvsPerBlock; j++ {
name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
- persistedValue, has := au.baseKV.read(name)
+ persistedValue, has := au.baseKVs.read(name)
require.True(t, has)
require.True(t, persistedValue.value == nil)
}
From 24ff56e45280372a7554c668c25bc32c31b51f2e Mon Sep 17 00:00:00 2001
From: algoidurovic <91566643+algoidurovic@users.noreply.github.com>
Date: Thu, 18 Aug 2022 12:40:07 -0400
Subject: [PATCH 21/21] explicit nil error
---
ledger/acctupdates.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 273506894a..1cba218fce 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -414,7 +414,7 @@ func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bo
// where persistedData.value == nil to avoid unnecessary db lookups
// for deleted KVs.
au.baseKVs.writePending(persistedData, key)
- return persistedData.value, err
+ return persistedData.value, nil
}
// The db round is unexpected...