Skip to content

Commit

Permalink
Merge pull request #819 from gzliudan/read_db_only
Browse files Browse the repository at this point in the history
all: add read-only option to database
  • Loading branch information
gzliudan authored Jan 24, 2025
2 parents ff923bd + 97c50f9 commit 8b9de4b
Show file tree
Hide file tree
Showing 17 changed files with 95 additions and 63 deletions.
2 changes: 1 addition & 1 deletion XDCxDAO/leveldb.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ func NewBatchDatabase(datadir string, cacheLimit int) *BatchDatabase {

// batchdatabase is a fast cache db to retrieve in-mem object
func NewBatchDatabaseWithEncode(datadir string, cacheLimit int) *BatchDatabase {
db, err := rawdb.NewLevelDBDatabase(datadir, 128, 1024, "")
db, err := rawdb.NewLevelDBDatabase(datadir, 128, 1024, "", false)
if err != nil {
log.Error("Can't create new DB", "error", err)
return nil
Expand Down
16 changes: 8 additions & 8 deletions cmd/XDC/chaincmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ func initGenesis(ctx *cli.Context) error {
defer stack.Close()

for _, name := range []string{"chaindata", "lightchaindata"} {
chaindb, err := stack.OpenDatabase(name, 0, 0, "")
chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
Expand All @@ -244,7 +244,7 @@ func importChain(ctx *cli.Context) error {
// Start metrics export if enabled
utils.SetupMetrics(&cfg.Metrics)

chain, chainDb := utils.MakeChain(ctx, stack)
chain, chainDb := utils.MakeChain(ctx, stack, false)
defer chainDb.Close()

// Start periodically gathering memory profiles
Expand Down Expand Up @@ -338,7 +338,7 @@ func exportChain(ctx *cli.Context) error {
stack, _ := makeFullNode(ctx)
defer stack.Close()

chain, db := utils.MakeChain(ctx, stack)
chain, db := utils.MakeChain(ctx, stack, true)
defer db.Close()
start := time.Now()

Expand Down Expand Up @@ -375,7 +375,7 @@ func importPreimages(ctx *cli.Context) error {
stack, _ := makeFullNode(ctx)
defer stack.Close()

diskdb := utils.MakeChainDatabase(ctx, stack)
diskdb := utils.MakeChainDatabase(ctx, stack, false)
defer diskdb.Close()

start := time.Now()
Expand All @@ -394,7 +394,7 @@ func exportPreimages(ctx *cli.Context) error {
stack, _ := makeFullNode(ctx)
defer stack.Close()

diskdb := utils.MakeChainDatabase(ctx, stack)
diskdb := utils.MakeChainDatabase(ctx, stack, true)
defer diskdb.Close()

start := time.Now()
Expand All @@ -414,7 +414,7 @@ func copyDb(ctx *cli.Context) error {
stack, _ := makeFullNode(ctx)
defer stack.Close()

chain, chainDb := utils.MakeChain(ctx, stack)
chain, chainDb := utils.MakeChain(ctx, stack, false)
defer chainDb.Close()

var syncmode downloader.SyncMode
Expand All @@ -424,7 +424,7 @@ func copyDb(ctx *cli.Context) error {
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil, nil)

// Create a source peer to satisfy downloader requests from
db, err := rawdb.NewLevelDBDatabase(ctx.Args().First(), ctx.Int(utils.CacheFlag.Name), 256, "")
db, err := rawdb.NewLevelDBDatabase(ctx.Args().First(), ctx.Int(utils.CacheFlag.Name), 256, "", false)
if err != nil {
return err
}
Expand Down Expand Up @@ -492,7 +492,7 @@ func dump(ctx *cli.Context) error {
stack, _ := makeFullNode(ctx)
defer stack.Close()

chain, chainDb := utils.MakeChain(ctx, stack)
chain, chainDb := utils.MakeChain(ctx, stack, true)
defer chainDb.Close()

for _, arg := range ctx.Args().Slice() {
Expand Down
8 changes: 4 additions & 4 deletions cmd/XDC/consolecmd_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func TestConsoleWelcome(t *testing.T) {

// Start a XDC console, make sure it's cleaned up and terminate the console
XDC := runXDC(t,
"console", "--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx/"+time.Now().String(),
"console", "--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx",
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--miner-etherbase", coinbase)

Expand Down Expand Up @@ -86,7 +86,7 @@ func TestIPCAttachWelcome(t *testing.T) {
ipc = filepath.Join(datadir, "XDC.ipc")
}
XDC := runXDC(t,
"--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx/"+time.Now().String(),
"--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx",
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--miner-etherbase", coinbase, "--ipcpath", ipc)

Expand All @@ -103,7 +103,7 @@ func TestHTTPAttachWelcome(t *testing.T) {
datadir := tmpdir(t)
defer os.RemoveAll(datadir)
XDC := runXDC(t,
"--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx/"+time.Now().String(),
"--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx",
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--miner-etherbase", coinbase, "--http", "--http-port", port)

Expand All @@ -120,7 +120,7 @@ func TestWSAttachWelcome(t *testing.T) {
datadir := tmpdir(t)
defer os.RemoveAll(datadir)
XDC := runXDC(t,
"--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx/"+time.Now().String(),
"--datadir", datadir, "--XDCx-datadir", datadir+"/XDCx",
"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--miner-etherbase", coinbase, "--ws", "--ws-port", port)

Expand Down
2 changes: 1 addition & 1 deletion cmd/XDC/dao_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
}
// Retrieve the DAO config flag from the database
path := filepath.Join(datadir, "XDC", "chaindata")
db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "")
db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "", false)
if err != nil {
t.Fatalf("test %d: failed to open test database: %v", test, err)
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/gc/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ type ResultProcessNode struct {

func main() {
flag.Parse()
db, _ := leveldb.New(*dir, ethconfig.Defaults.DatabaseCache, utils.MakeDatabaseHandles(0), "")
db, _ := leveldb.New(*dir, ethconfig.Defaults.DatabaseCache, utils.MakeDatabaseHandles(0), "", false)
lddb := rawdb.NewDatabase(db)
head := rawdb.ReadHeadBlockHash(lddb)
currentHeader := rawdb.ReadHeader(lddb, head, *rawdb.ReadHeaderNumber(lddb, head))
Expand Down
8 changes: 4 additions & 4 deletions cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -1604,7 +1604,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
}

// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
var (
cache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
handles = MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name))
Expand All @@ -1613,7 +1613,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
if ctx.Bool(LightModeFlag.Name) {
name = "lightchaindata"
}
chainDb, err := stack.OpenDatabase(name, cache, handles, "")
chainDb, err := stack.OpenDatabase(name, cache, handles, "", readonly)
if err != nil {
Fatalf("Could not open database: %v", err)
}
Expand All @@ -1634,9 +1634,9 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
}

// MakeChain creates a chain manager from set command line flags.
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack)
chainDb = MakeChainDatabase(ctx, stack, readonly)

config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion consensus/XDPoS/engines/engine_v2/snapshot_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ func TestStoreLoadSnapshot(t *testing.T) {
if err != nil {
panic(fmt.Sprintf("can't create temporary directory: %v", err))
}
db, err := leveldb.New(dir, 256, 0, "")
db, err := leveldb.New(dir, 256, 0, "", false)
if err != nil {
panic(fmt.Sprintf("can't create temporary database: %v", err))
}
Expand Down
8 changes: 4 additions & 4 deletions core/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
b.Fatalf("cannot create temporary directory: %v", err)
}
defer os.RemoveAll(dir)
db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "")
db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
if err != nil {
b.Fatalf("cannot create temporary database: %v", err)
}
Expand Down Expand Up @@ -254,7 +254,7 @@ func benchWriteChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("cannot create temporary directory: %v", err)
}
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "")
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
Expand All @@ -271,7 +271,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
}
defer os.RemoveAll(dir)

db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "")
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
Expand All @@ -282,7 +282,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "")
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
Expand Down
5 changes: 3 additions & 2 deletions core/rawdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package rawdb

import (
"fmt"

"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/ethdb/leveldb"
"github.com/XinFinOrg/XDPoSChain/ethdb/memorydb"
Expand Down Expand Up @@ -101,8 +102,8 @@ func NewMemoryDatabase() ethdb.Database {

// NewLevelDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewLevelDBDatabase(file string, cache int, handles int, namespace string) (ethdb.Database, error) {
db, err := leveldb.New(file, cache, handles, namespace)
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
db, err := leveldb.New(file, cache, handles, namespace, readonly)
if err != nil {
return nil, err
}
Expand Down
2 changes: 1 addition & 1 deletion eth/backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -328,7 +328,7 @@ func makeExtraData(extra []byte) []byte {

// CreateDB creates the chain database.
func CreateDB(ctx *node.ServiceContext, config *ethconfig.Config, name string) (ethdb.Database, error) {
db, err := ctx.OpenDatabase(name, config.DatabaseCache, config.DatabaseHandles)
db, err := ctx.OpenDatabase(name, config.DatabaseCache, config.DatabaseHandles, false)
if err != nil {
return nil, err
}
Expand Down
6 changes: 3 additions & 3 deletions eth/filters/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
b.Log("Running bloombits benchmark section size:", sectionSize)

db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
Expand Down Expand Up @@ -131,7 +131,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
for i := 0; i < benchFilterCnt; i++ {
if i%20 == 0 {
db.Close()
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
backend = &testBackend{db: db, sections: cnt}
sys = NewFilterSystem(backend, Config{})
}
Expand Down Expand Up @@ -178,7 +178,7 @@ func clearBloomBits(db ethdb.Database) {
func BenchmarkNoBloomBits(b *testing.B) {
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
b.Log("Running benchmark without bloombits")
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
Expand Down
4 changes: 2 additions & 2 deletions eth/filters/filter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ func BenchmarkFilters(b *testing.B) {
defer os.RemoveAll(dir)

var (
db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false)
_, sys = newTestFilterSystem(b, db, Config{})
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
Expand Down Expand Up @@ -103,7 +103,7 @@ func TestFilters(t *testing.T) {
defer os.RemoveAll(dir)

var (
db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "", false)
_, sys = newTestFilterSystem(t, db, Config{})
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
Expand Down
77 changes: 54 additions & 23 deletions ethdb/leveldb/leveldb.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

//go:build !js
// +build !js

// Package leveldb implements the key-value database layer based on LevelDB.
Expand Down Expand Up @@ -83,25 +84,40 @@ type Database struct {

// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
// Ensure we have some minimal caching and file guarantees
if cache < minCache {
cache = minCache
}
if handles < minHandles {
handles = minHandles
}
// New opens (or creates) a LevelDB-backed database at file, clamping the
// cache and file-handle budgets to their package minimums and honoring the
// readonly flag. The namespace is the prefix used for metrics reporting.
func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) {
	tune := func(o *opt.Options) {
		// Never go below the minimal caching and file-handle guarantees.
		c, h := cache, handles
		if c < minCache {
			c = minCache
		}
		if h < minHandles {
			h = minHandles
		}
		// Apply the tuned budgets on top of the defaults.
		o.OpenFilesCacheCapacity = h
		o.BlockCacheCapacity = c / 2 * opt.MiB
		// Two write buffers are used internally, hence the quarter split.
		o.WriteBuffer = c / 4 * opt.MiB
		if readonly {
			o.ReadOnly = true
		}
	}
	return NewCustom(file, namespace, tune)
}

// NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
// The customize function allows the caller to modify the leveldb options.
func NewCustom(file string, namespace string, customize func(options *opt.Options)) (*Database, error) {
options := configureOptions(customize)
logger := log.New("database", file)
logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)
usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2
logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()}
if options.ReadOnly {
logCtx = append(logCtx, "readonly", "true")
}
logger.Info("Allocated cache and file handles", logCtx...)

// Open the db and recover any potential corruptions
db, err := leveldb.OpenFile(file, &opt.Options{
OpenFilesCacheCapacity: handles,
BlockCacheCapacity: cache / 2 * opt.MiB,
WriteBuffer: cache / 4 * opt.MiB, // Two of these are used internally
Filter: filter.NewBloomFilter(10),
DisableSeeksCompaction: true,
})
db, err := leveldb.OpenFile(file, options)
if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
db, err = leveldb.RecoverFile(file, nil)
}
Expand Down Expand Up @@ -133,6 +149,20 @@ func New(file string, cache int, handles int, namespace string) (*Database, erro
return ldb, nil
}

// configureOptions builds the default LevelDB options and then lets the
// optional customizeFn callback adjust them before they are returned.
func configureOptions(customizeFn func(*opt.Options)) *opt.Options {
	// Defaults applied to every database we open.
	opts := &opt.Options{
		Filter:                 filter.NewBloomFilter(10),
		DisableSeeksCompaction: true,
	}
	if customizeFn == nil {
		return opts
	}
	// Give the caller a chance to override the defaults.
	customizeFn(opts)
	return opts
}

// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {
Expand Down Expand Up @@ -215,13 +245,14 @@ func (db *Database) Path() string {
// the metrics subsystem.
//
// This is how a LevelDB stats table looks like (currently):
// Compactions
// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)
// -------+------------+---------------+---------------+---------------+---------------
// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098
// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294
// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884
// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000
//
// Compactions
// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)
// -------+------------+---------------+---------------+---------------+---------------
// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098
// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294
// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884
// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000
//
// This is how the write delay look like (currently):
// DelayN:5 Delay:406.604657ms Paused: false
Expand Down
Loading

0 comments on commit 8b9de4b

Please sign in to comment.