From 70c08083214cd99a7dbb24ce3b2d8687fe96ceaf Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 13 May 2024 18:23:59 +0530 Subject: [PATCH 01/56] snap-sync consensus 0.2: start reading parent chain from the right block --- arbnode/node.go | 81 ++++++++++++++++++++++++++++++++++------ staker/rollup_watcher.go | 39 ++++++++++++++++++- 2 files changed, 108 insertions(+), 12 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index c346a38e14..a209e81e3a 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -277,19 +277,21 @@ type Node struct { } type SnapSyncConfig struct { - Enabled bool - PrevBatchMessageCount uint64 - PrevDelayedRead uint64 - BatchCount uint64 - DelayedCount uint64 + Enabled bool + PrevBatchMessageCount uint64 + PrevDelayedRead uint64 + BatchCount uint64 + DelayedCount uint64 + ParentChainAssertionBlock uint64 } var DefaultSnapSyncConfig = SnapSyncConfig{ - Enabled: false, - PrevBatchMessageCount: 0, - BatchCount: 0, - DelayedCount: 0, - PrevDelayedRead: 0, + Enabled: false, + PrevBatchMessageCount: 0, + PrevDelayedRead: 0, + BatchCount: 0, + DelayedCount: 0, + ParentChainAssertionBlock: 0, } type ConfigFetcher interface { @@ -564,7 +566,25 @@ func createNodeImpl( if err != nil { return nil, err } - inboxReader, err := NewInboxReader(inboxTracker, l1client, l1Reader, new(big.Int).SetUint64(deployInfo.DeployedAt), delayedBridge, sequencerInbox, func() *InboxReaderConfig { return &configFetcher.Get().InboxReader }) + firstMessageBlock := new(big.Int).SetUint64(deployInfo.DeployedAt) + if config.SnapSyncTest.Enabled { + firstMessageToRead := config.SnapSyncTest.DelayedCount + if firstMessageToRead > config.SnapSyncTest.BatchCount { + firstMessageToRead = config.SnapSyncTest.BatchCount + } + if firstMessageToRead > 0 { + firstMessageToRead-- + } + // Find the first block containing the first message to read + // Subtract 1 to get the block before the first message to read, + // this is done to fetch previous batch metadata needed for snap sync. 
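// Worked example of the arithmetic above (illustrative numbers): with
// SnapSyncTest.DelayedCount = 12 and SnapSyncTest.BatchCount = 10,
// firstMessageToRead starts at 12, is clamped down to 10, then decremented to 9,
// and the call below therefore searches for 8, one position earlier, so the
// block it finds still carries the previous batch metadata that snap sync needs.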
+ block, err := FindBlockContainingBatch(ctx, deployInfo.Rollup, l1client, config.SnapSyncTest.ParentChainAssertionBlock, firstMessageToRead-1) + if err != nil { + return nil, err + } + firstMessageBlock.SetUint64(block) + } + inboxReader, err := NewInboxReader(inboxTracker, l1client, l1Reader, firstMessageBlock, delayedBridge, sequencerInbox, func() *InboxReaderConfig { return &configFetcher.Get().InboxReader }) if err != nil { return nil, err } @@ -740,6 +760,45 @@ func createNodeImpl( }, nil } +func FindBlockContainingBatch(ctx context.Context, rollupAddress common.Address, l1Client arbutil.L1Interface, parentChainAssertionBlock uint64, batch uint64) (uint64, error) { + callOpts := bind.CallOpts{Context: ctx} + rollup, err := staker.NewRollupWatcher(rollupAddress, l1Client, callOpts) + if err != nil { + return 0, err + } + high := parentChainAssertionBlock + low := high / 2 + // Exponentially reduce high and low by a factor of 2 until lowNode.InboxMaxCount < batch + // This will give us a range (low to high) of blocks that contain the batch + for low > 0 { + lowNode, err := rollup.LookupNodeByBlockNumber(ctx, low) + if err != nil { + return 0, err + } + if lowNode.InboxMaxCount.Uint64() > batch { + high = low + low = low / 2 + } else { + break + } + } + // Then binary search between low and high to find the block containing the batch + for low < high { + mid := low + (high-low)/2 + + midNode, err := rollup.LookupNodeByBlockNumber(ctx, mid) + if err != nil { + return 0, err + } + if midNode.InboxMaxCount.Uint64() < batch { + low = mid + 1 + } else { + high = mid + } + } + return low, nil +} + func (n *Node) OnConfigReload(_ *Config, _ *Config) error { // TODO return nil diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 118ce15b44..0cc5b43999 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -57,7 +57,6 @@ func NewRollupWatcher(address common.Address, client arbutil.L1Interface, callOp if err != nil { return nil, err } - return &RollupWatcher{ address: address, client: client, @@ -165,6 +164,44 @@ func (r *RollupWatcher) LookupNode(ctx context.Context, number uint64) (*NodeInf }, nil } +func (r *RollupWatcher) LookupNodeByBlockNumber(ctx context.Context, blockNumber uint64) (*NodeInfo, error) { + var query = ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(blockNumber)), + ToBlock: big.NewInt(int64(blockNumber)), + Addresses: []common.Address{r.address}, + Topics: [][]common.Hash{{nodeCreatedID}}, + } + logs, err := r.client.FilterLogs(ctx, query) + if err != nil { + return nil, err + } + if len(logs) == 0 { + return nil, fmt.Errorf("couldn't find node at the request blockNumber %v", blockNumber) + } + if len(logs) > 1 { + return nil, fmt.Errorf("found multiple instances of node at the requested blockNumber %v", blockNumber) + } + ethLog := logs[0] + parsedLog, err := r.ParseNodeCreated(ethLog) + if err != nil { + return nil, err + } + l1BlockProposed, err := arbutil.CorrespondingL1BlockNumber(ctx, r.client, ethLog.BlockNumber) + if err != nil { + return nil, err + } + return &NodeInfo{ + NodeNum: parsedLog.NodeNum, + L1BlockProposed: l1BlockProposed, + ParentChainBlockProposed: ethLog.BlockNumber, + Assertion: NewAssertionFromSolidity(parsedLog.Assertion), + InboxMaxCount: parsedLog.InboxMaxCount, + AfterInboxBatchAcc: parsedLog.AfterInboxBatchAcc, + NodeHash: parsedLog.NodeHash, + WasmModuleRoot: parsedLog.WasmModuleRoot, + }, nil +} + func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, nodeHash common.Hash) 
([]*NodeInfo, error) { node, err := r.RollupUserLogic.GetNode(r.getCallOpts(ctx), nodeNum) if err != nil { From 0ddcc71b57fa14112cfa4d1ca3c363033a92bb86 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 13 May 2024 18:26:03 +0530 Subject: [PATCH 02/56] minor fix --- staker/rollup_watcher.go | 1 + 1 file changed, 1 insertion(+) diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 0cc5b43999..fdf30a9d61 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -57,6 +57,7 @@ func NewRollupWatcher(address common.Address, client arbutil.L1Interface, callOp if err != nil { return nil, err } + return &RollupWatcher{ address: address, client: client, From 19643b77e08b2703ae7d303a5a9c096077a75d76 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Wed, 29 May 2024 17:35:07 +0530 Subject: [PATCH 03/56] Changes based on PR comments --- arbnode/node.go | 36 ++++++++++++++++-------------------- staker/rollup_watcher.go | 38 -------------------------------------- 2 files changed, 16 insertions(+), 58 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index b4321f7767..e99433e392 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -567,17 +567,14 @@ func createNodeImpl( } firstMessageBlock := new(big.Int).SetUint64(deployInfo.DeployedAt) if config.SnapSyncTest.Enabled { - firstMessageToRead := config.SnapSyncTest.DelayedCount - if firstMessageToRead > config.SnapSyncTest.BatchCount { - firstMessageToRead = config.SnapSyncTest.BatchCount - } - if firstMessageToRead > 0 { - firstMessageToRead-- - } + batchCount := config.SnapSyncTest.BatchCount // Find the first block containing the first message to read // Subtract 1 to get the block before the first message to read, // this is done to fetch previous batch metadata needed for snap sync. - block, err := FindBlockContainingBatch(ctx, deployInfo.Rollup, l1client, config.SnapSyncTest.ParentChainAssertionBlock, firstMessageToRead-1) + if batchCount > 0 { + batchCount-- + } + block, err := FindBlockContainingBatchCount(ctx, deployInfo.Bridge, l1client, config.SnapSyncTest.ParentChainAssertionBlock, batchCount) if err != nil { return nil, err } @@ -759,37 +756,36 @@ func createNodeImpl( }, nil } -func FindBlockContainingBatch(ctx context.Context, rollupAddress common.Address, l1Client arbutil.L1Interface, parentChainAssertionBlock uint64, batch uint64) (uint64, error) { - callOpts := bind.CallOpts{Context: ctx} - rollup, err := staker.NewRollupWatcher(rollupAddress, l1Client, callOpts) +func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Address, l1Client arbutil.L1Interface, parentChainAssertionBlock uint64, batchCount uint64) (uint64, error) { + bridge, err := bridgegen.NewIBridge(bridgeAddress, l1Client) if err != nil { return 0, err } high := parentChainAssertionBlock - low := high / 2 - // Exponentially reduce high and low by a factor of 2 until lowNode.InboxMaxCount < batch - // This will give us a range (low to high) of blocks that contain the batch + low := high - 100 + // Reduce high and low by 100 until lowNode.InboxMaxCount < batchCount + // This will give us a range (low to high) of blocks that contain the batch count. 
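// A minimal standalone sketch of the same two-phase search, assuming a
// countAt(block) helper that wraps bridge.SequencerMessageCount at a given
// parent chain block (names are illustrative, not from the patch): first the
// lower bound is walked back in fixed steps until the count there is at or
// below the target, then a plain binary search narrows [low, high) to the
// first block whose count reaches the target.
func findBlockWithCount(countAt func(uint64) (uint64, error), high, target uint64) (uint64, error) {
	const step = 100
	var low uint64
	if high > step {
		low = high - step
	}
	// Phase one: slide the window down until countAt(low) <= target (or low hits 0).
	for low > 0 {
		c, err := countAt(low)
		if err != nil {
			return 0, err
		}
		if c <= target {
			break
		}
		high = low
		if low > step {
			low -= step
		} else {
			low = 0
		}
	}
	// Phase two: binary search for the first block where the count reaches target.
	for low < high {
		mid := low + (high-low)/2
		c, err := countAt(mid)
		if err != nil {
			return 0, err
		}
		if c < target {
			low = mid + 1
		} else {
			high = mid
		}
	}
	return low, nil
}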
for low > 0 { - lowNode, err := rollup.LookupNodeByBlockNumber(ctx, low) + lowCount, err := bridge.SequencerMessageCount(&bind.CallOpts{Context: ctx, BlockNumber: new(big.Int).SetUint64(low)}) if err != nil { return 0, err } - if lowNode.InboxMaxCount.Uint64() > batch { + if lowCount.Uint64() > batchCount { high = low - low = low / 2 + low = low - 100 } else { break } } - // Then binary search between low and high to find the block containing the batch + // Then binary search between low and high to find the block containing the batch count. for low < high { mid := low + (high-low)/2 - midNode, err := rollup.LookupNodeByBlockNumber(ctx, mid) + midCount, err := bridge.SequencerMessageCount(&bind.CallOpts{Context: ctx, BlockNumber: new(big.Int).SetUint64(mid)}) if err != nil { return 0, err } - if midNode.InboxMaxCount.Uint64() < batch { + if midCount.Uint64() < batchCount { low = mid + 1 } else { high = mid diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index fdf30a9d61..118ce15b44 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -165,44 +165,6 @@ func (r *RollupWatcher) LookupNode(ctx context.Context, number uint64) (*NodeInf }, nil } -func (r *RollupWatcher) LookupNodeByBlockNumber(ctx context.Context, blockNumber uint64) (*NodeInfo, error) { - var query = ethereum.FilterQuery{ - FromBlock: big.NewInt(int64(blockNumber)), - ToBlock: big.NewInt(int64(blockNumber)), - Addresses: []common.Address{r.address}, - Topics: [][]common.Hash{{nodeCreatedID}}, - } - logs, err := r.client.FilterLogs(ctx, query) - if err != nil { - return nil, err - } - if len(logs) == 0 { - return nil, fmt.Errorf("couldn't find node at the request blockNumber %v", blockNumber) - } - if len(logs) > 1 { - return nil, fmt.Errorf("found multiple instances of node at the requested blockNumber %v", blockNumber) - } - ethLog := logs[0] - parsedLog, err := r.ParseNodeCreated(ethLog) - if err != nil { - return nil, err - } - l1BlockProposed, err := arbutil.CorrespondingL1BlockNumber(ctx, r.client, ethLog.BlockNumber) - if err != nil { - return nil, err - } - return &NodeInfo{ - NodeNum: parsedLog.NodeNum, - L1BlockProposed: l1BlockProposed, - ParentChainBlockProposed: ethLog.BlockNumber, - Assertion: NewAssertionFromSolidity(parsedLog.Assertion), - InboxMaxCount: parsedLog.InboxMaxCount, - AfterInboxBatchAcc: parsedLog.AfterInboxBatchAcc, - NodeHash: parsedLog.NodeHash, - WasmModuleRoot: parsedLog.WasmModuleRoot, - }, nil -} - func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, nodeHash common.Hash) ([]*NodeInfo, error) { node, err := r.RollupUserLogic.GetNode(r.getCallOpts(ctx), nodeNum) if err != nil { From b75a4c8a5c1f80ffe7b27468aa1936b1df607c66 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Wed, 29 May 2024 17:38:04 +0530 Subject: [PATCH 04/56] fix comment --- arbnode/node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index e99433e392..bcb5928c7d 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -568,8 +568,8 @@ func createNodeImpl( firstMessageBlock := new(big.Int).SetUint64(deployInfo.DeployedAt) if config.SnapSyncTest.Enabled { batchCount := config.SnapSyncTest.BatchCount - // Find the first block containing the first message to read - // Subtract 1 to get the block before the first message to read, + // Find the first block containing the batch count. + // Subtract 1 to get the block before the needed batch count, // this is done to fetch previous batch metadata needed for snap sync. 
if batchCount > 0 { batchCount-- From 5b39217575f83f78039e4b172db77d292263ffdf Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Wed, 29 May 2024 18:24:40 +0530 Subject: [PATCH 05/56] fix underflow --- arbnode/node.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index bcb5928c7d..049d5da2eb 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -762,7 +762,10 @@ func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Add return 0, err } high := parentChainAssertionBlock - low := high - 100 + low := uint64(0) + if high > 100 { + low = high - 100 + } // Reduce high and low by 100 until lowNode.InboxMaxCount < batchCount // This will give us a range (low to high) of blocks that contain the batch count. for low > 0 { @@ -772,7 +775,11 @@ func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Add } if lowCount.Uint64() > batchCount { high = low - low = low - 100 + if low > 100 { + low = low - 100 + } else { + low = 0 + } } else { break } From 2737e35e446ea36a57165214e2712c42298c0913 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 31 Jul 2024 16:08:44 -0300 Subject: [PATCH 06/56] Removes HardReorg config from InboxReader --- arbnode/delayed_seq_reorg_test.go | 153 ------------------------------ arbnode/inbox_reader.go | 13 +-- arbnode/inbox_tracker.go | 24 +++-- cmd/nitro/nitro.go | 4 - system_tests/seqinbox_test.go | 1 - 5 files changed, 13 insertions(+), 182 deletions(-) delete mode 100644 arbnode/delayed_seq_reorg_test.go diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go deleted file mode 100644 index 699eb3e8f6..0000000000 --- a/arbnode/delayed_seq_reorg_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package arbnode - -import ( - "context" - "encoding/binary" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/solgen/go/bridgegen" -) - -func TestSequencerReorgFromDelayed(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil, DefaultSnapSyncConfig) - Require(t, err) - - err = streamer.Start(ctx) - Require(t, err) - exec.Start(ctx) - init, err := streamer.GetMessage(0) - Require(t, err) - - initMsgDelayed := &DelayedInboxMessage{ - BlockHash: [32]byte{}, - BeforeInboxAcc: [32]byte{}, - Message: init.Message, - } - delayedRequestId := common.BigToHash(common.Big1) - userDelayed := &DelayedInboxMessage{ - BlockHash: [32]byte{}, - BeforeInboxAcc: initMsgDelayed.AfterInboxAcc(), - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_EndOfBlock, - Poster: [20]byte{}, - BlockNumber: 0, - Timestamp: 0, - RequestId: &delayedRequestId, - L1BaseFee: common.Big0, - }, - }, - } - err = tracker.AddDelayedMessages([]*DelayedInboxMessage{initMsgDelayed, userDelayed}, false) - Require(t, err) - - serializedInitMsgBatch := make([]byte, 40) - binary.BigEndian.PutUint64(serializedInitMsgBatch[32:], 1) - initMsgBatch := &SequencerInboxBatch{ - BlockHash: [32]byte{}, - ParentChainBlockNumber: 0, - SequenceNumber: 0, - BeforeInboxAcc: [32]byte{}, - AfterInboxAcc: [32]byte{1}, - AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), - AfterDelayedCount: 1, - TimeBounds: bridgegen.IBridgeTimeBounds{}, - rawLog: types.Log{}, - dataLocation: 0, - bridgeAddress: [20]byte{}, - serialized: serializedInitMsgBatch, - } - serializedUserMsgBatch := make([]byte, 40) - binary.BigEndian.PutUint64(serializedUserMsgBatch[32:], 2) - userMsgBatch := &SequencerInboxBatch{ - BlockHash: [32]byte{}, - ParentChainBlockNumber: 0, - SequenceNumber: 1, - BeforeInboxAcc: [32]byte{1}, - AfterInboxAcc: [32]byte{2}, - AfterDelayedAcc: userDelayed.AfterInboxAcc(), - AfterDelayedCount: 2, - TimeBounds: bridgegen.IBridgeTimeBounds{}, - rawLog: types.Log{}, - dataLocation: 0, - bridgeAddress: [20]byte{}, - serialized: serializedUserMsgBatch, - } - emptyBatch := &SequencerInboxBatch{ - BlockHash: [32]byte{}, - ParentChainBlockNumber: 0, - SequenceNumber: 2, - BeforeInboxAcc: [32]byte{2}, - AfterInboxAcc: [32]byte{3}, - AfterDelayedAcc: userDelayed.AfterInboxAcc(), - AfterDelayedCount: 2, - TimeBounds: bridgegen.IBridgeTimeBounds{}, - rawLog: types.Log{}, - dataLocation: 0, - bridgeAddress: [20]byte{}, - serialized: serializedUserMsgBatch, - } - err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{initMsgBatch, userMsgBatch, emptyBatch}) - Require(t, err) - - // Reorg out the user delayed message - err = tracker.ReorgDelayedTo(1, true) - Require(t, err) - - msgCount, err := streamer.GetMessageCount() - Require(t, err) - if msgCount != 1 { - Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 1)") - } - - delayedCount, err := tracker.GetDelayedCount() - Require(t, err) - if delayedCount != 1 { - Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 1)") - } - - batchCount, err := tracker.GetBatchCount() - Require(t, err) - if batchCount != 
1 { - Fail(t, "Unexpected tracker batch count", batchCount, "(expected 1)") - } - - emptyBatch = &SequencerInboxBatch{ - BlockHash: [32]byte{}, - ParentChainBlockNumber: 0, - SequenceNumber: 1, - BeforeInboxAcc: [32]byte{1}, - AfterInboxAcc: [32]byte{2}, - AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), - AfterDelayedCount: 1, - TimeBounds: bridgegen.IBridgeTimeBounds{}, - rawLog: types.Log{}, - dataLocation: 0, - bridgeAddress: [20]byte{}, - serialized: serializedInitMsgBatch, - } - err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{emptyBatch}) - Require(t, err) - - msgCount, err = streamer.GetMessageCount() - Require(t, err) - if msgCount != 2 { - Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 2)") - } - - batchCount, err = tracker.GetBatchCount() - Require(t, err) - if batchCount != 2 { - Fail(t, "Unexpected tracker batch count", batchCount, "(expected 2)") - } -} diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 98104b2ea7..a44d506d41 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -27,7 +27,6 @@ import ( type InboxReaderConfig struct { DelayBlocks uint64 `koanf:"delay-blocks" reload:"hot"` CheckDelay time.Duration `koanf:"check-delay" reload:"hot"` - HardReorg bool `koanf:"hard-reorg" reload:"hot"` MinBlocksToRead uint64 `koanf:"min-blocks-to-read" reload:"hot"` DefaultBlocksToRead uint64 `koanf:"default-blocks-to-read" reload:"hot"` TargetMessagesRead uint64 `koanf:"target-messages-read" reload:"hot"` @@ -51,7 +50,6 @@ func (c *InboxReaderConfig) Validate() error { func InboxReaderConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".delay-blocks", DefaultInboxReaderConfig.DelayBlocks, "number of latest blocks to ignore to reduce reorgs") f.Duration(prefix+".check-delay", DefaultInboxReaderConfig.CheckDelay, "the maximum time to wait between inbox checks (if not enough new blocks are found)") - f.Bool(prefix+".hard-reorg", DefaultInboxReaderConfig.HardReorg, "erase future transactions in addition to overwriting existing ones on reorg") f.Uint64(prefix+".min-blocks-to-read", DefaultInboxReaderConfig.MinBlocksToRead, "the minimum number of blocks to read at once (when caught up lowers load on L1)") f.Uint64(prefix+".default-blocks-to-read", DefaultInboxReaderConfig.DefaultBlocksToRead, "the default number of blocks to read at once (will vary based on traffic by default)") f.Uint64(prefix+".target-messages-read", DefaultInboxReaderConfig.TargetMessagesRead, "if adjust-blocks-to-read is enabled, the target number of messages to read at once") @@ -62,7 +60,6 @@ func InboxReaderConfigAddOptions(prefix string, f *flag.FlagSet) { var DefaultInboxReaderConfig = InboxReaderConfig{ DelayBlocks: 0, CheckDelay: time.Minute, - HardReorg: false, MinBlocksToRead: 1, DefaultBlocksToRead: 100, TargetMessagesRead: 500, @@ -73,7 +70,6 @@ var DefaultInboxReaderConfig = InboxReaderConfig{ var TestInboxReaderConfig = InboxReaderConfig{ DelayBlocks: 0, CheckDelay: time.Millisecond * 10, - HardReorg: false, MinBlocksToRead: 1, DefaultBlocksToRead: 100, TargetMessagesRead: 500, @@ -338,7 +334,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { missingDelayed = true } else if ourLatestDelayedCount > checkingDelayedCount { log.Info("backwards reorg of delayed messages", "from", ourLatestDelayedCount, "to", checkingDelayedCount) - err = r.tracker.ReorgDelayedTo(checkingDelayedCount, config.HardReorg) + err = r.tracker.ReorgDelayedTo(checkingDelayedCount) if err != nil { return err } @@ -373,11 
+369,6 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { if ourLatestBatchCount < checkingBatchCount { checkingBatchCount = ourLatestBatchCount missingSequencer = true - } else if ourLatestBatchCount > checkingBatchCount && config.HardReorg { - err = r.tracker.ReorgBatchesTo(checkingBatchCount) - if err != nil { - return err - } } if checkingBatchCount > 0 { checkingBatchSeqNum := checkingBatchCount - 1 @@ -566,7 +557,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } func (r *InboxReader) addMessages(ctx context.Context, sequencerBatches []*SequencerInboxBatch, delayedMessages []*DelayedInboxMessage) (bool, error) { - err := r.tracker.AddDelayedMessages(delayedMessages, r.config().HardReorg) + err := r.tracker.AddDelayedMessages(delayedMessages) if err != nil { return false, err } diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 7686fe413f..0eed2f5e1a 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -404,7 +404,7 @@ func (t *InboxTracker) GetDelayedMessageBytes(ctx context.Context, seqNum uint64 return msg.Serialize() } -func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardReorg bool) error { +func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage) error { var nextAcc common.Hash firstDelayedMsgToKeep := uint64(0) if len(messages) == 0 { @@ -440,17 +440,15 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR t.mutex.Lock() defer t.mutex.Unlock() - if !hardReorg { - // This math is safe to do as we know len(messages) > 0 - haveLastAcc, err := t.GetDelayedAcc(pos + uint64(len(messages)) - 1) - if err == nil { - if haveLastAcc == messages[len(messages)-1].AfterInboxAcc() { - // We already have these delayed messages - return nil - } - } else if !errors.Is(err, AccumulatorNotFoundErr) { - return err + // This math is safe to do as we know len(messages) > 0 + haveLastAcc, err := t.GetDelayedAcc(pos + uint64(len(messages)) - 1) + if err == nil { + if haveLastAcc == messages[len(messages)-1].AfterInboxAcc() { + // We already have these delayed messages + return nil } + } else if !errors.Is(err, AccumulatorNotFoundErr) { + return err } if pos > firstDelayedMsgToKeep { @@ -852,7 +850,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client *ethclien return nil } -func (t *InboxTracker) ReorgDelayedTo(count uint64, canReorgBatches bool) error { +func (t *InboxTracker) ReorgDelayedTo(count uint64) error { t.mutex.Lock() defer t.mutex.Unlock() @@ -867,7 +865,7 @@ func (t *InboxTracker) ReorgDelayedTo(count uint64, canReorgBatches bool) error return errors.New("attempted to reorg to future delayed count") } - return t.setDelayedCountReorgAndWriteBatch(t.db.NewBatch(), count, canReorgBatches) + return t.setDelayedCountReorgAndWriteBatch(t.db.NewBatch(), count, false) } func (t *InboxTracker) ReorgBatchesTo(count uint64) error { diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index bc2155a475..1d54e26a01 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -231,10 +231,6 @@ func mainImpl() int { nodeConfig.Node.ParentChainReader.Enable = true } - if nodeConfig.Execution.Sequencer.Enable && nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Node.InboxReader.HardReorg { - flag.Usage() - log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") - } if nodeConfig.Execution.Sequencer.Enable != nodeConfig.Node.Sequencer { log.Error("consensus and execution must agree if sequencing is 
enabled or not", "Execution.Sequencer.Enable", nodeConfig.Execution.Sequencer.Enable, "Node.Sequencer", nodeConfig.Node.Sequencer) } diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index a9f66b0e2f..e0da2d4f3f 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -139,7 +139,6 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { defer cancel() builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - builder.nodeConfig.InboxReader.HardReorg = true if validator { builder.nodeConfig.BlockValidator.Enable = true } From 94a07f076e0e264387c28303755e4e53f2b2cb0e Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Fri, 4 Oct 2024 12:35:17 -0300 Subject: [PATCH 07/56] TestSequencerReorgFromDelayed --- arbnode/delayed_seq_reorg_test.go | 209 ++++++++++++++++++++++++++++++ 1 file changed, 209 insertions(+) create mode 100644 arbnode/delayed_seq_reorg_test.go diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go new file mode 100644 index 0000000000..78e7fe42ce --- /dev/null +++ b/arbnode/delayed_seq_reorg_test.go @@ -0,0 +1,209 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbnode + +import ( + "context" + "encoding/binary" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/solgen/go/bridgegen" +) + +func TestSequencerReorgFromDelayed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) + tracker, err := NewInboxTracker(db, streamer, nil, DefaultSnapSyncConfig) + Require(t, err) + + err = streamer.Start(ctx) + Require(t, err) + exec.Start(ctx) + init, err := streamer.GetMessage(0) + Require(t, err) + + initMsgDelayed := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: [32]byte{}, + Message: init.Message, + } + delayedRequestId := common.BigToHash(common.Big1) + userDelayed := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: initMsgDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId, + L1BaseFee: common.Big0, + }, + }, + } + delayedRequestId2 := common.BigToHash(common.Big2) + userDelayed2 := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId2, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{initMsgDelayed, userDelayed, userDelayed2}) + Require(t, err) + + serializedInitMsgBatch := make([]byte, 40) + binary.BigEndian.PutUint64(serializedInitMsgBatch[32:], 1) + initMsgBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 0, + BeforeInboxAcc: [32]byte{}, + AfterInboxAcc: [32]byte{1}, + AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), + AfterDelayedCount: 1, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + 
serialized: serializedInitMsgBatch, + } + serializedUserMsgBatch := make([]byte, 40) + binary.BigEndian.PutUint64(serializedUserMsgBatch[32:], 2) + userMsgBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 1, + BeforeInboxAcc: [32]byte{1}, + AfterInboxAcc: [32]byte{2}, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedUserMsgBatch, + } + emptyBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 2, + BeforeInboxAcc: [32]byte{2}, + AfterInboxAcc: [32]byte{3}, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedUserMsgBatch, + } + err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{initMsgBatch, userMsgBatch, emptyBatch}) + Require(t, err) + + msgCount, err := streamer.GetMessageCount() + Require(t, err) + if msgCount != 3 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 3)") + } + + delayedCount, err := tracker.GetDelayedCount() + Require(t, err) + if delayedCount != 3 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 3)") + } + + // By modifying the timestamp of the userDelayed message, and adding it again, we remove userDelayed2 message. + userDelayedModified := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: initMsgDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: userDelayed.Message.Header.Timestamp + 1, + RequestId: &delayedRequestId, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayedModified}) + Require(t, err) + + // userMsgBatch, and emptyBatch will be deleted since their AfterDelayedAcc are not valid anymore after the reorg + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 1 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 1)") + } + + // userDelayed2 will be deleted since its AfterDelayedAcc is not valid anymore after the reorg + delayedCount, err = tracker.GetDelayedCount() + Require(t, err) + if delayedCount != 2 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 2)") + } + + // guarantees that delayed msg 1 is userDelayedModified and not userDelayed + msg, err := tracker.GetDelayedMessage(ctx, 1) + Require(t, err) + if (*msg.Header.RequestId).Cmp(*userDelayedModified.Message.Header.RequestId) != 0 { + Fail(t, "Unexpected delayed message requestId", msg.Header.RequestId, "(expected", userDelayedModified.Message.Header.RequestId, ")") + } + if msg.Header.Timestamp != userDelayedModified.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", msg.Header.Timestamp, "(expected", userDelayedModified.Message.Header.Timestamp, ")") + } + if userDelayedModified.Message.Header.Timestamp == userDelayed.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", userDelayedModified.Message.Header.Timestamp, "(expected", userDelayed.Message.Header.Timestamp, ")") + } + + batchCount, err := tracker.GetBatchCount() + Require(t, err) + if batchCount != 1 
{ + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 1)") + } + + emptyBatch = &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 1, + BeforeInboxAcc: [32]byte{1}, + AfterInboxAcc: [32]byte{2}, + AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), + AfterDelayedCount: 1, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedInitMsgBatch, + } + err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{emptyBatch}) + Require(t, err) + + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 2 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 2)") + } + + batchCount, err = tracker.GetBatchCount() + Require(t, err) + if batchCount != 2 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 2)") + } +} From 176ffb3fe03770d0909a60d3827bf4170bcf56cf Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Fri, 4 Oct 2024 13:00:22 -0300 Subject: [PATCH 08/56] Fix lint issue --- arbnode/delayed_seq_reorg_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index 78e7fe42ce..ae7775cda9 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -162,7 +162,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { // guarantees that delayed msg 1 is userDelayedModified and not userDelayed msg, err := tracker.GetDelayedMessage(ctx, 1) Require(t, err) - if (*msg.Header.RequestId).Cmp(*userDelayedModified.Message.Header.RequestId) != 0 { + if msg.Header.RequestId.Cmp(*userDelayedModified.Message.Header.RequestId) != 0 { Fail(t, "Unexpected delayed message requestId", msg.Header.RequestId, "(expected", userDelayedModified.Message.Header.RequestId, ")") } if msg.Header.Timestamp != userDelayedModified.Message.Header.Timestamp { From 4f8f8621fde2bc793b860ea1ab7a8827e314214c Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Fri, 4 Oct 2024 16:04:58 -0300 Subject: [PATCH 09/56] Improves TestSequencerReorgFromDelayed --- arbnode/delayed_seq_reorg_test.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index ae7775cda9..17f4756a53 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -127,7 +127,13 @@ func TestSequencerReorgFromDelayed(t *testing.T) { Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 3)") } - // By modifying the timestamp of the userDelayed message, and adding it again, we remove userDelayed2 message. 
+ batchCount, err := tracker.GetBatchCount() + Require(t, err) + if batchCount != 3 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 3)") + } + + // By modifying the timestamp of the userDelayed message, and adding it again, we cause a reorg userDelayedModified := &DelayedInboxMessage{ BlockHash: [32]byte{}, BeforeInboxAcc: initMsgDelayed.AfterInboxAcc(), @@ -145,14 +151,20 @@ func TestSequencerReorgFromDelayed(t *testing.T) { err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayedModified}) Require(t, err) - // userMsgBatch, and emptyBatch will be deleted since their AfterDelayedAcc are not valid anymore after the reorg + // userMsgBatch, and emptyBatch will be reorged out msgCount, err = streamer.GetMessageCount() Require(t, err) if msgCount != 1 { Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 1)") } - // userDelayed2 will be deleted since its AfterDelayedAcc is not valid anymore after the reorg + batchCount, err = tracker.GetBatchCount() + Require(t, err) + if batchCount != 1 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 1)") + } + + // userDelayed2 will be deleted delayedCount, err = tracker.GetDelayedCount() Require(t, err) if delayedCount != 2 { @@ -172,12 +184,6 @@ func TestSequencerReorgFromDelayed(t *testing.T) { Fail(t, "Unexpected delayed message timestamp", userDelayedModified.Message.Header.Timestamp, "(expected", userDelayed.Message.Header.Timestamp, ")") } - batchCount, err := tracker.GetBatchCount() - Require(t, err) - if batchCount != 1 { - Fail(t, "Unexpected tracker batch count", batchCount, "(expected 1)") - } - emptyBatch = &SequencerInboxBatch{ BlockHash: [32]byte{}, ParentChainBlockNumber: 0, From fa075bff1b3818e5bab8f7fe69c6fe140ceb178e Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 9 Oct 2024 10:48:50 -0300 Subject: [PATCH 10/56] TestSequencerReorgFromLastDelayedMsg --- arbnode/delayed_seq_reorg_test.go | 200 ++++++++++++++++++++++++++++++ 1 file changed, 200 insertions(+) diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index 17f4756a53..86506a7f62 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -213,3 +213,203 @@ func TestSequencerReorgFromDelayed(t *testing.T) { Fail(t, "Unexpected tracker batch count", batchCount, "(expected 2)") } } + +func TestSequencerReorgFromLastDelayedMsg(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) + tracker, err := NewInboxTracker(db, streamer, nil, DefaultSnapSyncConfig) + Require(t, err) + + err = streamer.Start(ctx) + Require(t, err) + exec.Start(ctx) + init, err := streamer.GetMessage(0) + Require(t, err) + + initMsgDelayed := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: [32]byte{}, + Message: init.Message, + } + delayedRequestId := common.BigToHash(common.Big1) + userDelayed := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: initMsgDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId, + L1BaseFee: common.Big0, + }, + }, + } + delayedRequestId2 := common.BigToHash(common.Big2) + userDelayed2 := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed.AfterInboxAcc(), + Message: 
&arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId2, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{initMsgDelayed, userDelayed, userDelayed2}) + Require(t, err) + + serializedInitMsgBatch := make([]byte, 40) + binary.BigEndian.PutUint64(serializedInitMsgBatch[32:], 1) + initMsgBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 0, + BeforeInboxAcc: [32]byte{}, + AfterInboxAcc: [32]byte{1}, + AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), + AfterDelayedCount: 1, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedInitMsgBatch, + } + serializedUserMsgBatch := make([]byte, 40) + binary.BigEndian.PutUint64(serializedUserMsgBatch[32:], 2) + userMsgBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 1, + BeforeInboxAcc: [32]byte{1}, + AfterInboxAcc: [32]byte{2}, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedUserMsgBatch, + } + emptyBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 2, + BeforeInboxAcc: [32]byte{2}, + AfterInboxAcc: [32]byte{3}, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedUserMsgBatch, + } + err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{initMsgBatch, userMsgBatch, emptyBatch}) + Require(t, err) + + msgCount, err := streamer.GetMessageCount() + Require(t, err) + if msgCount != 3 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 3)") + } + + delayedCount, err := tracker.GetDelayedCount() + Require(t, err) + if delayedCount != 3 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 3)") + } + + batchCount, err := tracker.GetBatchCount() + Require(t, err) + if batchCount != 3 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 3)") + } + + // By modifying the timestamp of the userDelayed2 message, and adding it again, we cause a reorg + userDelayed2Modified := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: userDelayed2.Message.Header.Timestamp + 1, + RequestId: &delayedRequestId2, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayed2Modified}) + Require(t, err) + + // FAILS HERE + // userMsgBatch, and emptyBatch will be reorged out + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 1 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 1)") + } + + batchCount, err = tracker.GetBatchCount() + Require(t, err) + if batchCount != 1 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 1)") + } + + delayedCount, err = tracker.GetDelayedCount() + Require(t, err) + if delayedCount 
!= 3 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 3)") + } + + // guarantees that delayed msg 2 is userDelayedModified and not userDelayed + msg, err := tracker.GetDelayedMessage(ctx, 2) + Require(t, err) + if msg.Header.RequestId.Cmp(*userDelayed2Modified.Message.Header.RequestId) != 0 { + Fail(t, "Unexpected delayed message requestId", msg.Header.RequestId, "(expected", userDelayed2Modified.Message.Header.RequestId, ")") + } + if msg.Header.Timestamp != userDelayed2Modified.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", msg.Header.Timestamp, "(expected", userDelayed2Modified.Message.Header.Timestamp, ")") + } + if userDelayed2Modified.Message.Header.Timestamp == userDelayed2.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", userDelayed2Modified.Message.Header.Timestamp, "(expected", userDelayed2.Message.Header.Timestamp, ")") + } + + emptyBatch = &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 1, + BeforeInboxAcc: [32]byte{1}, + AfterInboxAcc: [32]byte{2}, + AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), + AfterDelayedCount: 1, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedInitMsgBatch, + } + err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{emptyBatch}) + Require(t, err) + + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 2 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 2)") + } + + batchCount, err = tracker.GetBatchCount() + Require(t, err) + if batchCount != 2 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 2)") + } +} From 80bfa33f29555b73fca061906768c399f71916a0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 22 Oct 2024 09:38:23 -0500 Subject: [PATCH 11/56] Fix setDelayedCountReorgAndWriteBatch --- arbnode/inbox_tracker.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 0eed2f5e1a..ffd7ed9ab8 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -462,6 +462,7 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage) error } } + firstPos := pos batch := t.db.NewBatch() for _, message := range messages { seqNum, err := message.Message.Header.SeqNum() @@ -504,13 +505,16 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage) error pos++ } - return t.setDelayedCountReorgAndWriteBatch(batch, pos, true) + return t.setDelayedCountReorgAndWriteBatch(batch, firstPos, pos, true) } // All-in-one delayed message count adjuster. Can go forwards or backwards. // Requires the mutex is held. Sets the delayed count and performs any sequencer batch reorg necessary. // Also deletes any future delayed messages. 
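// The firstNewDelayedMessagePos parameter introduced below appears intended to
// make the scan over delayedSequencedPrefix start at the first delayed message
// that was actually rewritten, instead of at the new total count, so sequencer
// batches that consumed any replaced delayed message are reorged even when the
// delayed message count itself stays the same or grows.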
-func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, newDelayedCount uint64, canReorgBatches bool) error { +func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, firstNewDelayedMessagePos uint64, newDelayedCount uint64, canReorgBatches bool) error { + if firstNewDelayedMessagePos > newDelayedCount { + return fmt.Errorf("firstNewDelayedMessagePos %v is after newDelayedCount %v", firstNewDelayedMessagePos, newDelayedCount) + } err := deleteStartingAt(t.db, batch, rlpDelayedMessagePrefix, uint64ToKey(newDelayedCount)) if err != nil { return err @@ -533,7 +537,7 @@ func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, newD return err } - seqBatchIter := t.db.NewIterator(delayedSequencedPrefix, uint64ToKey(newDelayedCount+1)) + seqBatchIter := t.db.NewIterator(delayedSequencedPrefix, uint64ToKey(firstNewDelayedMessagePos+1)) defer seqBatchIter.Release() var reorgSeqBatchesToCount *uint64 for seqBatchIter.Next() { @@ -865,7 +869,7 @@ func (t *InboxTracker) ReorgDelayedTo(count uint64) error { return errors.New("attempted to reorg to future delayed count") } - return t.setDelayedCountReorgAndWriteBatch(t.db.NewBatch(), count, false) + return t.setDelayedCountReorgAndWriteBatch(t.db.NewBatch(), count, count, false) } func (t *InboxTracker) ReorgBatchesTo(count uint64) error { From 241aed530dd4de0592ddc5f626a204211402ed96 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 22 Oct 2024 20:04:10 -0500 Subject: [PATCH 12/56] Address PR comment and update test --- arbnode/delayed_seq_reorg_test.go | 33 +++++++++++++++++++++++++++++-- arbnode/inbox_tracker.go | 16 +++++++++++++++ 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index 86506a7f62..6cec1e0904 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -333,6 +333,37 @@ func TestSequencerReorgFromLastDelayedMsg(t *testing.T) { Fail(t, "Unexpected tracker batch count", batchCount, "(expected 3)") } + // Adding an already existing message alongside a new one shouldn't cause a reorg + delayedRequestId3 := common.BigToHash(common.Big3) + userDelayed3 := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed2.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId3, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayed2, userDelayed3}) + Require(t, err) + + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 3 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 3)") + } + + batchCount, err = tracker.GetBatchCount() + Require(t, err) + if batchCount != 3 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 3)") + } + // By modifying the timestamp of the userDelayed2 message, and adding it again, we cause a reorg userDelayed2Modified := &DelayedInboxMessage{ BlockHash: [32]byte{}, @@ -351,8 +382,6 @@ func TestSequencerReorgFromLastDelayedMsg(t *testing.T) { err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayed2Modified}) Require(t, err) - // FAILS HERE - // userMsgBatch, and emptyBatch will be reorged out msgCount, err = streamer.GetMessageCount() Require(t, err) if msgCount != 1 { diff --git a/arbnode/inbox_tracker.go 
b/arbnode/inbox_tracker.go index ffd7ed9ab8..04b60924d5 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -479,6 +479,22 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage) error } nextAcc = message.AfterInboxAcc() + if firstPos == pos { + // Check if this message is a duplicate + haveAcc, err := t.GetDelayedAcc(seqNum) + if err == nil { + if haveAcc == nextAcc { + // Skip this message, as we already have it in our database + pos++ + firstPos++ + messages = messages[1:] + continue + } + } else if !errors.Is(err, AccumulatorNotFoundErr) { + return err + } + } + delayedMsgKey := dbKey(rlpDelayedMessagePrefix, seqNum) msgData, err := rlp.EncodeToBytes(message.Message) From bde52acec9e52d84228419da74177353a090e275 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Tue, 29 Oct 2024 20:51:50 +0530 Subject: [PATCH 13/56] fix linter --- arbnode/node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/node.go b/arbnode/node.go index 46fbec4b61..51c41d684b 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -787,7 +787,7 @@ func createNodeImpl( }, nil } -func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Address, l1Client arbutil.L1Interface, parentChainAssertionBlock uint64, batchCount uint64) (uint64, error) { +func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Address, l1Client *ethclient.Client, parentChainAssertionBlock uint64, batchCount uint64) (uint64, error) { bridge, err := bridgegen.NewIBridge(bridgeAddress, l1Client) if err != nil { return 0, err From 5eb9704138f997a7b76f1ebf18745116f4ed2c68 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 9 Dec 2024 15:46:36 -0600 Subject: [PATCH 14/56] Allow waiting for a minimum amount of time since parent assertion was created to post a new assertion --- arbnode/node.go | 2 ++ bold | 2 +- staker/bold/bold_staker.go | 7 ++++++- system_tests/bold_challenge_protocol_test.go | 1 + system_tests/bold_new_challenge_test.go | 1 + system_tests/overflow_assertions_test.go | 1 + 6 files changed, 12 insertions(+), 2 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index f2e3433ecd..48920cdbdd 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -212,6 +212,7 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.Staker = legacystaker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} + config.Bold.MinimumGapToParentAssertion = 0 return &config } @@ -230,6 +231,7 @@ func ConfigDefaultL2Test() *Config { config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} config.TransactionStreamer = DefaultTransactionStreamerConfig + config.Bold.MinimumGapToParentAssertion = 0 return &config } diff --git a/bold b/bold index d0a87de774..1b1c34c319 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit d0a87de774aecfa97161efd1b0a924d4d5fbcf74 +Subproject commit 1b1c34c31967242a492d29925640ede98d921fca diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go index 1a8eed80fa..ad0e24e338 100644 --- a/staker/bold/bold_staker.go +++ b/staker/bold/bold_staker.go @@ -57,7 +57,9 @@ type BoldConfig struct { // How often to scan for newly created assertions onchain. AssertionScanningInterval time.Duration `koanf:"assertion-scanning-interval"` // How often to confirm assertions onchain. 
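// The MinimumGapToParentAssertion duration added in this hunk is a simple time
// gate on assertion posting. A minimal sketch of the check it implies, assuming
// the standard library time package (illustrative only, not the bold
// implementation):
func minimumGapElapsed(parentCreatedAt time.Time, minGap time.Duration, now time.Time) bool {
	// Post a new assertion only once at least minGap has passed since the
	// parent assertion was created.
	return now.Sub(parentCreatedAt) >= minGap
}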
- AssertionConfirmingInterval time.Duration `koanf:"assertion-confirming-interval"` + AssertionConfirmingInterval time.Duration `koanf:"assertion-confirming-interval"` + // How long to wait since parent assertion was created to post a new assertion + MinimumGapToParentAssertion time.Duration `koanf:"minimum-gap-to-parent-assertion"` API bool `koanf:"api"` APIHost string `koanf:"api-host"` APIPort uint16 `koanf:"api-port"` @@ -98,6 +100,7 @@ var DefaultBoldConfig = BoldConfig{ AssertionPostingInterval: time.Minute * 15, AssertionScanningInterval: time.Minute, AssertionConfirmingInterval: time.Minute, + MinimumGapToParentAssertion: time.Minute, // Correct default? API: false, APIHost: "127.0.0.1", APIPort: 9393, @@ -121,6 +124,7 @@ func BoldConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".assertion-posting-interval", DefaultBoldConfig.AssertionPostingInterval, "assertion posting interval") f.Duration(prefix+".assertion-scanning-interval", DefaultBoldConfig.AssertionScanningInterval, "scan assertion interval") f.Duration(prefix+".assertion-confirming-interval", DefaultBoldConfig.AssertionConfirmingInterval, "confirm assertion interval") + f.Duration(prefix+".minimum-gap-to-parent-assertion", DefaultBoldConfig.MinimumGapToParentAssertion, "minimum duration to wait since the parent assertion was created to post a new assertion") f.Duration(prefix+".check-staker-switch-interval", DefaultBoldConfig.CheckStakerSwitchInterval, "how often to check if staker can switch to bold") f.Bool(prefix+".api", DefaultBoldConfig.API, "enable api") f.String(prefix+".api-host", DefaultBoldConfig.APIHost, "bold api host") @@ -448,6 +452,7 @@ func newBOLDChallengeManager( challengemanager.StackWithPollingInterval(scanningInterval), challengemanager.StackWithPostingInterval(postingInterval), challengemanager.StackWithConfirmationInterval(confirmingInterval), + challengemanager.StackWithMinimumGapToParentAssertion(config.MinimumGapToParentAssertion), challengemanager.StackWithTrackChallengeParentAssertionHashes(config.TrackChallengeParentAssertionHashes), challengemanager.StackWithHeaderProvider(l1Reader), } diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go index 777817bf3e..3677021c00 100644 --- a/system_tests/bold_challenge_protocol_test.go +++ b/system_tests/bold_challenge_protocol_test.go @@ -417,6 +417,7 @@ func testChallengeProtocolBOLD(t *testing.T, spawnerOpts ...server_arb.SpawnerOp challengemanager.StackWithMode(modes.MakeMode), challengemanager.StackWithPostingInterval(time.Second * 3), challengemanager.StackWithPollingInterval(time.Second), + challengemanager.StackWithMinimumGapToParentAssertionCreationTime(0), challengemanager.StackWithAverageBlockCreationTime(time.Second), } diff --git a/system_tests/bold_new_challenge_test.go b/system_tests/bold_new_challenge_test.go index ad6e44bc71..eb452ca5d4 100644 --- a/system_tests/bold_new_challenge_test.go +++ b/system_tests/bold_new_challenge_test.go @@ -344,6 +344,7 @@ func startBoldChallengeManager(t *testing.T, ctx context.Context, builder *NodeB challengemanager.StackWithPostingInterval(time.Second * 3), challengemanager.StackWithPollingInterval(time.Second), challengemanager.StackWithAverageBlockCreationTime(time.Second), + challengemanager.StackWithMinimumGapToParentAssertionCreationTime(0), } challengeManager, err := challengemanager.NewChallengeStack( diff --git a/system_tests/overflow_assertions_test.go b/system_tests/overflow_assertions_test.go index c024a43070..848c61e7d3 
100644 --- a/system_tests/overflow_assertions_test.go +++ b/system_tests/overflow_assertions_test.go @@ -224,6 +224,7 @@ func TestOverflowAssertions(t *testing.T) { challengemanager.StackWithPostingInterval(time.Second), challengemanager.StackWithPollingInterval(time.Millisecond * 500), challengemanager.StackWithAverageBlockCreationTime(time.Second), + challengemanager.StackWithMinimumGapToParentAssertionCreationTime(0), } manager, err := challengemanager.NewChallengeStack( From 81dcb5a01589dcb58fd009fd622af1aef38f8e1a Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 9 Dec 2024 16:23:57 -0600 Subject: [PATCH 15/56] minor fix --- system_tests/bold_challenge_protocol_test.go | 2 +- system_tests/bold_new_challenge_test.go | 2 +- system_tests/overflow_assertions_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go index 3677021c00..83700fc838 100644 --- a/system_tests/bold_challenge_protocol_test.go +++ b/system_tests/bold_challenge_protocol_test.go @@ -417,7 +417,7 @@ func testChallengeProtocolBOLD(t *testing.T, spawnerOpts ...server_arb.SpawnerOp challengemanager.StackWithMode(modes.MakeMode), challengemanager.StackWithPostingInterval(time.Second * 3), challengemanager.StackWithPollingInterval(time.Second), - challengemanager.StackWithMinimumGapToParentAssertionCreationTime(0), + challengemanager.StackWithMinimumGapToParentAssertion(0), challengemanager.StackWithAverageBlockCreationTime(time.Second), } diff --git a/system_tests/bold_new_challenge_test.go b/system_tests/bold_new_challenge_test.go index eb452ca5d4..fae4a57deb 100644 --- a/system_tests/bold_new_challenge_test.go +++ b/system_tests/bold_new_challenge_test.go @@ -344,7 +344,7 @@ func startBoldChallengeManager(t *testing.T, ctx context.Context, builder *NodeB challengemanager.StackWithPostingInterval(time.Second * 3), challengemanager.StackWithPollingInterval(time.Second), challengemanager.StackWithAverageBlockCreationTime(time.Second), - challengemanager.StackWithMinimumGapToParentAssertionCreationTime(0), + challengemanager.StackWithMinimumGapToParentAssertion(0), } challengeManager, err := challengemanager.NewChallengeStack( diff --git a/system_tests/overflow_assertions_test.go b/system_tests/overflow_assertions_test.go index 848c61e7d3..eb2bb01470 100644 --- a/system_tests/overflow_assertions_test.go +++ b/system_tests/overflow_assertions_test.go @@ -224,7 +224,7 @@ func TestOverflowAssertions(t *testing.T) { challengemanager.StackWithPostingInterval(time.Second), challengemanager.StackWithPollingInterval(time.Millisecond * 500), challengemanager.StackWithAverageBlockCreationTime(time.Second), - challengemanager.StackWithMinimumGapToParentAssertionCreationTime(0), + challengemanager.StackWithMinimumGapToParentAssertion(0), } manager, err := challengemanager.NewChallengeStack( From 231da30980575693c2c9f6d7be127ac77772cc9d Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 10 Dec 2024 10:52:02 -0600 Subject: [PATCH 16/56] update bold pin --- bold | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold b/bold index 1b1c34c319..81f1b421b2 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit 1b1c34c31967242a492d29925640ede98d921fca +Subproject commit 81f1b421b2dbbf96c7a2b427a9458667b07b0b27 From 3e59f8fb3a6576c84d2387574dd4907d1f60bd62 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 16 Dec 2024 08:31:57 -0600 Subject: [PATCH 17/56] update bold submodule --- bold | 2 +- 
nitro-testnode | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bold b/bold index d0a87de774..d3f4d600ab 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit d0a87de774aecfa97161efd1b0a924d4d5fbcf74 +Subproject commit d3f4d600abdacec800e9e27a429a730639233073 diff --git a/nitro-testnode b/nitro-testnode index c177f28234..fa19e22104 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit c177f282340285bcdae2d6a784547e2bb8b97498 +Subproject commit fa19e2210403ad24519ea46c2d337f54a9f47593 From da609b6c5bedb1999362ffaa28257f0fa7291d97 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 16 Dec 2024 08:50:05 -0600 Subject: [PATCH 18/56] include all the new config opts --- staker/bold/bold_staker.go | 88 +++++++++++++++++++++++++++++++++----- 1 file changed, 78 insertions(+), 10 deletions(-) diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go index 1a8eed80fa..2a6c32f5a9 100644 --- a/staker/bold/bold_staker.go +++ b/staker/bold/bold_staker.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "math/big" + "strings" "time" flag "github.com/spf13/pflag" @@ -57,16 +58,21 @@ type BoldConfig struct { // How often to scan for newly created assertions onchain. AssertionScanningInterval time.Duration `koanf:"assertion-scanning-interval"` // How often to confirm assertions onchain. - AssertionConfirmingInterval time.Duration `koanf:"assertion-confirming-interval"` - API bool `koanf:"api"` - APIHost string `koanf:"api-host"` - APIPort uint16 `koanf:"api-port"` - APIDBPath string `koanf:"api-db-path"` - TrackChallengeParentAssertionHashes []string `koanf:"track-challenge-parent-assertion-hashes"` - CheckStakerSwitchInterval time.Duration `koanf:"check-staker-switch-interval"` - StateProviderConfig StateProviderConfig `koanf:"state-provider-config"` - StartValidationFromStaked bool `koanf:"start-validation-from-staked"` + AssertionConfirmingInterval time.Duration `koanf:"assertion-confirming-interval"` + API bool `koanf:"api"` + APIHost string `koanf:"api-host"` + APIPort uint16 `koanf:"api-port"` + APIDBPath string `koanf:"api-db-path"` + TrackChallengeParentAssertionHashes []string `koanf:"track-challenge-parent-assertion-hashes"` + CheckStakerSwitchInterval time.Duration `koanf:"check-staker-switch-interval"` + StateProviderConfig StateProviderConfig `koanf:"state-provider-config"` + StartValidationFromStaked bool `koanf:"start-validation-from-staked"` + AutoDeposit bool `koanf:"auto-deposit"` + AutoIncreaseAllowance bool `koanf:"auto-increase-allowance"` + DelegatedStaking DelegatedStakingConfig `koanf:"delegated-staking"` + RPCBlockNumber string `koanf:"rpc-block-number"` strategy legacystaker.StakerStrategy + blockNum rpc.BlockNumber } func (c *BoldConfig) Validate() error { @@ -75,9 +81,31 @@ func (c *BoldConfig) Validate() error { return err } c.strategy = strategy + var blockNum rpc.BlockNumber + switch strings.ToLower(c.RPCBlockNumber) { + case "safe": + blockNum = rpc.SafeBlockNumber + case "finalized": + blockNum = rpc.FinalizedBlockNumber + case "latest": + blockNum = rpc.LatestBlockNumber + default: + return fmt.Errorf("unknown rpc block number \"%v\", expected either latest, safe, or finalized", c.RPCBlockNumber) + } + c.blockNum = blockNum return nil } +type DelegatedStakingConfig struct { + Enable bool `koanf:"enable"` + CustomWithdrawalAddress string `koanf:"custom-withdrawal-address"` +} + +var DefaultDelegatedStakingConfig = DelegatedStakingConfig{ + Enable: false, + CustomWithdrawalAddress: "", +} + type StateProviderConfig 
struct { // A name identifier for the validator for cosmetic purposes. ValidatorName string `koanf:"validator-name"` @@ -106,6 +134,10 @@ var DefaultBoldConfig = BoldConfig{ CheckStakerSwitchInterval: time.Minute, // Every minute, check if the Nitro node staker should switch to using BOLD. StateProviderConfig: DefaultStateProviderConfig, StartValidationFromStaked: true, + AutoDeposit: true, + AutoIncreaseAllowance: true, + DelegatedStaking: DefaultDelegatedStakingConfig, + RPCBlockNumber: "finalized", } var BoldModes = map[legacystaker.StakerStrategy]boldtypes.Mode{ @@ -118,6 +150,7 @@ var BoldModes = map[legacystaker.StakerStrategy]boldtypes.Mode{ func BoldConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultBoldConfig.Enable, "enable bold challenge protocol") f.String(prefix+".strategy", DefaultBoldConfig.Strategy, "define the bold validator staker strategy, either watchtower, defensive, stakeLatest, or makeNodes") + f.String(prefix+".rpc-block-number", DefaultBoldConfig.RPCBlockNumber, "define the block number to use for reading data onchain, either latest, safe, or finalized") f.Duration(prefix+".assertion-posting-interval", DefaultBoldConfig.AssertionPostingInterval, "assertion posting interval") f.Duration(prefix+".assertion-scanning-interval", DefaultBoldConfig.AssertionScanningInterval, "scan assertion interval") f.Duration(prefix+".assertion-confirming-interval", DefaultBoldConfig.AssertionConfirmingInterval, "confirm assertion interval") @@ -129,6 +162,9 @@ func BoldConfigAddOptions(prefix string, f *flag.FlagSet) { f.StringSlice(prefix+".track-challenge-parent-assertion-hashes", DefaultBoldConfig.TrackChallengeParentAssertionHashes, "only track challenges/edges with these parent assertion hashes") StateProviderConfigAddOptions(prefix+".state-provider-config", f) f.Bool(prefix+".start-validation-from-staked", DefaultBoldConfig.StartValidationFromStaked, "assume staked nodes are valid") + f.Bool(prefix+".auto-deposit", DefaultBoldConfig.StartValidationFromStaked, "assume staked nodes are valid") + f.Bool(prefix+".auto-increase-allowance", DefaultBoldConfig.StartValidationFromStaked, "assume staked nodes are valid") + DelegatedStakingConfigAddOptions(prefix+".delegated-staking", f) } func StateProviderConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -137,6 +173,11 @@ func StateProviderConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".machine-leaves-cache-path", DefaultStateProviderConfig.MachineLeavesCachePath, "path to machine cache") } +func DelegatedStakingConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultDelegatedStakingConfig.Enable, "check batch finality") + f.String(prefix+".custom-withdrawal-address", DefaultDelegatedStakingConfig.CustomWithdrawalAddress, "path to machine cache") +} + type BOLDStaker struct { stopwaiter.StopWaiter config *BoldConfig @@ -365,7 +406,25 @@ func newBOLDChallengeManager( if err != nil { return nil, fmt.Errorf("could not create challenge manager bindings: %w", err) } - assertionChain, err := solimpl.NewAssertionChain(ctx, rollupAddress, chalManager, txOpts, client, NewDataPosterTransactor(dataPoster)) + assertionChainOpts := []solimpl.Opt{ + solimpl.WithRpcHeadBlockNumber(config.blockNum), + } + if config.DelegatedStaking.Enable && config.DelegatedStaking.CustomWithdrawalAddress != "" { + withdrawalAddr := common.HexToAddress(config.DelegatedStaking.CustomWithdrawalAddress) + assertionChainOpts = append(assertionChainOpts, 
solimpl.WithCustomWithdrawalAddress(withdrawalAddr)) + } + if !config.AutoDeposit { + assertionChainOpts = append(assertionChainOpts, solimpl.WithoutAutoDeposit()) + } + assertionChain, err := solimpl.NewAssertionChain( + ctx, + rollupAddress, + chalManager, + txOpts, + client, + NewDataPosterTransactor(dataPoster), + assertionChainOpts..., + ) if err != nil { return nil, fmt.Errorf("could not create assertion chain: %w", err) } @@ -455,6 +514,15 @@ func newBOLDChallengeManager( apiAddr := fmt.Sprintf("%s:%d", config.APIHost, config.APIPort) stackOpts = append(stackOpts, challengemanager.StackWithAPIEnabled(apiAddr, apiDBPath)) } + if !config.AutoDeposit { + stackOpts = append(stackOpts, challengemanager.StackWithoutAutoDeposit()) + } + if !config.AutoIncreaseAllowance { + stackOpts = append(stackOpts, challengemanager.StackWithoutAutoAllowanceApproval()) + } + if config.DelegatedStaking.Enable { + stackOpts = append(stackOpts, challengemanager.StackWithDelegatedStaking()) + } manager, err := challengemanager.NewChallengeStack( assertionChain, From 177b7a887821aceb838254acd7c040485ba8d999 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 16 Dec 2024 09:09:41 -0600 Subject: [PATCH 19/56] comment --- staker/bold/bold_staker.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go index 2a6c32f5a9..b08b2ec18b 100644 --- a/staker/bold/bold_staker.go +++ b/staker/bold/bold_staker.go @@ -162,8 +162,8 @@ func BoldConfigAddOptions(prefix string, f *flag.FlagSet) { f.StringSlice(prefix+".track-challenge-parent-assertion-hashes", DefaultBoldConfig.TrackChallengeParentAssertionHashes, "only track challenges/edges with these parent assertion hashes") StateProviderConfigAddOptions(prefix+".state-provider-config", f) f.Bool(prefix+".start-validation-from-staked", DefaultBoldConfig.StartValidationFromStaked, "assume staked nodes are valid") - f.Bool(prefix+".auto-deposit", DefaultBoldConfig.StartValidationFromStaked, "assume staked nodes are valid") - f.Bool(prefix+".auto-increase-allowance", DefaultBoldConfig.StartValidationFromStaked, "assume staked nodes are valid") + f.Bool(prefix+".auto-deposit", DefaultBoldConfig.AutoDeposit, "auto-deposit stake token whenever making a move in BoLD that does not have enough stake token balance") + f.Bool(prefix+".auto-increase-allowance", DefaultBoldConfig.AutoIncreaseAllowance, "auto-increase spending allowance of the stake token by the rollup and challenge manager contracts") DelegatedStakingConfigAddOptions(prefix+".delegated-staking", f) } @@ -174,8 +174,8 @@ func StateProviderConfigAddOptions(prefix string, f *flag.FlagSet) { } func DelegatedStakingConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultDelegatedStakingConfig.Enable, "check batch finality") - f.String(prefix+".custom-withdrawal-address", DefaultDelegatedStakingConfig.CustomWithdrawalAddress, "path to machine cache") + f.Bool(prefix+".enable", DefaultDelegatedStakingConfig.Enable, "enable delegated staking by having the validator call newStake on startup") + f.String(prefix+".custom-withdrawal-address", DefaultDelegatedStakingConfig.CustomWithdrawalAddress, "enable a custom withdrawal address for staking on the rollup contract, useful for delegated stakers") } type BOLDStaker struct { From 6e0b0e0dbd84dc07681cb2fdf3c7a5696b114c42 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Tue, 17 Dec 2024 19:24:47 +0530 Subject: [PATCH 20/56] Store last message pruned in database --- 
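
The rpc-block-number handling added in patch 18 above normalizes a user-supplied string into a go-ethereum rpc.BlockNumber inside Validate. Below is a standalone sketch of that mapping, pulled out of the config type purely for illustration; the function name parseRPCBlockNumber is hypothetical.

package example

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/rpc"
)

// parseRPCBlockNumber maps the accepted strings onto go-ethereum's
// rpc.BlockNumber constants, rejecting anything else up front so the
// staker never runs with an unexpected block tag.
func parseRPCBlockNumber(s string) (rpc.BlockNumber, error) {
	switch strings.ToLower(s) {
	case "safe":
		return rpc.SafeBlockNumber, nil
	case "finalized":
		return rpc.FinalizedBlockNumber, nil
	case "latest":
		return rpc.LatestBlockNumber, nil
	default:
		return 0, fmt.Errorf("unknown rpc block number %q, expected either latest, safe, or finalized", s)
	}
}
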
arbnode/message_pruner.go | 50 +++++++++++++++++++++++++++++++++++---- arbnode/schema.go | 12 ++++++---- 2 files changed, 53 insertions(+), 9 deletions(-) diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 840a15f328..c86b88f2d0 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -121,7 +122,7 @@ func (m *MessagePruner) prune(ctx context.Context, count arbutil.MessageIndex, g } func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCount arbutil.MessageIndex, delayedMessageCount uint64) error { - prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, &m.cachedPrunedMessageResult, uint64(messageCount)) + prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, lastPrunedMessageResultKey, &m.cachedPrunedMessageResult, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting message results: %w", err) } @@ -129,7 +130,7 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned message results:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, &m.cachedPrunedBlockHashesInputFeed, uint64(messageCount)) + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, lastPrunedBlockHashInputFeedKey, &m.cachedPrunedBlockHashesInputFeed, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting expected block hashes: %w", err) } @@ -137,7 +138,7 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned expected block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, lastPrunedMessageKey, &m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting last batch messages: %w", err) } @@ -145,7 +146,7 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned last batch messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, &m.cachedPrunedDelayedMessages, delayedMessageCount) + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, lastPrunedRlpDelayedMessageKey, &m.cachedPrunedDelayedMessages, delayedMessageCount) if err != nil { return fmt.Errorf("error deleting last batch delayed messages: %w", err) } @@ -157,8 +158,12 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun // deleteFromLastPrunedUptoEndKey is similar to deleteFromRange but automatically populates the start key // cachedStartMinKey must not be nil. 
It's set to the new start key at the end of this function if successful. -func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, cachedStartMinKey *uint64, endMinKey uint64) ([]uint64, error) { +// Checks if the last pruned key is set in the database and uses it as the start key if it is. +func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, lastPrunedKey []byte, cachedStartMinKey *uint64, endMinKey uint64) ([]uint64, error) { startMinKey := *cachedStartMinKey + if startMinKey == 0 { + startMinKey = fetchLastPrunedKey(db, lastPrunedKey) + } if startMinKey == 0 { startIter := db.NewIterator(prefix, uint64ToKey(1)) if !startIter.Next() { @@ -169,11 +174,46 @@ func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, pref } if endMinKey <= startMinKey { *cachedStartMinKey = startMinKey + insertLastPrunedKey(db, lastPrunedKey, startMinKey) return nil, nil } keys, err := deleteFromRange(ctx, db, prefix, startMinKey, endMinKey-1) if err == nil { *cachedStartMinKey = endMinKey - 1 + insertLastPrunedKey(db, lastPrunedKey, endMinKey-1) } return keys, err } + +func insertLastPrunedKey(db ethdb.Database, lastPrunedKey []byte, lastPrunedValue uint64) { + lastPrunedValueByte, err := rlp.EncodeToBytes(lastPrunedValue) + if err != nil { + log.Error("error encoding last pruned value: %w", err) + } else { + err = db.Put(lastPrunedKey, lastPrunedValueByte) + if err != nil { + log.Error("error saving last pruned value: %w", err) + } + } +} + +func fetchLastPrunedKey(db ethdb.Database, lastPrunedKey []byte) uint64 { + hasKey, err := db.Has(lastPrunedKey) + if err != nil { + log.Warn("error checking for last pruned key: %w", err) + } else if hasKey { + lastPrunedValueByte, err := db.Get(lastPrunedKey) + if err != nil { + log.Warn("error fetching last pruned key: %w", err) + } else { + var lastPrunedValue uint64 + err = rlp.DecodeBytes(lastPrunedValueByte, &lastPrunedValue) + if err != nil { + log.Warn("error decoding last pruned value: %w", err) + } else { + return lastPrunedValue + } + } + } + return 0 +} diff --git a/arbnode/schema.go b/arbnode/schema.go index 1aaded2b95..e06d6a75c7 100644 --- a/arbnode/schema.go +++ b/arbnode/schema.go @@ -13,10 +13,14 @@ var ( sequencerBatchMetaPrefix []byte = []byte("s") // maps a batch sequence number to BatchMetadata delayedSequencedPrefix []byte = []byte("a") // maps a delayed message count to the first sequencer batch sequence number with this delayed count - messageCountKey []byte = []byte("_messageCount") // contains the current message count - delayedMessageCountKey []byte = []byte("_delayedMessageCount") // contains the current delayed message count - sequencerBatchCountKey []byte = []byte("_sequencerBatchCount") // contains the current sequencer message count - dbSchemaVersion []byte = []byte("_schemaVersion") // contains a uint64 representing the database schema version + messageCountKey []byte = []byte("_messageCount") // contains the current message count + lastPrunedMessageResultKey []byte = []byte("_lastPrunedMessageResultKey") // contains the last pruned message result key + lastPrunedBlockHashInputFeedKey []byte = []byte("_lastPrunedBlockHashInputFeedPrefix") // contains the last pruned block hash input feed key + lastPrunedMessageKey []byte = []byte("_lastPrunedMessageKey") // contains the last pruned message key + lastPrunedRlpDelayedMessageKey []byte = []byte("_lastPrunedRlpDelayedMessageKey") // contains the last pruned RLP delayed message key + 
delayedMessageCountKey []byte = []byte("_delayedMessageCount") // contains the current delayed message count + sequencerBatchCountKey []byte = []byte("_sequencerBatchCount") // contains the current sequencer message count + dbSchemaVersion []byte = []byte("_schemaVersion") // contains a uint64 representing the database schema version ) const currentDbSchemaVersion uint64 = 1 From ffdce5c13833c00b0a8f58463b95209e66cdc0aa Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Tue, 17 Dec 2024 14:01:58 -0600 Subject: [PATCH 21/56] edits --- nitro-testnode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nitro-testnode b/nitro-testnode index fa19e22104..c177f28234 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit fa19e2210403ad24519ea46c2d337f54a9f47593 +Subproject commit c177f282340285bcdae2d6a784547e2bb8b97498 From e93d7cae1fdfbd061f199c5efb1097c1751c045a Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Fri, 20 Dec 2024 17:25:55 +0530 Subject: [PATCH 22/56] Prevent calling arbtrace_ against nitro with the latest block number --- execution/gethexec/api.go | 55 +++++++++++++++++++++++++++++++------- execution/gethexec/node.go | 1 + 2 files changed, 47 insertions(+), 9 deletions(-) diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index 713d1496f9..d3ff209e64 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -285,14 +285,16 @@ func stateAndHeader(blockchain *core.BlockChain, block uint64) (*arbosState.Arbo type ArbTraceForwarderAPI struct { fallbackClientUrl string fallbackClientTimeout time.Duration + blockchain *core.BlockChain initialized atomic.Bool mutex sync.Mutex fallbackClient types.FallbackClient } -func NewArbTraceForwarderAPI(fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { +func NewArbTraceForwarderAPI(blockchain *core.BlockChain, fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { return &ArbTraceForwarderAPI{ + blockchain: blockchain, fallbackClientUrl: fallbackClientUrl, fallbackClientTimeout: fallbackClientTimeout, } @@ -332,16 +334,46 @@ func (api *ArbTraceForwarderAPI) forward(ctx context.Context, method string, arg return resp, nil } -func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_call", callArgs, traceTypes, blockNum) +func (api *ArbTraceForwarderAPI) ClipToPostNitroGenesis(blockNumOrHash json.RawMessage) (json.RawMessage, error) { + var bnh rpc.BlockNumberOrHash + err := bnh.UnmarshalJSON(blockNumOrHash) + if err != nil { + return nil, err + } + blockNum, isNum := bnh.Number() + if !isNum { + return blockNumOrHash, nil + } + blockNum, _ = api.blockchain.ClipToPostNitroGenesis(blockNum) + bnh.BlockNumber = &blockNum + return json.Marshal(bnh) } -func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMessage, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_callMany", calls, blockNum) +func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + var err error + blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_call", callArgs, traceTypes, blockNumOrHash) } -func (api *ArbTraceForwarderAPI) 
ReplayBlockTransactions(ctx context.Context, blockNum json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_replayBlockTransactions", blockNum, traceTypes) +func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + var err error + blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_callMany", calls, blockNumOrHash) +} + +func (api *ArbTraceForwarderAPI) ReplayBlockTransactions(ctx context.Context, blockNumOrHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { + var err error + blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_replayBlockTransactions", blockNumOrHash, traceTypes) } func (api *ArbTraceForwarderAPI) ReplayTransaction(ctx context.Context, txHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { @@ -356,8 +388,13 @@ func (api *ArbTraceForwarderAPI) Get(ctx context.Context, txHash json.RawMessage return api.forward(ctx, "arbtrace_get", txHash, path) } -func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_block", blockNum) +func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + var err error + blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_block", blockNumOrHash) } func (api *ArbTraceForwarderAPI) Filter(ctx context.Context, filter json.RawMessage) (*json.RawMessage, error) { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 11d173a21e..b25ca5763c 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -284,6 +284,7 @@ func CreateExecutionNode( Namespace: "arbtrace", Version: "1.0", Service: NewArbTraceForwarderAPI( + l2BlockChain, config.RPC.ClassicRedirect, config.RPC.ClassicRedirectTimeout, ), From 429b0c92130fda94fd20c2b9e5765c5294bb59a4 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 23 Dec 2024 18:18:24 +0530 Subject: [PATCH 23/56] Changes based on PR comments --- execution/gethexec/api.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index d3ff209e64..c58fbe7a1e 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -334,24 +334,26 @@ func (api *ArbTraceForwarderAPI) forward(ctx context.Context, method string, arg return resp, nil } -func (api *ArbTraceForwarderAPI) ClipToPostNitroGenesis(blockNumOrHash json.RawMessage) (json.RawMessage, error) { +func (api *ArbTraceForwarderAPI) blockSupportedByClassicNode(blockNumOrHash json.RawMessage) error { var bnh rpc.BlockNumberOrHash err := bnh.UnmarshalJSON(blockNumOrHash) if err != nil { - return nil, err + return err } blockNum, isNum := bnh.Number() if !isNum { - return blockNumOrHash, nil + return nil } blockNum, _ = api.blockchain.ClipToPostNitroGenesis(blockNum) - bnh.BlockNumber = &blockNum - return json.Marshal(bnh) + if blockNum < 0 || blockNum > rpc.BlockNumber(api.blockchain.Config().ArbitrumChainParams.GenesisBlockNum) { + return fmt.Errorf("block number %v is not supported by classic node", blockNum) + } + return nil } func (api 
*ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { var err error - blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + err = api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } @@ -360,7 +362,7 @@ func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMess func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { var err error - blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + err = api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } @@ -369,7 +371,7 @@ func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMes func (api *ArbTraceForwarderAPI) ReplayBlockTransactions(ctx context.Context, blockNumOrHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { var err error - blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + err = api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } @@ -390,7 +392,7 @@ func (api *ArbTraceForwarderAPI) Get(ctx context.Context, txHash json.RawMessage func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { var err error - blockNumOrHash, err = api.ClipToPostNitroGenesis(blockNumOrHash) + err = api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } From ccedd73a55333713dfa20a8190b023d93e758ac6 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 23 Dec 2024 18:36:20 +0530 Subject: [PATCH 24/56] Fix lint --- execution/gethexec/api.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index c58fbe7a1e..f813dc6a8c 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -344,7 +344,7 @@ func (api *ArbTraceForwarderAPI) blockSupportedByClassicNode(blockNumOrHash json if !isNum { return nil } - blockNum, _ = api.blockchain.ClipToPostNitroGenesis(blockNum) + // #nosec G115 if blockNum < 0 || blockNum > rpc.BlockNumber(api.blockchain.Config().ArbitrumChainParams.GenesisBlockNum) { return fmt.Errorf("block number %v is not supported by classic node", blockNum) } @@ -352,8 +352,7 @@ func (api *ArbTraceForwarderAPI) blockSupportedByClassicNode(blockNumOrHash json } func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { - var err error - err = api.blockSupportedByClassicNode(blockNumOrHash) + err := api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } @@ -361,8 +360,7 @@ func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMess } func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { - var err error - err = api.blockSupportedByClassicNode(blockNumOrHash) + err := api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } @@ -370,8 +368,7 @@ func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMes } func (api *ArbTraceForwarderAPI) ReplayBlockTransactions(ctx context.Context, blockNumOrHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { - var err error - err = 
api.blockSupportedByClassicNode(blockNumOrHash) + err := api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } @@ -391,8 +388,7 @@ func (api *ArbTraceForwarderAPI) Get(ctx context.Context, txHash json.RawMessage } func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { - var err error - err = api.blockSupportedByClassicNode(blockNumOrHash) + err := api.blockSupportedByClassicNode(blockNumOrHash) if err != nil { return nil, err } From 562747c4da3b75270b795434b4363609fa227b1d Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 23 Dec 2024 19:19:18 +0530 Subject: [PATCH 25/56] Add metrics for how many Stylus calls and gas used --- arbos/programs/programs.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 06ba6ead8c..5e08ba01ac 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" gethParams "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" @@ -163,6 +164,21 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVers return stylusVersion, codeHash, info.moduleHash, dataFee, false, p.setProgram(codeHash, programData) } +func runModeToString(runmode core.MessageRunMode) string { + switch runmode { + case core.MessageCommitMode: + return "commit_runmode" + case core.MessageGasEstimationMode: + return "gas_estimation_runmode" + case core.MessageEthcallMode: + return "eth_call_runmode" + case core.MessageReplayMode: + return "replay_runmode" + default: + return "unknown_runmode" + } +} + func (p Programs) CallProgram( scope *vm.ScopeContext, statedb vm.StateDB, @@ -250,7 +266,10 @@ func (p Programs) CallProgram( if runmode == core.MessageCommitMode { arbos_tag = statedb.Database().WasmCacheTag() } + + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/program_calls/%s", runModeToString(runmode)), nil).Inc(1) ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) + gasUsed := callCost if len(ret) > 0 && arbosVersion >= gethParams.ArbosVersion_StylusFixes { // Ensure that return data costs as least as much as it would in the EVM. 
evmCost := evmMemoryCost(uint64(len(ret))) @@ -260,7 +279,12 @@ func (p Programs) CallProgram( } maxGasToReturn := startingGas - evmCost contract.Gas = am.MinInt(contract.Gas, maxGasToReturn) + if evmCost > gasUsed { + gasUsed = evmCost + } } + // #nosec G115 + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(gasUsed)) return ret, err } From e3aaf054df3bb7bfb8d2cfb4f4267bde6849a2f2 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 23 Dec 2024 13:07:27 -0600 Subject: [PATCH 26/56] update submod --- bold | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold b/bold index d3f4d600ab..53a6195bd7 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit d3f4d600abdacec800e9e27a429a730639233073 +Subproject commit 53a6195bd7bbd749a81319920429a98b0b9213d4 From 944e1fe96093208b27e98d49cd3f57b0f2ba20b4 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 26 Dec 2024 19:25:56 +0530 Subject: [PATCH 27/56] Changes based on PR comments --- arbos/programs/programs.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 5e08ba01ac..e640728ae4 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -269,7 +269,6 @@ func (p Programs) CallProgram( metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/program_calls/%s", runModeToString(runmode)), nil).Inc(1) ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) - gasUsed := callCost if len(ret) > 0 && arbosVersion >= gethParams.ArbosVersion_StylusFixes { // Ensure that return data costs as least as much as it would in the EVM. evmCost := evmMemoryCost(uint64(len(ret))) @@ -279,12 +278,9 @@ func (p Programs) CallProgram( } maxGasToReturn := startingGas - evmCost contract.Gas = am.MinInt(contract.Gas, maxGasToReturn) - if evmCost > gasUsed { - gasUsed = evmCost - } } // #nosec G115 - metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(gasUsed)) + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas - contract.Gas)) return ret, err } From f2bfd88fc2508e7dddda97f3f23e57eee9c30906 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 30 Dec 2024 20:37:51 -0600 Subject: [PATCH 28/56] edit bold submod --- bold | 2 +- go-ethereum | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bold b/bold index 53a6195bd7..3df1191028 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit 53a6195bd7bbd749a81319920429a98b0b9213d4 +Subproject commit 3df119102815a7c17b87251e18df6e09f6e58128 diff --git a/go-ethereum b/go-ethereum index 26b4dff616..0d33cae0dd 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 26b4dff6165650b6963fb1b6f88958c29c059214 +Subproject commit 0d33cae0dd24ce387c589532e9557911780b389c From 7fd9dba8e7cb3b9540065057c88eb51ac3f58e2f Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 30 Dec 2024 21:38:07 -0600 Subject: [PATCH 29/56] build --- arbos/util/transfer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbos/util/transfer.go b/arbos/util/transfer.go index 0b61868abe..55281fa284 100644 --- a/arbos/util/transfer.go +++ b/arbos/util/transfer.go @@ -67,7 +67,7 @@ func TransferBalance( if arbmath.BigLessThan(balance.ToBig(), amount) { return fmt.Errorf("%w: addr %v have %v want %v", 
vm.ErrInsufficientBalance, *from, balance, amount) } - if evm.Context.ArbOSVersion < params.ArbosVersion_30 && amount.Sign() == 0 { + if evm.Context.ArbOSVersion < params.ArbosVersion_Stylus && amount.Sign() == 0 { evm.StateDB.CreateZombieIfDeleted(*from) } evm.StateDB.SubBalance(*from, uint256.MustFromBig(amount), tracing.BalanceChangeTransfer) From a46dc68c366a76290e7f480f2d18e3c214577414 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Tue, 31 Dec 2024 10:31:30 -0600 Subject: [PATCH 30/56] geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 0d33cae0dd..26b4dff616 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 0d33cae0dd24ce387c589532e9557911780b389c +Subproject commit 26b4dff6165650b6963fb1b6f88958c29c059214 From ed825913fcf2fb65a8e08e930524aeb88845462f Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 31 Dec 2024 14:29:09 -0700 Subject: [PATCH 31/56] wasmer: check against correct branch --- .github/workflows/submodule-pin-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 94fa705655..a6a7d9b66c 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -30,7 +30,7 @@ jobs: #TODO Rachel to check these are the intended branches. [arbitrator/langs/c]=origin/vm-storage-cache - [arbitrator/tools/wasmer]=origin/adopt-v4.2.8 + [arbitrator/tools/wasmer]=origin/stylus ) divergent=0 for mod in `git submodule --quiet foreach 'echo $name'`; do From 2188166608afed59ab450c995a0327b2598ad4ac Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 2 Jan 2025 20:16:17 +0530 Subject: [PATCH 32/56] Changes based on PR comments --- arbos/programs/programs.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index e640728ae4..8b045ef822 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -274,6 +274,7 @@ func (p Programs) CallProgram( evmCost := evmMemoryCost(uint64(len(ret))) if startingGas < evmCost { contract.Gas = 0 + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas - contract.Gas)) return nil, vm.ErrOutOfGas } maxGasToReturn := startingGas - evmCost From 002ef4ef829c5ce26a5732555439098995e87db1 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 2 Jan 2025 20:17:08 +0530 Subject: [PATCH 33/56] Changes based on PR comments --- arbos/programs/programs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 8b045ef822..d6a2d27923 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -274,7 +274,7 @@ func (p Programs) CallProgram( evmCost := evmMemoryCost(uint64(len(ret))) if startingGas < evmCost { contract.Gas = 0 - metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas - contract.Gas)) + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas)) return nil, vm.ErrOutOfGas } maxGasToReturn := startingGas - evmCost From 56dea63fdca950a7d5d1955661962015958fb699 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 2 Jan 2025 20:20:07 +0530 Subject: [PATCH 34/56] Changes based on PR comments --- execution/gethexec/api.go | 9 +++++---- 
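
The Stylus metering added in patches 25, 27, 32 and 33 above counts program calls per run mode with go-ethereum's metrics package and charges a gas counter with the difference between the gas handed to the call and the gas the contract still holds afterwards. The following is a compact sketch of that pattern; the counter names match the diffs, but the recordStylusCall helper is illustrative rather than the exact code.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

// recordStylusCall bumps a per-runmode call counter and accounts the gas
// actually consumed, i.e. starting gas minus whatever the contract kept.
func recordStylusCall(runMode string, startingGas, remainingGas uint64) {
	metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/program_calls/%s", runMode), nil).Inc(1)
	gasUsed := startingGas - remainingGas
	// #nosec G115 -- gas values fit comfortably in int64 for metering purposes
	metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runMode), nil).Inc(int64(gasUsed))
}
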
execution/gethexec/node.go | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index f813dc6a8c..574adb0ed4 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/ethereum/go-ethereum/params" "math/big" "sync" "sync/atomic" @@ -285,16 +286,16 @@ func stateAndHeader(blockchain *core.BlockChain, block uint64) (*arbosState.Arbo type ArbTraceForwarderAPI struct { fallbackClientUrl string fallbackClientTimeout time.Duration - blockchain *core.BlockChain + blockchainConfig *params.ChainConfig initialized atomic.Bool mutex sync.Mutex fallbackClient types.FallbackClient } -func NewArbTraceForwarderAPI(blockchain *core.BlockChain, fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { +func NewArbTraceForwarderAPI(blockchainConfig *params.ChainConfig, fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { return &ArbTraceForwarderAPI{ - blockchain: blockchain, + blockchainConfig: blockchainConfig, fallbackClientUrl: fallbackClientUrl, fallbackClientTimeout: fallbackClientTimeout, } @@ -345,7 +346,7 @@ func (api *ArbTraceForwarderAPI) blockSupportedByClassicNode(blockNumOrHash json return nil } // #nosec G115 - if blockNum < 0 || blockNum > rpc.BlockNumber(api.blockchain.Config().ArbitrumChainParams.GenesisBlockNum) { + if blockNum < 0 || blockNum > rpc.BlockNumber(api.blockchainConfig.ArbitrumChainParams.GenesisBlockNum) { return fmt.Errorf("block number %v is not supported by classic node", blockNum) } return nil diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index b25ca5763c..16e4948723 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -284,7 +284,7 @@ func CreateExecutionNode( Namespace: "arbtrace", Version: "1.0", Service: NewArbTraceForwarderAPI( - l2BlockChain, + l2BlockChain.Config(), config.RPC.ClassicRedirect, config.RPC.ClassicRedirectTimeout, ), From 693c4066100689aaefee081b3f9a1e0c4175bdb8 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 2 Jan 2025 20:32:20 +0530 Subject: [PATCH 35/56] Changes based on PR comments --- arbnode/message_pruner.go | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 5cdd1128d0..3805bd8637 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -201,19 +201,21 @@ func fetchLastPrunedKey(db ethdb.Database, lastPrunedKey []byte) uint64 { hasKey, err := db.Has(lastPrunedKey) if err != nil { log.Warn("error checking for last pruned key: %w", err) - } else if hasKey { - lastPrunedValueByte, err := db.Get(lastPrunedKey) - if err != nil { - log.Warn("error fetching last pruned key: %w", err) - } else { - var lastPrunedValue uint64 - err = rlp.DecodeBytes(lastPrunedValueByte, &lastPrunedValue) - if err != nil { - log.Warn("error decoding last pruned value: %w", err) - } else { - return lastPrunedValue - } - } + return 0 + } + if !hasKey { + return 0 + } + lastPrunedValueByte, err := db.Get(lastPrunedKey) + if err != nil { + log.Warn("error fetching last pruned key: %w", err) + return 0 + } + var lastPrunedValue uint64 + err = rlp.DecodeBytes(lastPrunedValueByte, &lastPrunedValue) + if err != nil { + log.Warn("error decoding last pruned value: %w", err) + return 0 } - return 0 + return lastPrunedValue } From 205e12ed3b3191620e189e1b13b482d009724454 Mon Sep 17 00:00:00 2001 
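
Patches 22 through 24 and 34 above make the arbtrace_ forwarder reject requests for post-Nitro-genesis blocks instead of clipping them, because the classic node behind the redirect can only answer for blocks up to the Nitro genesis block. A hedged sketch of that guard in isolation follows; the function name checkClassicBlock is illustrative.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

// checkClassicBlock rejects block labels (latest/safe/finalized, encoded as
// negative numbers) and any block newer than the Nitro genesis block, since
// the classic node cannot serve them. Hash-based requests are handled by the
// caller before reaching this check.
func checkClassicBlock(blockNum rpc.BlockNumber, genesisBlockNum uint64) error {
	// #nosec G115 -- genesis block numbers fit in int64
	if blockNum < 0 || blockNum > rpc.BlockNumber(genesisBlockNum) {
		return fmt.Errorf("block number %v is not supported by classic node", blockNum)
	}
	return nil
}
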
From: Aman Sanghi Date: Thu, 2 Jan 2025 20:33:18 +0530 Subject: [PATCH 36/56] lint --- arbos/programs/programs.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index d6a2d27923..c7bb693d26 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -274,6 +274,7 @@ func (p Programs) CallProgram( evmCost := evmMemoryCost(uint64(len(ret))) if startingGas < evmCost { contract.Gas = 0 + // #nosec G115 metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas)) return nil, vm.ErrOutOfGas } From 7c1b94f52513a958ade1a6cde2343519dd0e9aff Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 2 Jan 2025 20:34:40 +0530 Subject: [PATCH 37/56] lint --- execution/gethexec/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index 574adb0ed4..699aa081b5 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -8,7 +8,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/ethereum/go-ethereum/params" "math/big" "sync" "sync/atomic" @@ -18,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbos/arbosState" From 669c5ef052d76ac264c0ba3fed0eeb4717db1a9a Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 2 Jan 2025 21:01:43 +0530 Subject: [PATCH 38/56] Changes based on PR comments --- arbnode/message_pruner.go | 46 ++++++++++++++++++++++----------------- arbnode/schema.go | 14 +++++------- 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 3805bd8637..b18796a4c5 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -24,15 +24,13 @@ import ( type MessagePruner struct { stopwaiter.StopWaiter - transactionStreamer *TransactionStreamer - inboxTracker *InboxTracker - config MessagePrunerConfigFetcher - pruningLock sync.Mutex - lastPruneDone time.Time - cachedPrunedMessages uint64 - cachedPrunedBlockHashesInputFeed uint64 - cachedPrunedMessageResult uint64 - cachedPrunedDelayedMessages uint64 + transactionStreamer *TransactionStreamer + inboxTracker *InboxTracker + config MessagePrunerConfigFetcher + pruningLock sync.Mutex + lastPruneDone time.Time + cachedPrunedMessages uint64 + cachedPrunedDelayedMessages uint64 } type MessagePrunerConfig struct { @@ -122,7 +120,14 @@ func (m *MessagePruner) prune(ctx context.Context, count arbutil.MessageIndex, g } func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCount arbutil.MessageIndex, delayedMessageCount uint64) error { - prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, lastPrunedMessageResultKey, &m.cachedPrunedMessageResult, uint64(messageCount)) + if m.cachedPrunedMessages == 0 { + m.cachedPrunedMessages = fetchLastPrunedKey(m.transactionStreamer.db, lastPrunedMessageKey) + } + if m.cachedPrunedDelayedMessages == 0 { + m.cachedPrunedDelayedMessages = fetchLastPrunedKey(m.inboxTracker.db, lastPrunedDelayedMessageKey) + } + lastPrunedMessage := m.cachedPrunedMessages + prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, &lastPrunedMessage, uint64(messageCount)) if err != nil { 
return fmt.Errorf("error deleting message results: %w", err) } @@ -130,7 +135,8 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned message results:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, lastPrunedBlockHashInputFeedKey, &m.cachedPrunedBlockHashesInputFeed, uint64(messageCount)) + lastPrunedMessage = m.cachedPrunedMessages + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, &lastPrunedMessage, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting expected block hashes: %w", err) } @@ -138,32 +144,34 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned expected block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, lastPrunedMessageKey, &m.cachedPrunedMessages, uint64(messageCount)) + lastPrunedMessage = m.cachedPrunedMessages + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &lastPrunedMessage, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting last batch messages: %w", err) } if len(prunedKeysRange) > 0 { log.Info("Pruned last batch messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } + insertLastPrunedKey(m.transactionStreamer.db, lastPrunedMessageKey, lastPrunedMessage) + m.cachedPrunedMessages = lastPrunedMessage - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, lastPrunedRlpDelayedMessageKey, &m.cachedPrunedDelayedMessages, delayedMessageCount) + lastPrunedDelayedMessage := m.cachedPrunedDelayedMessages + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, &lastPrunedDelayedMessage, delayedMessageCount) if err != nil { return fmt.Errorf("error deleting last batch delayed messages: %w", err) } if len(prunedKeysRange) > 0 { log.Info("Pruned last batch delayed messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } + insertLastPrunedKey(m.inboxTracker.db, lastPrunedDelayedMessageKey, lastPrunedMessage) + m.cachedPrunedDelayedMessages = lastPrunedDelayedMessage return nil } // deleteFromLastPrunedUptoEndKey is similar to deleteFromRange but automatically populates the start key // cachedStartMinKey must not be nil. It's set to the new start key at the end of this function if successful. -// Checks if the last pruned key is set in the database and uses it as the start key if it is. 
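
Patch 38 above reshapes the pruner around a simple checkpoint scheme: the last pruned position is kept in memory, seeded once from a dedicated database key, and written back after each round so a restarted node does not re-scan from key 1. The next snippet is a minimal sketch of the persist-and-restore step only, assuming an ethdb key/value store and an RLP-encoded uint64 marker; the helper names are illustrative.

package example

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

// saveCheckpoint records the last pruned position; failures are logged and
// tolerated because the checkpoint is only an optimization.
func saveCheckpoint(db ethdb.KeyValueWriter, key []byte, position uint64) {
	encoded, err := rlp.EncodeToBytes(position)
	if err != nil {
		log.Error("error encoding last pruned value", "err", err)
		return
	}
	if err := db.Put(key, encoded); err != nil {
		log.Error("error saving last pruned value", "err", err)
	}
}

// loadCheckpoint returns the stored position, or 0 when the key is absent or
// unreadable, in which case the pruner falls back to scanning for a start key.
func loadCheckpoint(db ethdb.KeyValueReader, key []byte) uint64 {
	encoded, err := db.Get(key)
	if err != nil {
		return 0
	}
	var position uint64
	if err := rlp.DecodeBytes(encoded, &position); err != nil {
		log.Warn("error decoding last pruned value", "err", err)
		return 0
	}
	return position
}
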
-func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, lastPrunedKey []byte, cachedStartMinKey *uint64, endMinKey uint64) ([]uint64, error) { +func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, cachedStartMinKey *uint64, endMinKey uint64) ([]uint64, error) { startMinKey := *cachedStartMinKey - if startMinKey == 0 { - startMinKey = fetchLastPrunedKey(db, lastPrunedKey) - } if startMinKey == 0 { startIter := db.NewIterator(prefix, uint64ToKey(1)) if !startIter.Next() { @@ -174,13 +182,11 @@ func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, pref } if endMinKey <= startMinKey { *cachedStartMinKey = startMinKey - insertLastPrunedKey(db, lastPrunedKey, startMinKey) return nil, nil } keys, err := deleteFromRange(ctx, db, prefix, startMinKey, endMinKey-1) if err == nil { *cachedStartMinKey = endMinKey - 1 - insertLastPrunedKey(db, lastPrunedKey, endMinKey-1) } return keys, err } diff --git a/arbnode/schema.go b/arbnode/schema.go index e06d6a75c7..88a31ce90a 100644 --- a/arbnode/schema.go +++ b/arbnode/schema.go @@ -13,14 +13,12 @@ var ( sequencerBatchMetaPrefix []byte = []byte("s") // maps a batch sequence number to BatchMetadata delayedSequencedPrefix []byte = []byte("a") // maps a delayed message count to the first sequencer batch sequence number with this delayed count - messageCountKey []byte = []byte("_messageCount") // contains the current message count - lastPrunedMessageResultKey []byte = []byte("_lastPrunedMessageResultKey") // contains the last pruned message result key - lastPrunedBlockHashInputFeedKey []byte = []byte("_lastPrunedBlockHashInputFeedPrefix") // contains the last pruned block hash input feed key - lastPrunedMessageKey []byte = []byte("_lastPrunedMessageKey") // contains the last pruned message key - lastPrunedRlpDelayedMessageKey []byte = []byte("_lastPrunedRlpDelayedMessageKey") // contains the last pruned RLP delayed message key - delayedMessageCountKey []byte = []byte("_delayedMessageCount") // contains the current delayed message count - sequencerBatchCountKey []byte = []byte("_sequencerBatchCount") // contains the current sequencer message count - dbSchemaVersion []byte = []byte("_schemaVersion") // contains a uint64 representing the database schema version + messageCountKey []byte = []byte("_messageCount") // contains the current message count + lastPrunedMessageKey []byte = []byte("_lastPrunedMessageKey") // contains the last pruned message key + lastPrunedDelayedMessageKey []byte = []byte("_lastPrunedDelayedMessageKey") // contains the last pruned RLP delayed message key + delayedMessageCountKey []byte = []byte("_delayedMessageCount") // contains the current delayed message count + sequencerBatchCountKey []byte = []byte("_sequencerBatchCount") // contains the current sequencer message count + dbSchemaVersion []byte = []byte("_schemaVersion") // contains a uint64 representing the database schema version ) const currentDbSchemaVersion uint64 = 1 From 2aa0b8001cc707cf3097517647b3354d0ea189ef Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 2 Jan 2025 21:48:54 +0530 Subject: [PATCH 39/56] Changes based on PR comments --- arbnode/node.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 37632654e7..d96c4001c4 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -601,6 +601,13 @@ func createNodeImpl( firstMessageBlock := new(big.Int).SetUint64(deployInfo.DeployedAt) if 
config.SnapSyncTest.Enabled { batchCount := config.SnapSyncTest.BatchCount + delayedMessageNumber, err := exec.NextDelayedMessageNumber() + if err != nil { + return nil, err + } + if batchCount > delayedMessageNumber { + batchCount = delayedMessageNumber + } // Find the first block containing the batch count. // Subtract 1 to get the block before the needed batch count, // this is done to fetch previous batch metadata needed for snap sync. @@ -796,8 +803,9 @@ func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Add } high := parentChainAssertionBlock low := uint64(0) - if high > 100 { - low = high - 100 + reduceBy := uint64(100) + if high > reduceBy { + low = high - reduceBy } // Reduce high and low by 100 until lowNode.InboxMaxCount < batchCount // This will give us a range (low to high) of blocks that contain the batch count. @@ -808,8 +816,9 @@ func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Add } if lowCount.Uint64() > batchCount { high = low - if low > 100 { - low = low - 100 + reduceBy = reduceBy * 2 + if low > reduceBy { + low = low - reduceBy } else { low = 0 } From 89cf1f87db443ccea57d9b5fbd32f7207d363750 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Thu, 2 Jan 2025 14:11:21 -0600 Subject: [PATCH 40/56] use proper bold commit --- bold | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold b/bold index 3df1191028..eae8d51fcf 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit 3df119102815a7c17b87251e18df6e09f6e58128 +Subproject commit eae8d51fcf02002d3216a0b15f23b66f819f792d From d3de311d8ac5153ee4299b0053d7a93d84e735b3 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 2 Jan 2025 13:51:48 -0700 Subject: [PATCH 41/56] remove most occurences of log.Crit --- arbos/arbosState/arbosstate.go | 7 ++-- arbos/arbosState/initialize.go | 4 +-- arbos/programs/api.go | 10 +++--- arbos/programs/native_api.go | 5 ++- arbos/programs/programs.go | 2 +- arbstate/inbox.go | 6 ++-- precompiles/context.go | 7 ++-- precompiles/precompile.go | 59 ++++++++++++++++------------------ precompiles/precompile_test.go | 9 ------ system_tests/staker_test.go | 2 +- 10 files changed, 46 insertions(+), 65 deletions(-) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 5ee070f942..de1a970b87 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/hashdb" @@ -123,13 +122,13 @@ func NewArbosMemoryBackedArbOSState() (*ArbosState, *state.StateDB) { db := state.NewDatabaseWithConfig(raw, trieConfig) statedb, err := state.New(common.Hash{}, db, nil) if err != nil { - log.Crit("failed to init empty statedb", "error", err) + panic("failed to init empty statedb: " + err.Error()) } burner := burn.NewSystemBurner(nil, false) chainConfig := chaininfo.ArbitrumDevTestChainConfig() newState, err := InitializeArbosState(statedb, burner, chainConfig, arbostypes.TestInitMessage) if err != nil { - log.Crit("failed to open the ArbOS state", "error", err) + panic("failed to open the ArbOS state: " + err.Error()) } return newState, statedb } @@ -139,7 +138,7 @@ func ArbOSVersion(stateDB vm.StateDB) uint64 { backingStorage := storage.NewGeth(stateDB, 
burn.NewSystemBurner(nil, false)) arbosVersion, err := backingStorage.GetUint64ByUint64(uint64(versionOffset)) if err != nil { - log.Crit("failed to get the ArbOS version", "error", err) + panic("failed to get the ArbOS version: " + err.Error()) } return arbosVersion } diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index 8fd417c2b2..840204382c 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -66,7 +66,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, }() statedb, err := state.New(common.Hash{}, stateDatabase, nil) if err != nil { - log.Crit("failed to init empty statedb", "error", err) + panic("failed to init empty statedb :" + err.Error()) } noStateTrieChangesToCommitError := regexp.MustCompile("^triedb layer .+ is disk layer$") @@ -96,7 +96,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, burner := burn.NewSystemBurner(nil, false) arbosState, err := InitializeArbosState(statedb, burner, chainConfig, initMessage) if err != nil { - log.Crit("failed to open the ArbOS state", "error", err) + panic("failed to open the ArbOS state :" + err.Error()) } chainOwner, err := initData.GetChainOwner() diff --git a/arbos/programs/api.go b/arbos/programs/api.go index d8f12ffbd3..cd2143f5d0 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" @@ -151,7 +150,7 @@ func newApiClosures( case vm.STATICCALL: ret, returnGas, err = evm.StaticCall(scope.Contract, contract, input, gas) default: - log.Crit("unsupported call type", "opcode", opcode) + panic("unsupported call type: " + opcode.String()) } interpreter.SetReturnData(ret) @@ -266,7 +265,7 @@ func newApiClosures( original := input crash := func(reason string) { - log.Crit("bad API call", "reason", reason, "request", req, "len", len(original), "remaining", len(input)) + panic("bad API call reason: " + reason + " request: " + string(req) + " len: " + string(len(original)) + " remaining: " + string(len(input))) } takeInput := func(needed int, reason string) []byte { if len(input) < needed { @@ -338,7 +337,7 @@ func newApiClosures( case StaticCall: opcode = vm.STATICCALL default: - log.Crit("unsupported call type", "opcode", opcode) + panic("unsupported call type opcode: " + opcode.String()) } contract := takeAddress() value := takeU256() @@ -414,8 +413,7 @@ func newApiClosures( captureHostio(name, args, outs, startInk, endInk) return []byte{}, nil, 0 default: - log.Crit("unsupported call type", "req", req) - return []byte{}, nil, 0 + panic("unsupported call type: " + string(req)) } } } diff --git a/arbos/programs/native_api.go b/arbos/programs/native_api.go index ab15800ef9..bf9cda658d 100644 --- a/arbos/programs/native_api.go +++ b/arbos/programs/native_api.go @@ -29,7 +29,6 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" @@ -69,11 +68,11 @@ func newApi( func getApi(id usize) NativeApi { any, ok := apiObjects.Load(uintptr(id)) if !ok { - log.Crit("failed to load stylus Go API", "id", id) + panic("failed to load stylus Go API id: " + string(id)) } api, ok := 
any.(NativeApi) if !ok { - log.Crit("wrong type for stylus Go API", "id", id) + panic("wrong type for stylus Go API id: " + string(id)) } return api } diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 06ba6ead8c..6b2a0c318b 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -219,7 +219,7 @@ func (p Programs) CallProgram( localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), contract.Code, contract.CodeHash, params.PageLimit, evm.Context.Time, debugMode, program) if err != nil { - log.Crit("failed to get local wasm for activated program", "program", contract.Address()) + panic("failed to get local wasm for activated program: " + contract.Address().Hex()) return nil, err } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index b58a7420b7..5539a75ce1 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -85,11 +85,11 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Matches the way keyset validation was done inside DAS readers i.e logging the error // But other daproviders might just want to return the error if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(payload[0]) { - logLevel := log.Error if keysetValidationMode == daprovider.KeysetPanicIfInvalid { - logLevel = log.Crit + panic(err.Error()) + } else { + log.Error(err.Error()) } - logLevel(err.Error()) } else { return nil, err } diff --git a/precompiles/context.go b/precompiles/context.go index 670ffa7443..86e56ffbff 100644 --- a/precompiles/context.go +++ b/precompiles/context.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -58,7 +57,7 @@ func (c *Context) GasLeft() *uint64 { } func (c *Context) Restrict(err error) { - log.Crit("A metered burner was used for access-controlled work", "error", err) + panic("A metered burner was used for access-controlled work :" + err.Error()) } func (c *Context) HandleError(err error) error { @@ -88,13 +87,13 @@ func testContext(caller addr, evm mech) *Context { } state, err := arbosState.OpenArbosState(evm.StateDB, burn.NewSystemBurner(tracingInfo, false)) if err != nil { - log.Crit("unable to open arbos state", "error", err) + panic("unable to open arbos state :" + err.Error()) } ctx.State = state var ok bool ctx.txProcessor, ok = evm.ProcessingHook.(*arbos.TxProcessor) if !ok { - log.Crit("must have tx processor") + panic("must have tx processor") } return ctx } diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 54d18a0cc9..f34c418313 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -120,7 +120,7 @@ func (e *SolError) Error() string { func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Precompile) { source, err := abi.JSON(strings.NewReader(metadata.ABI)) if err != nil { - log.Crit("Bad ABI") + panic("Bad ABI") } implementerType := reflect.TypeOf(implementer) @@ -128,12 +128,12 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr _, ok := implementerType.Elem().FieldByName("Address") if !ok { - log.Crit("Implementer for precompile ", contract, " is missing an Address field") + panic("Implementer for precompile " + contract + " is missing an Address field") } address, ok := 
reflect.ValueOf(implementer).Elem().FieldByName("Address").Interface().(addr) if !ok { - log.Crit("Implementer for precompile ", contract, "'s Address field has the wrong type") + panic("Implementer for precompile " + contract + "'s Address field has the wrong type") } gethAbiFuncTypeEquality := func(actual, geth reflect.Type) bool { @@ -167,7 +167,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr name = capitalize + name[1:] if len(method.ID) != 4 { - log.Crit("Method ID isn't 4 bytes") + panic("Method ID isn't 4 bytes") } id := *(*[4]byte)(method.ID) @@ -175,7 +175,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr handler, ok := implementerType.MethodByName(name) if !ok { - log.Crit("Precompile " + contract + " must implement " + name) + panic("Precompile " + contract + " must implement " + name) } var needs = []reflect.Type{ @@ -199,7 +199,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr needs = append(needs, reflect.TypeOf(&big.Int{})) purity = payable default: - log.Crit("Unknown state mutability ", method.StateMutability) + panic("Unknown state mutability " + method.StateMutability) } for _, arg := range method.Inputs { @@ -215,10 +215,9 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr expectedHandlerType := reflect.FuncOf(needs, outputs, false) if !gethAbiFuncTypeEquality(handler.Type, expectedHandlerType) { - log.Crit( - "Precompile "+contract+"'s "+name+"'s implementer has the wrong type\n", - "\texpected:\t", expectedHandlerType, "\n\tbut have:\t", handler.Type, - ) + panic( + "Precompile " + contract + "'s " + name + "'s implementer has the wrong type\n" + + "\texpected:\t" + expectedHandlerType.String() + "\n\tbut have:\t" + handler.Type.String()) } method := PrecompileMethod{ @@ -237,7 +236,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr method := implementerType.Method(i) name := method.Name if method.IsExported() && methodsByName[name] == nil { - log.Crit(contract + " is missing a solidity interface for " + name) + panic(contract + " is missing a solidity interface for " + name) } } @@ -269,11 +268,10 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr if arg.Indexed { _, ok := supportedIndices[arg.Type.String()] if !ok { - log.Crit( - "Please change the solidity for precompile ", contract, - "'s event ", name, ":\n\tEvent indices of type ", - arg.Type.String(), " are not supported", - ) + panic( + "Please change the solidity for precompile " + contract + + "'s event " + name + ":\n\tEvent indices of type " + + arg.Type.String() + " are not supported") } } } @@ -288,23 +286,21 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr field, ok := implementerType.Elem().FieldByName(name) if !ok { - log.Crit(missing, "event ", name, " of type\n\t", expectedFieldType) + panic(missing + "event " + name + " of type\n\t" + expectedFieldType.String()) } costField, ok := implementerType.Elem().FieldByName(name + "GasCost") if !ok { - log.Crit(missing, "event ", name, "'s GasCost of type\n\t", expectedCostType) + panic(missing + "event " + name + "'s GasCost of type\n\t" + expectedCostType.String()) } if !gethAbiFuncTypeEquality(field.Type, expectedFieldType) { - log.Crit( - context, "'s field for event ", name, " has the wrong type\n", - "\texpected:\t", expectedFieldType, "\n\tbut have:\t", field.Type, - ) + panic( + context + "'s field for event 
" + name + " has the wrong type\n" + + "\texpected:\t" + expectedFieldType.String() + "\n\tbut have:\t" + field.Type.String()) } if !gethAbiFuncTypeEquality(costField.Type, expectedCostType) { - log.Crit( - context, "'s field for event ", name, "GasCost has the wrong type\n", - "\texpected:\t", expectedCostType, "\n\tbut have:\t", costField.Type, - ) + panic( + context + "'s field for event " + name + "GasCost has the wrong type\n" + + "\texpected:\t" + expectedCostType.String() + "\n\tbut have:\t" + costField.Type.String()) } structFields := reflect.ValueOf(implementer).Elem() @@ -464,13 +460,12 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr field, ok := implementerType.Elem().FieldByName(name + "Error") if !ok { - log.Crit(missing, "custom error ", name, "Error of type\n\t", expectedFieldType) + panic(missing + "custom error " + name + "Error of type\n\t" + expectedFieldType.String()) } if field.Type != expectedFieldType { - log.Crit( - context, "'s field for error ", name, "Error has the wrong type\n", - "\texpected:\t", expectedFieldType, "\n\tbut have:\t", field.Type, - ) + panic( + context + "'s field for error " + name + "Error has the wrong type\n" + + "\texpected:\t" + expectedFieldType.String() + "\n\tbut have:\t" + field.Type.String()) } structFields := reflect.ValueOf(implementer).Elem() @@ -756,7 +751,7 @@ func (p *Precompile) Call( reflectArgs = append(reflectArgs, reflect.ValueOf(evm)) reflectArgs = append(reflectArgs, reflect.ValueOf(value)) default: - log.Crit("Unknown state mutability ", method.purity) + panic("Unknown state mutability " + string(method.purity)) } args, err := method.template.Inputs.Unpack(input[4:]) diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index 75fed711eb..183ec1f083 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -5,15 +5,12 @@ package precompiles import ( "fmt" - "io" "math/big" - "os" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" @@ -183,12 +180,6 @@ func TestEventCosts(t *testing.T) { } func TestPrecompilesPerArbosVersion(t *testing.T) { - // Set up a logger in case log.Crit is called by Precompiles() - glogger := log.NewGlogHandler( - log.NewTerminalHandler(io.Writer(os.Stderr), false)) - glogger.Verbosity(log.LevelWarn) - log.SetDefault(log.NewLogger(glogger)) - expectedNewMethodsPerArbosVersion := map[uint64]int{ 0: 89, params.ArbosVersion_5: 3, diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 69645d8878..55c13d664d 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -504,7 +504,7 @@ func TestGetValidatorWalletContractWithDataposterOnlyUsedToCreateValidatorWallet parentChainID, ) if err != nil { - log.Crit("error creating data poster to create validator wallet contract", "err", err) + Fatal(t, "error creating data poster to create validator wallet contract", "err", err) } getExtraGas := func() uint64 { return builder.nodeConfig.Staker.ExtraGas } From 468d6892432b23af5a77db119ab510f1541d52f4 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 2 Jan 2025 18:58:56 -0700 Subject: [PATCH 42/56] fix int to string conversions --- arbos/programs/api.go | 6 ++++-- arbos/programs/native_api.go | 5 +++-- precompiles/precompile.go | 2 +- 3 files changed, 8 
insertions(+), 5 deletions(-) diff --git a/arbos/programs/api.go b/arbos/programs/api.go index cd2143f5d0..a622f55397 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -4,6 +4,8 @@ package programs import ( + "strconv" + "github.com/holiman/uint256" "github.com/ethereum/go-ethereum/common" @@ -265,7 +267,7 @@ func newApiClosures( original := input crash := func(reason string) { - panic("bad API call reason: " + reason + " request: " + string(req) + " len: " + string(len(original)) + " remaining: " + string(len(input))) + panic("bad API call reason: " + reason + " request: " + strconv.Itoa(int(req)) + " len: " + strconv.Itoa(len(original)) + " remaining: " + strconv.Itoa(len(input))) } takeInput := func(needed int, reason string) []byte { if len(input) < needed { @@ -413,7 +415,7 @@ func newApiClosures( captureHostio(name, args, outs, startInk, endInk) return []byte{}, nil, 0 default: - panic("unsupported call type: " + string(req)) + panic("unsupported call type: " + strconv.Itoa(int(req))) } } } diff --git a/arbos/programs/native_api.go b/arbos/programs/native_api.go index bf9cda658d..ad8cc0477b 100644 --- a/arbos/programs/native_api.go +++ b/arbos/programs/native_api.go @@ -25,6 +25,7 @@ import "C" import ( "runtime" + "strconv" "sync" "sync/atomic" @@ -68,11 +69,11 @@ func newApi( func getApi(id usize) NativeApi { any, ok := apiObjects.Load(uintptr(id)) if !ok { - panic("failed to load stylus Go API id: " + string(id)) + panic("failed to load stylus Go API id: " + strconv.Itoa(int(id))) } api, ok := any.(NativeApi) if !ok { - panic("wrong type for stylus Go API id: " + string(id)) + panic("wrong type for stylus Go API id: " + strconv.Itoa(int(id))) } return api } diff --git a/precompiles/precompile.go b/precompiles/precompile.go index f34c418313..7ca9d409c6 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -751,7 +751,7 @@ func (p *Precompile) Call( reflectArgs = append(reflectArgs, reflect.ValueOf(evm)) reflectArgs = append(reflectArgs, reflect.ValueOf(value)) default: - panic("Unknown state mutability " + string(method.purity)) + panic("Unknown state mutability " + strconv.Itoa(int(method.purity))) } args, err := method.template.Inputs.Unpack(input[4:]) From 89516a0075cb11a17b48b5ab2d9829062ffcb32a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 2 Jan 2025 19:32:49 -0700 Subject: [PATCH 43/56] remove dead code --- arbos/programs/programs.go | 1 - 1 file changed, 1 deletion(-) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 6b2a0c318b..4c81de1308 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -220,7 +220,6 @@ func (p Programs) CallProgram( localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), contract.Code, contract.CodeHash, params.PageLimit, evm.Context.Time, debugMode, program) if err != nil { panic("failed to get local wasm for activated program: " + contract.Address().Hex()) - return nil, err } evmData := &EvmData{ From 7200ff5f706693e026e3c70dcee73f640cb718b5 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Fri, 3 Jan 2025 18:05:45 +0530 Subject: [PATCH 44/56] Changes based on PR comments --- arbnode/message_pruner.go | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index b18796a4c5..dedc579a01 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -126,8 +126,7 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, 
messageCoun if m.cachedPrunedDelayedMessages == 0 { m.cachedPrunedDelayedMessages = fetchLastPrunedKey(m.inboxTracker.db, lastPrunedDelayedMessageKey) } - lastPrunedMessage := m.cachedPrunedMessages - prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, &lastPrunedMessage, uint64(messageCount)) + prunedKeysRange, _, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messageResultPrefix, m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting message results: %w", err) } @@ -135,8 +134,7 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned message results:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - lastPrunedMessage = m.cachedPrunedMessages - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, &lastPrunedMessage, uint64(messageCount)) + prunedKeysRange, _, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting expected block hashes: %w", err) } @@ -144,8 +142,7 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun log.Info("Pruned expected block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - lastPrunedMessage = m.cachedPrunedMessages - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &lastPrunedMessage, uint64(messageCount)) + prunedKeysRange, lastPrunedMessage, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting last batch messages: %w", err) } @@ -155,40 +152,34 @@ func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCoun insertLastPrunedKey(m.transactionStreamer.db, lastPrunedMessageKey, lastPrunedMessage) m.cachedPrunedMessages = lastPrunedMessage - lastPrunedDelayedMessage := m.cachedPrunedDelayedMessages - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, &lastPrunedDelayedMessage, delayedMessageCount) + prunedKeysRange, lastPrunedDelayedMessage, err := deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, m.cachedPrunedDelayedMessages, delayedMessageCount) if err != nil { return fmt.Errorf("error deleting last batch delayed messages: %w", err) } if len(prunedKeysRange) > 0 { log.Info("Pruned last batch delayed messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - insertLastPrunedKey(m.inboxTracker.db, lastPrunedDelayedMessageKey, lastPrunedMessage) + insertLastPrunedKey(m.inboxTracker.db, lastPrunedDelayedMessageKey, lastPrunedDelayedMessage) m.cachedPrunedDelayedMessages = lastPrunedDelayedMessage return nil } -// deleteFromLastPrunedUptoEndKey is similar to deleteFromRange but automatically populates the start key -// cachedStartMinKey must not be nil. It's set to the new start key at the end of this function if successful. 
-func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, cachedStartMinKey *uint64, endMinKey uint64) ([]uint64, error) { - startMinKey := *cachedStartMinKey +// deleteFromLastPrunedUptoEndKey is similar to deleteFromRange but automatically populates the start key if it's not set. +// It's returns the new start key (i.e. last pruned key) at the end of this function if successful. +func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, startMinKey uint64, endMinKey uint64) ([]uint64, uint64, error) { if startMinKey == 0 { startIter := db.NewIterator(prefix, uint64ToKey(1)) if !startIter.Next() { - return nil, nil + return nil, 0, nil } startMinKey = binary.BigEndian.Uint64(bytes.TrimPrefix(startIter.Key(), prefix)) startIter.Release() } if endMinKey <= startMinKey { - *cachedStartMinKey = startMinKey - return nil, nil + return nil, startMinKey, nil } keys, err := deleteFromRange(ctx, db, prefix, startMinKey, endMinKey-1) - if err == nil { - *cachedStartMinKey = endMinKey - 1 - } - return keys, err + return keys, endMinKey - 1, err } func insertLastPrunedKey(db ethdb.Database, lastPrunedKey []byte, lastPrunedValue uint64) { From 830a9245293f296cf9bcd16286abeb4c31dd5279 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 3 Jan 2025 10:00:08 -0600 Subject: [PATCH 45/56] update bold pin to latest commit on main --- bold | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold b/bold index 81f1b421b2..cb6922fb4f 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit 81f1b421b2dbbf96c7a2b427a9458667b07b0b27 +Subproject commit cb6922fb4f50d1cdb12af467a18ecee6f1c688a4 From f9fa7827f2e8683f6aa43f93d63a96dc3b1f10fc Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Fri, 3 Jan 2025 20:22:58 -0600 Subject: [PATCH 46/56] update bold pin --- bold | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold b/bold index eae8d51fcf..70c9755ae1 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit eae8d51fcf02002d3216a0b15f23b66f819f792d +Subproject commit 70c9755ae1b731f1b2fdedb986461754e4da2e8f From 55c41f58fab13878c75f3a1ffad8245926360305 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Tue, 7 Jan 2025 14:09:57 -0600 Subject: [PATCH 47/56] bold submod to main --- bold | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold b/bold index 70c9755ae1..a537dac0c5 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit 70c9755ae1b731f1b2fdedb986461754e4da2e8f +Subproject commit a537dac0c5fc95a07afe54dad4d7691121a4f484 From 7acdd8fa8dcb11d54b63746416ab5c0e1dce6d8c Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 7 Jan 2025 18:07:44 -0700 Subject: [PATCH 48/56] update nightly version and fix wasm --- .github/workflows/arbitrator-ci.yml | 2 +- .github/workflows/ci.yml | 2 +- Makefile | 26 ++++++++++++++++++++++ arbitrator/stylus/tests/.cargo/config.toml | 1 + 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index dd58a30571..d9c4618e8b 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -76,7 +76,7 @@ jobs: uses: dtolnay/rust-toolchain@nightly id: install-rust-nightly with: - toolchain: 'nightly-2024-08-06' + toolchain: 'nightly-2024-10-06' targets: 'wasm32-wasi, wasm32-unknown-unknown' components: 'rust-src, rustfmt, clippy' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1eda1d9b7e..e9e184f786 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,7 +64,7 @@ jobs: uses: dtolnay/rust-toolchain@nightly id: install-rust-nightly with: - toolchain: 'nightly-2024-08-06' + toolchain: 'nightly-2024-10-06' targets: 'wasm32-wasi, wasm32-unknown-unknown' components: 'rust-src, rustfmt, clippy' diff --git a/Makefile b/Makefile index 12dfb07cf8..39b221dcee 100644 --- a/Makefile +++ b/Makefile @@ -440,54 +440,80 @@ $(stylus_test_dir)/%.wasm: $(stylus_test_dir)/%.b $(stylus_lang_bf) $(stylus_test_keccak_wasm): $(stylus_test_keccak_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_keccak-100_wasm): $(stylus_test_keccak-100_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_fallible_wasm): $(stylus_test_fallible_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_storage_wasm): $(stylus_test_storage_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_multicall_wasm): $(stylus_test_multicall_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_log_wasm): $(stylus_test_log_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_create_wasm): $(stylus_test_create_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_math_wasm): $(stylus_test_math_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_evm-data_wasm): $(stylus_test_evm-data_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_read-return-data_wasm): $(stylus_test_read-return-data_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_sdk-storage_wasm): $(stylus_test_sdk-storage_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_erc20_wasm): $(stylus_test_erc20_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + 
wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary $(stylus_test_hostio-test_wasm): $(stylus_test_hostio-test_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + wasm2wat $@ > $@.wat #removing reference types + wat2wasm $@.wat -o $@ @touch -c $@ # cargo might decide to not rebuild the binary contracts/test/prover/proofs/float%.json: $(arbitrator_cases)/float%.wasm $(prover_bin) $(output_latest)/soft-float.wasm diff --git a/arbitrator/stylus/tests/.cargo/config.toml b/arbitrator/stylus/tests/.cargo/config.toml index 702a5c04b3..6ca5e18651 100644 --- a/arbitrator/stylus/tests/.cargo/config.toml +++ b/arbitrator/stylus/tests/.cargo/config.toml @@ -5,6 +5,7 @@ target = "wasm32-unknown-unknown" rustflags = [ "-C", "target-cpu=mvp", "-C", "link-arg=-zstack-size=8192", + "-C", "target-feature=-reference-types", # "-C", "link-arg=--export=__heap_base", # "-C", "link-arg=--export=__data_end", ] From 25ea73540716bf602e90bdb0587f35162dcf2531 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 7 Jan 2025 18:35:22 -0700 Subject: [PATCH 49/56] fix rust stable to 1.80.1 --- .github/workflows/arbitrator-ci.yml | 2 +- .github/workflows/ci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index d9c4618e8b..b765acee99 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -69,7 +69,7 @@ jobs: - name: Install rust stable uses: dtolnay/rust-toolchain@stable with: - toolchain: 'stable' + toolchain: '1.80.1' components: 'llvm-tools-preview, rustfmt, clippy' - name: Install rust nightly diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e9e184f786..b943b34686 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -56,7 +56,7 @@ jobs: - name: Install rust stable uses: dtolnay/rust-toolchain@stable with: - toolchain: 'stable' + toolchain: '1.80.1' targets: 'wasm32-wasi, wasm32-unknown-unknown' components: 'llvm-tools-preview, rustfmt, clippy' From 6a66ea4db34886e13f1a317b97737b70f3bf3908 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 7 Jan 2025 19:44:52 -0700 Subject: [PATCH 50/56] ci: only use stable clippy --- .github/workflows/arbitrator-ci.yml | 3 ++- .github/workflows/ci.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index b765acee99..3ef8d336ff 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -67,6 +67,7 @@ jobs: cache-dependency-path: '**/yarn.lock' - name: Install rust stable + id: install-rust uses: dtolnay/rust-toolchain@stable with: toolchain: '1.80.1' @@ -78,7 +79,7 @@ jobs: with: toolchain: 'nightly-2024-10-06' targets: 'wasm32-wasi, wasm32-unknown-unknown' - components: 'rust-src, rustfmt, clippy' + components: 'rust-src, rustfmt' - name: Set STYLUS_NIGHTLY_VER environment variable run: echo "STYLUS_NIGHTLY_VER=+$(rustup toolchain list | grep '^nightly' | head -n1 | cut -d' ' -f1)" >> "$GITHUB_ENV" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b943b34686..006c01a378 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,6 +55,7 @@ jobs: - name: Install rust stable uses: dtolnay/rust-toolchain@stable + id: install-rust with: toolchain: '1.80.1' targets: 'wasm32-wasi, wasm32-unknown-unknown' @@ -66,7 +67,7 @@ jobs: with: toolchain: 'nightly-2024-10-06' targets: 
'wasm32-wasi, wasm32-unknown-unknown' - components: 'rust-src, rustfmt, clippy' + components: 'rust-src, rustfmt' - name: Set STYLUS_NIGHTLY_VER environment variable run: echo "STYLUS_NIGHTLY_VER=+$(rustup toolchain list | grep '^nightly' | head -n1 | cut -d' ' -f1)" >> "$GITHUB_ENV" From fb03fe67e1bec2fe9959915448e484add6d8d61f Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 7 Jan 2025 19:31:06 -0700 Subject: [PATCH 51/56] clippy fixes: initial --- arbitrator/arbutil/src/operator.rs | 4 ++-- arbitrator/jit/src/caller_env.rs | 2 +- arbitrator/prover/src/binary.rs | 2 +- arbitrator/prover/src/programs/counter.rs | 2 +- arbitrator/prover/src/programs/depth.rs | 4 ++-- arbitrator/prover/src/programs/meter.rs | 2 +- arbitrator/prover/src/programs/mod.rs | 6 +++--- arbitrator/stylus/src/env.rs | 10 +++++----- arbitrator/stylus/src/host.rs | 2 +- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/arbitrator/arbutil/src/operator.rs b/arbitrator/arbutil/src/operator.rs index cc1f684366..9abf237a66 100644 --- a/arbitrator/arbutil/src/operator.rs +++ b/arbitrator/arbutil/src/operator.rs @@ -595,13 +595,13 @@ impl Display for OperatorCode { } } -impl<'a> From> for OperatorCode { +impl From> for OperatorCode { fn from(op: Operator) -> Self { OperatorCode::from(&op) } } -impl<'a> From<&Operator<'a>> for OperatorCode { +impl From<&Operator<'_>> for OperatorCode { fn from(op: &Operator) -> Self { use Operator as O; diff --git a/arbitrator/jit/src/caller_env.rs b/arbitrator/jit/src/caller_env.rs index 41240d3d98..9fe4288d21 100644 --- a/arbitrator/jit/src/caller_env.rs +++ b/arbitrator/jit/src/caller_env.rs @@ -34,7 +34,7 @@ impl<'a> JitEnv<'a> for WasmEnvMut<'a> { } } -impl<'s> JitMemAccess<'s> { +impl JitMemAccess<'_> { fn view(&self) -> MemoryView { self.memory.view(&self.store) } diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs index 2260f6bf48..77bc44ec4c 100644 --- a/arbitrator/prover/src/binary.rs +++ b/arbitrator/prover/src/binary.rs @@ -499,7 +499,7 @@ pub fn parse<'a>(input: &'a [u8], path: &'_ Path) -> Result> { Ok(binary) } -impl<'a> Debug for WasmBinary<'a> { +impl Debug for WasmBinary<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("WasmBinary") .field("types", &self.types) diff --git a/arbitrator/prover/src/programs/counter.rs b/arbitrator/prover/src/programs/counter.rs index cd54178cf8..4006e70ee6 100644 --- a/arbitrator/prover/src/programs/counter.rs +++ b/arbitrator/prover/src/programs/counter.rs @@ -75,7 +75,7 @@ pub struct FuncCounter<'a> { block: Vec>, } -impl<'a> FuncCounter<'a> { +impl FuncCounter<'_> { fn new(counters: Arc>>) -> Self { let block = vec![]; Self { counters, block } diff --git a/arbitrator/prover/src/programs/depth.rs b/arbitrator/prover/src/programs/depth.rs index 2000190917..fb0e0cb6d2 100644 --- a/arbitrator/prover/src/programs/depth.rs +++ b/arbitrator/prover/src/programs/depth.rs @@ -107,7 +107,7 @@ pub struct FuncDepthChecker<'a> { done: bool, } -impl<'a> FuncDepthChecker<'a> { +impl FuncDepthChecker<'_> { fn new( global: GlobalIndex, funcs: Arc>, @@ -227,7 +227,7 @@ impl<'a> FuncMiddleware<'a> for FuncDepthChecker<'a> { } } -impl<'a> FuncDepthChecker<'a> { +impl FuncDepthChecker<'_> { fn worst_case_depth(&self) -> Result { use Operator::*; diff --git a/arbitrator/prover/src/programs/meter.rs b/arbitrator/prover/src/programs/meter.rs index 0d7b3151d7..258c932944 100644 --- a/arbitrator/prover/src/programs/meter.rs +++ b/arbitrator/prover/src/programs/meter.rs @@ -122,7 
+122,7 @@ pub struct FuncMeter<'a, F: OpcodePricer> { sigs: Arc, } -impl<'a, F: OpcodePricer> FuncMeter<'a, F> { +impl FuncMeter<'_, F> { fn new( ink_global: GlobalIndex, status_global: GlobalIndex, diff --git a/arbitrator/prover/src/programs/mod.rs b/arbitrator/prover/src/programs/mod.rs index a35308e7ff..517ccc1971 100644 --- a/arbitrator/prover/src/programs/mod.rs +++ b/arbitrator/prover/src/programs/mod.rs @@ -244,7 +244,7 @@ impl ModuleMod for ModuleInfo { fn drop_exports_and_names(&mut self, keep: &HashMap<&str, ExportKind>) { self.exports.retain(|name, export| { keep.get(name.as_str()) - .map_or(false, |x| *x == (*export).into()) + .is_some_and(|x| *x == (*export).into()) }); self.function_names.clear(); } @@ -263,7 +263,7 @@ impl ModuleMod for ModuleInfo { } } -impl<'a> ModuleMod for WasmBinary<'a> { +impl ModuleMod for WasmBinary<'_> { fn add_global(&mut self, name: &str, _ty: Type, init: GlobalInit) -> Result { let global = match init { GlobalInit::I32Const(x) => Value::I32(x as u32), @@ -364,7 +364,7 @@ impl<'a> ModuleMod for WasmBinary<'a> { fn drop_exports_and_names(&mut self, keep: &HashMap<&str, ExportKind>) { self.exports - .retain(|name, ty| keep.get(name.as_str()).map_or(false, |x| *x == ty.1)); + .retain(|name, ty| keep.get(name.as_str()).is_some_and(|x| *x == ty.1)); self.names.functions.clear(); } diff --git a/arbitrator/stylus/src/env.rs b/arbitrator/stylus/src/env.rs index a2c8189029..ef12d2480a 100644 --- a/arbitrator/stylus/src/env.rs +++ b/arbitrator/stylus/src/env.rs @@ -147,7 +147,7 @@ pub struct HostioInfo<'a, D: DataReader, E: EvmApi> { pub start_ink: Ink, } -impl<'a, D: DataReader, E: EvmApi> HostioInfo<'a, D, E> { +impl> HostioInfo<'_, D, E> { pub fn config(&self) -> StylusConfig { self.config.expect("no config") } @@ -172,7 +172,7 @@ impl<'a, D: DataReader, E: EvmApi> HostioInfo<'a, D, E> { } } -impl<'a, D: DataReader, E: EvmApi> MeteredMachine for HostioInfo<'a, D, E> { +impl> MeteredMachine for HostioInfo<'_, D, E> { fn ink_left(&self) -> MachineMeter { let vm = self.env.meter(); match vm.status() { @@ -188,13 +188,13 @@ impl<'a, D: DataReader, E: EvmApi> MeteredMachine for HostioInfo<'a, D, E> { } } -impl<'a, D: DataReader, E: EvmApi> GasMeteredMachine for HostioInfo<'a, D, E> { +impl> GasMeteredMachine for HostioInfo<'_, D, E> { fn pricing(&self) -> PricingParams { self.config().pricing } } -impl<'a, D: DataReader, E: EvmApi> Deref for HostioInfo<'a, D, E> { +impl> Deref for HostioInfo<'_, D, E> { type Target = WasmEnv; fn deref(&self) -> &Self::Target { @@ -202,7 +202,7 @@ impl<'a, D: DataReader, E: EvmApi> Deref for HostioInfo<'a, D, E> { } } -impl<'a, D: DataReader, E: EvmApi> DerefMut for HostioInfo<'a, D, E> { +impl> DerefMut for HostioInfo<'_, D, E> { fn deref_mut(&mut self) -> &mut Self::Target { self.env } diff --git a/arbitrator/stylus/src/host.rs b/arbitrator/stylus/src/host.rs index 67497302a1..c4fc7cea1e 100644 --- a/arbitrator/stylus/src/host.rs +++ b/arbitrator/stylus/src/host.rs @@ -22,7 +22,7 @@ use std::{ use user_host_trait::UserHost; use wasmer::{MemoryAccessError, WasmPtr}; -impl<'a, DR, A> UserHost for HostioInfo<'a, DR, A> +impl UserHost for HostioInfo<'_, DR, A> where DR: DataReader, A: EvmApi, From 00cb2f9b5ab025e13dd156c6cda346149aa10985 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 7 Jan 2025 20:30:29 -0700 Subject: [PATCH 52/56] circumvent lifetime errors from clippy --- arbitrator/prover/src/programs/meter.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/arbitrator/prover/src/programs/meter.rs 
b/arbitrator/prover/src/programs/meter.rs index 258c932944..cfb91e647d 100644 --- a/arbitrator/prover/src/programs/meter.rs +++ b/arbitrator/prover/src/programs/meter.rs @@ -1,5 +1,6 @@ // Copyright 2022-2023, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE +#![allow(clippy::needless_lifetimes)] use crate::{ programs::{ From 82274225c91ce0a294485cac8c5472dc468768f1 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 7 Jan 2025 20:54:53 -0700 Subject: [PATCH 53/56] ci: add nightly clippy back --- .github/workflows/arbitrator-ci.yml | 16 ++++++++-------- .github/workflows/ci.yml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 3ef8d336ff..45cf3c1a3d 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -66,20 +66,20 @@ jobs: cache: 'yarn' cache-dependency-path: '**/yarn.lock' - - name: Install rust stable - id: install-rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: '1.80.1' - components: 'llvm-tools-preview, rustfmt, clippy' - - name: Install rust nightly uses: dtolnay/rust-toolchain@nightly id: install-rust-nightly with: toolchain: 'nightly-2024-10-06' targets: 'wasm32-wasi, wasm32-unknown-unknown' - components: 'rust-src, rustfmt' + components: 'rust-src, rustfmt, clippy' + + - name: Install rust stable + id: install-rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: '1.80.1' + components: 'llvm-tools-preview, rustfmt, clippy' - name: Set STYLUS_NIGHTLY_VER environment variable run: echo "STYLUS_NIGHTLY_VER=+$(rustup toolchain list | grep '^nightly' | head -n1 | cut -d' ' -f1)" >> "$GITHUB_ENV" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 006c01a378..b4ce5bf27f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,7 +67,7 @@ jobs: with: toolchain: 'nightly-2024-10-06' targets: 'wasm32-wasi, wasm32-unknown-unknown' - components: 'rust-src, rustfmt' + components: 'rust-src, rustfmt, clippy' - name: Set STYLUS_NIGHTLY_VER environment variable run: echo "STYLUS_NIGHTLY_VER=+$(rustup toolchain list | grep '^nightly' | head -n1 | cut -d' ' -f1)" >> "$GITHUB_ENV" From 12c76eac50b8b30d90874452f449128a91897067 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 7 Jan 2025 21:10:09 -0700 Subject: [PATCH 54/56] arbitrator ci: stable after nightly --- .github/workflows/arbitrator-ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 3ef8d336ff..0552fc4a3a 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -66,13 +66,6 @@ jobs: cache: 'yarn' cache-dependency-path: '**/yarn.lock' - - name: Install rust stable - id: install-rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: '1.80.1' - components: 'llvm-tools-preview, rustfmt, clippy' - - name: Install rust nightly uses: dtolnay/rust-toolchain@nightly id: install-rust-nightly @@ -81,6 +74,13 @@ jobs: targets: 'wasm32-wasi, wasm32-unknown-unknown' components: 'rust-src, rustfmt' + - name: Install rust stable + id: install-rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: '1.80.1' + components: 'llvm-tools-preview, rustfmt, clippy' + - name: Set STYLUS_NIGHTLY_VER environment variable run: echo "STYLUS_NIGHTLY_VER=+$(rustup toolchain list | grep '^nightly' | head -n1 | cut -d' ' -f1)" >> 
"$GITHUB_ENV" From ffa2f67140f44223eab539524d9019983ed32c07 Mon Sep 17 00:00:00 2001 From: Pepper Lebeck-Jobe Date: Wed, 8 Jan 2025 13:38:54 +0100 Subject: [PATCH 55/56] Add the wasm targets to the stable rust installation Without these targets the build was complaining about the missing `core` crate and suggested installing them. --- .github/workflows/arbitrator-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 45cf3c1a3d..51c0617f3e 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -79,6 +79,7 @@ jobs: uses: dtolnay/rust-toolchain@stable with: toolchain: '1.80.1' + targets: 'wasm32-wasi, wasm32-unknown-unknown' components: 'llvm-tools-preview, rustfmt, clippy' - name: Set STYLUS_NIGHTLY_VER environment variable From 79b3af04cbe77ea8f071658968e04278c9757d93 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 8 Jan 2025 10:07:23 -0600 Subject: [PATCH 56/56] update main --- bold | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bold b/bold index a537dac0c5..290743f517 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit a537dac0c5fc95a07afe54dad4d7691121a4f484 +Subproject commit 290743f517f7a94d62460231399fb095cb18c3a4