diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 1c5e071230a..5112f1492aa 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -784,6 +784,11 @@ var (
         Usage: "Contracts that will have all of their storage added to the witness every time",
         Value: "",
     }
+    BadTxAllowance = cli.Uint64Flag{
+        Name:  "zkevm.bad-tx-allowance",
+        Usage: "The maximum number of times a transaction that consumes too many counters to fit into a batch will be attempted before it is rejected outright by eth_sendRawTransaction",
+        Value: 2,
+    }
     ACLPrintHistory = cli.IntFlag{
         Name:  "acl.print-history",
         Usage: "Number of entries to print from the ACL history on node start up",
diff --git a/core/vm/zk_batch_counters.go b/core/vm/zk_batch_counters.go
index fbad01569de..ea5f708ef44 100644
--- a/core/vm/zk_batch_counters.go
+++ b/core/vm/zk_batch_counters.go
@@ -88,6 +88,24 @@ func (bcc *BatchCounterCollector) AddNewTransactionCounters(txCounters *Transact
     return bcc.CheckForOverflow(false) //no need to calculate the merkle proof here
 }
 
+// SingleTransactionOverflowCheck is used to check if a single transaction would overflow the batch counters
+// by creating a new counter collector and priming it for a single block with just this transaction
+// in it. There is an assumption here that the tx counters being passed in have already been through execution
+func (bcc *BatchCounterCollector) SingleTransactionOverflowCheck(txCounters *TransactionCounter) (bool, error) {
+    err := txCounters.CalculateRlp()
+    if err != nil {
+        return true, err
+    }
+
+    bcc.transactions = append(bcc.transactions, txCounters)
+    bcc.UpdateRlpCountersCache(txCounters)
+    bcc.blockCount = 1 // simulate a single block
+
+    bcc.UpdateExecutionAndProcessingCountersCache(txCounters)
+
+    return bcc.CheckForOverflow(false) //no need to calculate the merkle proof here
+}
+
 func (bcc *BatchCounterCollector) RemovePreviousTransactionCounters() {
     lastTx := bcc.transactions[len(bcc.transactions)-1]
     bcc.UndoTransactionCountersCache(lastTx)
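A brief usage note on the new helper above. This is a sketch of the intended call pattern, not code from the change: the method and types come from this diff, the wrapper function is illustrative, and the import path is inferred from the file location.

package sketch

import "github.com/ledgerwatch/erigon/core/vm"

// wouldOverflowAlone reports whether a single, already-executed transaction could ever
// fit into an otherwise empty batch. The collector passed in must be freshly constructed
// (see prepareBatchCounters later in this diff), because SingleTransactionOverflowCheck
// primes it for exactly one block containing exactly this transaction.
func wouldOverflowAlone(fresh *vm.BatchCounterCollector, txCounters *vm.TransactionCounter) (bool, error) {
    return fresh.SingleTransactionOverflowCheck(txCounters)
}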
diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go
index 69703fc7961..87a7c8c919a 100644
--- a/eth/ethconfig/config_zkevm.go
+++ b/eth/ethconfig/config_zkevm.go
@@ -98,6 +98,7 @@ type Zk struct {
     SealBatchImmediatelyOnOverflow bool
     MockWitnessGeneration          bool
     WitnessContractInclusion       []common.Address
+    BadTxAllowance                 uint64
 }
 
 var DefaultZkConfig = &Zk{}
diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go
index 31350eee8d3..05e63ebd488 100644
--- a/turbo/cli/default_flags.go
+++ b/turbo/cli/default_flags.go
@@ -293,4 +293,5 @@ var DefaultFlags = []cli.Flag{
     &utils.SealBatchImmediatelyOnOverflow,
     &utils.MockWitnessGeneration,
     &utils.WitnessContractInclusion,
+    &utils.BadTxAllowance,
 }
diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go
index be95dd54b4b..ccd782d66b3 100644
--- a/turbo/cli/flags_zkevm.go
+++ b/turbo/cli/flags_zkevm.go
@@ -224,6 +224,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
         SealBatchImmediatelyOnOverflow: ctx.Bool(utils.SealBatchImmediatelyOnOverflow.Name),
         MockWitnessGeneration:          ctx.Bool(utils.MockWitnessGeneration.Name),
         WitnessContractInclusion:       witnessInclusion,
+        BadTxAllowance:                 ctx.Uint64(utils.BadTxAllowance.Name),
     }
 
     utils2.EnableTimer(cfg.DebugTimers)
diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go
index 3908558027e..421c49d1f4c 100644
--- a/turbo/jsonrpc/eth_api.go
+++ b/turbo/jsonrpc/eth_api.go
@@ -376,6 +376,7 @@ type APIImpl struct {
     logger                        log.Logger
     VirtualCountersSmtReduction   float64
     RejectLowGasPriceTransactions bool
+    BadTxAllowance                uint64
 }
 
 // NewEthAPI returns APIImpl instance
@@ -409,6 +410,7 @@ func NewEthAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend, txPool txpoo
         logger:                        logger,
         VirtualCountersSmtReduction:   ethCfg.VirtualCountersSmtReduction,
         RejectLowGasPriceTransactions: ethCfg.RejectLowGasPriceTransactions,
+        BadTxAllowance:                ethCfg.BadTxAllowance,
     }
 }
diff --git a/turbo/jsonrpc/send_transaction.go b/turbo/jsonrpc/send_transaction.go
index fb9637e0d37..569ace6796a 100644
--- a/turbo/jsonrpc/send_transaction.go
+++ b/turbo/jsonrpc/send_transaction.go
@@ -13,6 +13,7 @@ import (
     "github.com/ledgerwatch/erigon/core/types"
     "github.com/ledgerwatch/erigon/params"
     "github.com/ledgerwatch/erigon/rpc"
+    "github.com/ledgerwatch/erigon/zk/hermez_db"
     "github.com/ledgerwatch/erigon/zk/utils"
 )
 
@@ -82,7 +83,6 @@ func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutility
     if err != nil {
         return common.Hash{}, err
     }
-
     defer tx.Rollback()
 
     cc, err = api.chainConfig(ctx, tx)
@@ -99,6 +99,17 @@ func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutility
     }
 
     hash := txn.Hash()
+
+    // [zkevm] - check if the transaction is a bad one
+    hermezDb := hermez_db.NewHermezDbReader(tx)
+    badTxHashCounter, err := hermezDb.GetBadTxHashCounter(hash)
+    if err != nil {
+        return common.Hash{}, err
+    }
+    if badTxHashCounter >= api.BadTxAllowance {
+        return common.Hash{}, errors.New("transaction uses too many counters to fit into a batch")
+    }
+
     res, err := api.txPool.Add(ctx, &txPoolProto.AddRequest{RlpTxs: [][]byte{encodedTx}})
     if err != nil {
         return common.Hash{}, err
diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go
index 01e8bdfe3e4..70b60940946 100644
--- a/zk/hermez_db/db.go
+++ b/zk/hermez_db/db.go
@@ -52,6 +52,7 @@ const JUST_UNWOUND = "just_unwound" // batch
 const PLAIN_STATE_VERSION = "plain_state_version" // batch number -> true
 const ERIGON_VERSIONS = "erigon_versions"         // erigon version -> timestamp of startup
 const BATCH_ENDS = "batch_ends"                   //
+const BAD_TX_HASHES = "bad_tx_hashes"             // tx hash -> integer counter
 
 var HermezDbTables = []string{
     L1VERIFICATIONS,
@@ -88,6 +89,7 @@ var HermezDbTables = []string{
     PLAIN_STATE_VERSION,
     ERIGON_VERSIONS,
     BATCH_ENDS,
+    BAD_TX_HASHES,
 }
 
 type HermezDb struct {
@@ -1887,3 +1889,18 @@ func (db *HermezDbReader) getForkIntervals(forkIdFilter *uint64) ([]types.ForkIn
 
     return forkIntervals, nil
 }
+
+func (db *HermezDb) WriteBadTxHashCounter(txHash common.Hash, counter uint64) error {
+    return db.tx.Put(BAD_TX_HASHES, txHash.Bytes(), Uint64ToBytes(counter))
+}
+
+func (db *HermezDbReader) GetBadTxHashCounter(txHash common.Hash) (uint64, error) {
+    v, err := db.tx.GetOne(BAD_TX_HASHES, txHash.Bytes())
+    if err != nil {
+        return 0, err
+    }
+    if len(v) == 0 {
+        return 0, nil
+    }
+    return BytesToUint64(v), nil
+}
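For reviewers, a hedged sketch of how the two new accessors fit together. The accessor names and the hermez_db import path come from this diff; the wrapper function, the common.Hash import location, and the assumption that writer and reader wrap the same open transaction are illustrative only.

package sketch

import (
    libcommon "github.com/ledgerwatch/erigon-lib/common" // assumed location of common.Hash; use whatever hermez_db already imports

    "github.com/ledgerwatch/erigon/zk/hermez_db"
)

// recordAndCheckBadTx bumps the persisted BAD_TX_HASHES counter for a transaction that
// could not fit into a batch and reports whether eth_sendRawTransaction should now
// refuse it, mirroring the sequencer-side write and the RPC-side read in this diff.
func recordAndCheckBadTx(w *hermez_db.HermezDb, r *hermez_db.HermezDbReader, txHash libcommon.Hash, allowance uint64) (bool, error) {
    seen, err := r.GetBadTxHashCounter(txHash)
    if err != nil {
        return false, err
    }
    if err := w.WriteBadTxHashCounter(txHash, seen+1); err != nil {
        return false, err
    }
    return seen+1 >= allowance, nil
}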
diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go
index 2754bb55642..6c7bd1fbcea 100644
--- a/zk/stages/stage_sequence_execute.go
+++ b/zk/stages/stage_sequence_execute.go
@@ -15,6 +15,7 @@ import (
     "github.com/ledgerwatch/erigon/eth/stagedsync"
     "github.com/ledgerwatch/erigon/eth/stagedsync/stages"
     "github.com/ledgerwatch/erigon/zk"
+    "github.com/ledgerwatch/erigon/zk/hermez_db"
     zktx "github.com/ledgerwatch/erigon/zk/tx"
     "github.com/ledgerwatch/erigon/zk/utils"
 )
@@ -175,10 +176,7 @@
         return err
     }
 
-    batchCounters, err := prepareBatchCounters(batchContext, batchState)
-    if err != nil {
-        return err
-    }
+    batchCounters := prepareBatchCounters(batchContext, batchState)
 
     if batchState.isL1Recovery() {
         if cfg.zk.L1SyncStopBatch > 0 && batchState.batchNumber > cfg.zk.L1SyncStopBatch {
@@ -409,7 +407,7 @@
 
             // The copying of this structure is intentional
             backupDataSizeChecker := *blockDataSizeChecker
-            receipt, execResult, anyOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1TreeUpdateIndex, &backupDataSizeChecker)
+            receipt, execResult, txCounters, anyOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1TreeUpdateIndex, &backupDataSizeChecker)
             if err != nil {
                 if batchState.isLimboRecovery() {
                     panic("limbo transaction has already been executed once so they must not fail while re-executing")
@@ -447,8 +445,9 @@
 
             switch anyOverflow {
             case overflowCounters:
-                // remove the last attempted counters as we may want to continue processing this batch with other transactions
-                batchCounters.RemovePreviousTransactionCounters()
+                // as we know this has caused an overflow, we need to make sure we don't keep it in the inclusion list and attempt to add it again in the
+                // next block
+                badTxHashes = append(badTxHashes, txHash)
 
                 if batchState.isLimboRecovery() {
                     panic("limbo transaction has already been executed once so they must not overflow counters while re-executing")
@@ -456,29 +455,40 @@
 
                 if !batchState.isL1Recovery() {
                     /*
-                        There are two cases when overflow could occur.
-                        1. The block DOES not contain any transactions.
-                            In this case it means that a single tx overflow entire zk-counters.
-                            In this case we mark it so. Once marked it will be discarded from the tx-pool async (once the tx-pool process the creation of a new batch)
-                            Block production then continues as normal looking for more suitable transactions
-                            NB: The tx SHOULD not be removed from yielded set, because if removed, it will be picked again on next block. That's why there is i++. It ensures that removing from yielded will start after the problematic tx
-                        2. The block contains transactions.
-                            In this case we make note that we have had a transaction that overflowed and continue attempting to process transactions
-                            Once we reach the cap for these attempts we will stop producing blocks and consider the batch done
+                        here we check if the transaction on its own would overflow the batch counters
+                        by creating a new counter collector and priming it for a single block with just this transaction
+                        in it. We already have the computed execution counters so we don't need to recompute them and
+                        can just combine collectors as normal to see if it would overflow.
+
+                        If it does overflow then we mark the hash as a bad one and move on. Calls to the RPC will
+                        check if this hash has appeared too many times and stop allowing it through if required.
                     */
-                    if !batchState.hasAnyTransactionsInThisBatch && len(batchState.builtBlocks) == 0 {
+
+                    // now check if this transaction on its own would overflow counters for the batch
+                    tempCounters := prepareBatchCounters(batchContext, batchState)
+                    singleTxOverflow, err := tempCounters.SingleTransactionOverflowCheck(txCounters)
+                    if err != nil {
+                        return err
+                    }
+
+                    // if the transaction overflows or if there are no transactions in the batch and no blocks built yet
+                    // then we mark the transaction as bad and move on
+                    if singleTxOverflow || (!batchState.hasAnyTransactionsInThisBatch && len(batchState.builtBlocks) == 0) {
+                        ocs, _ := tempCounters.CounterStats(l1TreeUpdateIndex != 0)
                         // mark the transaction to be removed from the pool
                         cfg.txPool.MarkForDiscardFromPendingBest(txHash)
-                        log.Info(fmt.Sprintf("[%s] single transaction %s cannot fit into batch", logPrefix, txHash))
+                        counter, err := handleBadTxHashCounter(sdb.hermezDb, txHash)
+                        if err != nil {
+                            return err
+                        }
+                        log.Info(fmt.Sprintf("[%s] single transaction %s cannot fit into batch - overflow", logPrefix, txHash), "context", ocs, "times_seen", counter)
                     } else {
                         batchState.newOverflowTransaction()
-                        // batchCounters.
                         transactionNotAddedText := fmt.Sprintf("[%s] transaction %s was not included in this batch because it overflowed.", logPrefix, txHash)
                         ocs, _ := batchCounters.CounterStats(l1TreeUpdateIndex != 0)
-                        // was not included in this batch because it overflowed: counter x, counter y
                         log.Info(transactionNotAddedText, "Counters context:", ocs, "overflow transactions", batchState.overflowTransactions)
                         if batchState.reachedOverflowTransactionLimit() || cfg.zk.SealBatchImmediatelyOnOverflow {
-                            log.Info(fmt.Sprintf("[%s] closing batch due to counters", logPrefix), "counters: ", batchState.overflowTransactions, "immediate", cfg.zk.SealBatchImmediatelyOnOverflow)
+                            log.Info(fmt.Sprintf("[%s] closing batch due to overflow counters", logPrefix), "counters: ", batchState.overflowTransactions, "immediate", cfg.zk.SealBatchImmediatelyOnOverflow)
                             runLoopBlocks = false
                             if len(batchState.blockState.builtBlockElements.transactions) == 0 {
                                 emptyBlockOverflow = true
@@ -487,6 +497,9 @@
                     }
                 }
 
+                // now we have finished with logging the overflow, remove the last attempted counters as we may want to continue processing this batch with other transactions
+                batchCounters.RemovePreviousTransactionCounters()
+
                 // continue on processing other transactions and skip this one
                 continue
             }
@@ -665,3 +678,13 @@ func removeInclusionTransaction(orig []types.Transaction, index int) []types.Tra
     }
     return append(orig[:index], orig[index+1:]...)
 }
+
+func handleBadTxHashCounter(hermezDb *hermez_db.HermezDb, txHash common.Hash) (uint64, error) {
+    counter, err := hermezDb.GetBadTxHashCounter(txHash)
+    if err != nil {
+        return 0, err
+    }
+    newCounter := counter + 1
+    hermezDb.WriteBadTxHashCounter(txHash, newCounter)
+    return newCounter, nil
+}
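The discard condition introduced above is easy to misread in context, so here it is restated on its own. This is a restatement for review, not code in the change; the parameter names mirror the fields used in the diff.

// discardOutright captures when a transaction is marked bad and handed to
// handleBadTxHashCounter: either it alone overflows a fresh batch collector, or it
// overflowed while the batch still had no transactions and no built blocks.
func discardOutright(singleTxOverflow, hasAnyTransactionsInThisBatch bool, builtBlocks int) bool {
    return singleTxOverflow || (!hasAnyTransactionsInThisBatch && builtBlocks == 0)
}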
diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go
index 5f110235fe4..f574c1800cd 100644
--- a/zk/stages/stage_sequence_execute_batch.go
+++ b/zk/stages/stage_sequence_execute_batch.go
@@ -41,8 +41,8 @@ func prepareBatchNumber(sdb *stageDb, forkId, lastBatch uint64, isL1Recovery boo
     return lastBatch + 1, nil
 }
 
-func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState) (*vm.BatchCounterCollector, error) {
-    return vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), nil), nil
+func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState) *vm.BatchCounterCollector {
+    return vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), nil)
 }
 
 func doCheckForBadBatch(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) (bool, error) {
diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go
index 192ec3e175c..8b83486b36d 100644
--- a/zk/stages/stage_sequence_execute_injected_batch.go
+++ b/zk/stages/stage_sequence_execute_injected_batch.go
@@ -138,7 +138,7 @@ func handleInjectedBatch(
     // process the tx and we can ignore the counters as an overflow at this stage means no network anyway
     effectiveGas := DeriveEffectiveGasPrice(*batchContext.cfg, decodedBlocks[0].Transactions[0])
 
-    receipt, execResult, _, err := attemptAddTransaction(*batchContext.cfg, batchContext.sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil)
+    receipt, execResult, _, _, err := attemptAddTransaction(*batchContext.cfg, batchContext.sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil)
     if err != nil {
         return nil, nil, nil, 0, err
     }
diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go
index 713dc462d39..75a9b49af1d 100644
--- a/zk/stages/stage_sequence_execute_transactions.go
+++ b/zk/stages/stage_sequence_execute_transactions.go
@@ -129,7 +129,7 @@ func attemptAddTransaction(
     l1Recovery bool,
     forkId, l1InfoIndex uint64,
     blockDataSizeChecker *BlockDataChecker,
-) (*types.Receipt, *core.ExecutionResult, overflowType, error) {
+) (*types.Receipt, *core.ExecutionResult, *vm.TransactionCounter, overflowType, error) {
     var batchDataOverflow, overflow bool
     var err error
 
@@ -140,7 +140,7 @@ func attemptAddTransaction(
     if blockDataSizeChecker != nil {
         txL2Data, err := txCounters.GetL2DataCache()
         if err != nil {
-            return nil, nil, overflowNone, err
+            return nil, nil, txCounters, overflowNone, err
         }
         batchDataOverflow = blockDataSizeChecker.AddTransactionData(txL2Data)
         if batchDataOverflow {
@@ -148,12 +148,12 @@
         }
     }
     if err != nil {
-        return nil, nil, overflowNone, err
+        return nil, nil, txCounters, overflowNone, err
     }
     anyOverflow := overflow || batchDataOverflow
     if anyOverflow && !l1Recovery {
         log.Debug("Transaction preexecute overflow detected", "txHash", transaction.Hash(), "counters", batchCounters.CombineCollectorsNoChanges().UsedAsString())
-        return nil, nil, overflowCounters, nil
+        return nil, nil, txCounters, overflowCounters, nil
     }
 
     gasPool := new(core.GasPool).AddGas(transactionGasLimit)
@@ -185,29 +185,29 @@
     )
     if err != nil {
-        return nil, nil, overflowNone, err
+        return nil, nil, txCounters, overflowNone, err
     }
 
     if err = txCounters.ProcessTx(ibs, execResult.ReturnData); err != nil {
-        return nil, nil, overflowNone, err
+        return nil, nil, txCounters, overflowNone, err
     }
 
     batchCounters.UpdateExecutionAndProcessingCountersCache(txCounters)
 
     // now that we have executed we can check again for an overflow
     if overflow, err = batchCounters.CheckForOverflow(l1InfoIndex != 0); err != nil {
-        return nil, nil, overflowNone, err
+        return nil, nil, txCounters, overflowNone, err
     }
 
     counters := batchCounters.CombineCollectorsNoChanges().UsedAsString()
     if overflow {
         log.Debug("Transaction overflow detected", "txHash", transaction.Hash(), "coutners", counters)
         ibs.RevertToSnapshot(snapshot)
-        return nil, nil, overflowCounters, nil
+        return nil, nil, txCounters, overflowCounters, nil
     }
 
     if gasUsed > header.GasLimit {
         log.Debug("Transaction overflows block gas limit", "txHash", transaction.Hash(), "txGas", receipt.GasUsed, "blockGasUsed", header.GasUsed)
         ibs.RevertToSnapshot(snapshot)
-        return nil, nil, overflowGas, nil
+        return nil, nil, txCounters, overflowGas, nil
     }
 
     log.Debug("Transaction added", "txHash", transaction.Hash(), "coutners", counters)
@@ -217,10 +217,10 @@
     // we need to keep hold of the effective percentage used
     // todo [zkevm] for now we're hard coding to the max value but we need to calc this properly
     if err = sdb.hermezDb.WriteEffectiveGasPricePercentage(transaction.Hash(), effectiveGasPrice); err != nil {
-        return nil, nil, overflowNone, err
+        return nil, nil, txCounters, overflowNone, err
     }
 
     ibs.FinalizeTx(evm.ChainRules(), noop)
 
-    return receipt, execResult, overflowNone, nil
+    return receipt, execResult, txCounters, overflowNone, nil
 }
diff --git a/zk/txpool/pool_zk.go b/zk/txpool/pool_zk.go
index 950b1990a02..f6373284ea5 100644
--- a/zk/txpool/pool_zk.go
+++ b/zk/txpool/pool_zk.go
@@ -303,7 +303,7 @@ func (p *TxPool) RemoveMinedTransactions(ids []common.Hash) {
 // deletes the discarded txs from the overflowZkCounters
 func (p *TxPool) discardOverflowZkCountersFromPending(pending *PendingPool, discard func(*metaTx, DiscardReason), sendersWithChangedState map[uint64]struct{}) {
     for _, mt := range p.overflowZkCounters {
-        log.Info("[tx_pool] Removing TX from pending due to counter overflow", "tx", mt.Tx.IDHash)
+        log.Info("[tx_pool] Removing TX from pending due to counter overflow", "tx", common.BytesToHash(mt.Tx.IDHash[:]))
         pending.Remove(mt)
         discard(mt, OverflowZkCounters)
         sendersWithChangedState[mt.Tx.SenderID] = struct{}{}
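Finally, the only externally visible effect of the change is the new eth_sendRawTransaction rejection. A hedged client-side sketch follows, using go-ethereum's ethclient as an assumption (any JSON-RPC client would see the same error text, which is taken from this diff), showing how a sender might treat the rejection as permanent.

package sketch

import (
    "context"
    "strings"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethclient"
)

// sendOnce submits a signed transaction and reports whether the node rejected it
// permanently because it can never fit into a batch.
func sendOnce(ctx context.Context, url string, signedTx *types.Transaction) (permanentlyRejected bool, err error) {
    client, err := ethclient.Dial(url)
    if err != nil {
        return false, err
    }
    defer client.Close()

    if err := client.SendTransaction(ctx, signedTx); err != nil {
        if strings.Contains(err.Error(), "transaction uses too many counters to fit into a batch") {
            return true, nil // the bad-tx allowance for this hash is exhausted; retrying will not help
        }
        return false, err
    }
    return false, nil
}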