diff --git a/README.md b/README.md index 9c189f9662c2..6cbea5624271 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,9 @@ Optimism

Optimism is Ethereum, scaled.

+

+ This Optimism Stack is tailored to run with PBS in NodeKit's system; see the [NodeKit PBS](#nodekit-pbs) section for more information. +


@@ -13,6 +16,7 @@ - [What is Optimism?](#what-is-optimism) - [Documentation](#documentation) +- [NodeKit PBS](#nodekit-pbs) - [Specification](#specification) - [Community](#community) - [Contributing](#contributing) @@ -37,6 +41,10 @@ In this repository, you'll find numerous core components of the OP Stack, the de - If you want to build on top of OP Mainnet, refer to the [Optimism Documentation](https://docs.optimism.io) - If you want to build your own OP Stack based blockchain, refer to the [OP Stack Guide](https://docs.optimism.io/stack/getting-started), and make sure to understand this repository's [Development and Release Process](#development-and-release-process) +## NodeKit PBS + +In NodeKit PBS, we have two types of builders: the super builder and the MEV builder. Both types of builders build blocks and submit them to [Arcadia](https://github.com/AnomalyFi/Arcadia/tree/arcadia), and Arcadia then produces a valid block for op-node to fetch. In the Arcadia-based version, for ease of integration, we provide a [sidecar](https://github.com/AnomalyFi/sidecar) that manages rollup registration and exit on the NodeKit system. While the rollup is registered, the sidecar fetches blocks from Arcadia; when the rollup has exited or has not joined the NodeKit system, op-node simply falls back to legacy mode, producing blocks from its own mempool. For details on launching a NodeKit-based Optimism rollup, see [op-javelin-deployment](https://github.com/AnomalyFi/op-javelin-deployment/tree/arcadia). + ## Specification If you're interested in the technical details of how Optimism works, refer to the [Optimism Protocol Specification](https://github.com/ethereum-optimism/specs). diff --git a/bedrock-devnet/devnet/__init__.py b/bedrock-devnet/devnet/__init__.py index 1fb436150668..099d2dc7e083 100644 --- a/bedrock-devnet/devnet/__init__.py +++ b/bedrock-devnet/devnet/__init__.py @@ -15,14 +15,6 @@ from multiprocessing import Process, Queue import concurrent.futures from collections import namedtuple -# from hdwallet import BIP44HDWallet -# from hdwallet.cryptocurrencies import EthereumMainnet -# from hdwallet.derivations import BIP44Derivation -# from hdwallet.utils import generate_mnemonic -# from typing import Optional - - -import devnet.log_setup pjoin = os.path.join @@ -51,7 +43,7 @@ parser.add_argument('--nodekit-l1-dir', help='directory of nodekit-l1', type=str, default='nodekit-l1') parser.add_argument('--nodekit-contract', help='nodekit commitment contract address on l1', type=str, default='') parser.add_argument('--seq-url', help='seq url', type=str, default='http://127.0.0.1:37029/ext/bc/56iQygPt5wrSCqZSLVwKyT7hAEdraXqDsYqWtWoAWaZSKDSDm') -parser.add_argument('--seq-signer', help='signing wallet of SEQ', type=str, default='323b1d8f4eed5f0da9da93071b034f2dce9d2d22692c172f3cb252a64ddfafd01b057de320297c29ad0c1f589ea216869cf1938d88c9fbd70d6748323dbf2fa7') +parser.add_argument('--builder-sk', help='bls secret key of l2-builder', type=str, default='0x2fc12ae741f29701f8e30f5de6350766c020cb80768a0ff01e6838ffd2431e11') parser.add_argument('--l1-chain-id', help='chain id of l1', type=str, default='32382') parser.add_argument('--l2-chain-id', help='chain id of l2', type=str, default='45200') parser.add_argument('--deploy-contracts', help='deploy contracts for l2 and nodekit-zk', type=bool, action=argparse.BooleanOptionalAction) @@ -59,7 +51,14 @@ parser.add_argument('--subnet', help='the static subnet opstack will be deployed on', type=str, default='172.20') parser.add_argument('--proposer-hdpath',
help='the hd path of proposer mnemonic will be used to post roots to l1', type=str, default="m/44'/60'/0'/0/1") parser.add_argument('--batcher-hdpath', help='the hd path of batcher mnemonic will be used to post batches to l1', type=str, default="m/44'/60'/0'/0/2") -parser.add_argument('--baton-url', help='rpc url of baton', type=str, default='http://baton.url') +parser.add_argument('--arcadia-url', help='rpc url of arcadia', type=str, default='http://arcadia.url') +parser.add_argument('--block-time', help='block time of chain in seconds', type=str, default='2') +parser.add_argument('--builder-resubmit-interval', help='builder block production interval', type=str, default='200ms') +parser.add_argument('--builder-submission-offset', help='builder submission offset', type=str, default='500ms') +parser.add_argument('--builder-record-offset', help='builder record offset', type=str, default='1500ms') +parser.add_argument('--builder-rate-limit-duration', help='builder rate limit duration', type=str, default='100ms') +parser.add_argument('--sidecar-url', help='sidecar url', type=str, default='http://sidecar.url') +parser.add_argument('--sidecar-secret-key', help='sidecar bls signing key', type=str, default='0xblskey') # Global environment variables @@ -415,7 +414,6 @@ def devnet_deploy(paths, args): nodekit = args.nodekit l2 = args.l2 l2_chain_id = int(args.l2_chain_id) - baton_url: str = args.baton_url # which will be prepended to names of docker volumnes and services so we can run several rollups composer_project_name = f'op-devnet_{l2_chain_id}' l2_provider_url = args.l2_provider_url @@ -424,11 +422,19 @@ def devnet_deploy(paths, args): l1_ws_url = args.l1_ws_url seq_addr: str = args.seq_url seq_chain_id = seq_addr.split('/')[-1] - seq_signer: str = args.seq_signer + builder_sk: str = args.builder_sk subnet = args.subnet mnemonic_words = args.mnemonic_words batcher_hdpath = args.batcher_hdpath proposer_hdpath = args.proposer_hdpath + arcadia_url: str = args.arcadia_url + block_time: str = args.block_time + builder_resubmit_interval: str = args.builder_resubmit_interval + builder_submission_offset: str = args.builder_submission_offset + builder_record_offset: str = args.builder_record_offset + builder_rate_limit_duration: str = args.builder_rate_limit_duration + sidecar_url: str = args.sidecar_url + sidecar_secret_key: str = args.sidecar_secret_key conf = { l2_provider_url, @@ -441,53 +447,6 @@ def devnet_deploy(paths, args): print(f'using config {conf}') - # TODO: to be removed since we don't need to launch l2 ourselves - # if os.path.exists(paths.genesis_l1_path) and os.path.isfile(paths.genesis_l1_path): - # log.info('L1 genesis already generated.') - # elif not args.deploy_l2: - # # Generate the L1 genesis, unless we are deploying an L2 onto an existing L1. - # log.info('Generating L1 genesis.') - # if os.path.exists(paths.allocs_path) == False: - # devnet_l1_genesis(paths, args.deploy_config) - - # # It's odd that we want to regenerate the devnetL1.json file with - # # an updated timestamp different than the one used in the devnet_l1_genesis - # # function. But, without it, CI flakes on this test rather consistently. - # # If someone reads this comment and understands why this is being done, please - # # update this comment to explain. 
- # init_devnet_l1_deploy_config(paths, update_timestamp=True) - # outfile_l1 = pjoin(paths.devnet_dir, 'genesis-l1.json') - # run_command([ - # 'go', 'run', 'cmd/main.go', 'genesis', 'l1', - # '--deploy-config', paths.devnet_config_path, - # '--l1-allocs', paths.allocs_path, - # '--l1-deployments', paths.addresses_json_path, - # '--outfile.l1', outfile_l1, - # ], cwd=paths.op_node_dir) - - # if args.deploy_l2: - # # L1 and sequencer already exist, just create the deploy config and deploy the L1 contracts - # # for the new L2. - # init_devnet_l1_deploy_config(paths, update_timestamp=True) - # deploy_contracts(paths, args.deploy_config, args.deploy_l2) - # else: - # # Deploy L1 and sequencer network. - # log.info('Starting L1.') - # run_command(['docker', 'compose', '-f', compose_file, 'up', '-d', 'l1'], cwd=paths.ops_bedrock_dir, env={ - # 'PWD': paths.ops_bedrock_dir, - # 'DEVNET_DIR': paths.devnet_dir - # }) - # #wait_up(8545) - # wait_for_rpc_server('devnet.nodekit.xyz') - - # log.info('Bringing up `artifact-server`') - # run_command(['docker', 'compose', 'up', '-d', 'artifact-server'], cwd=paths.ops_bedrock_dir, env={ - # 'PWD': paths.ops_bedrock_dir, - # 'DEVNET_DIR': paths.devnet_dir - # }) - - - # Re-build the L2 genesis unconditionally in NodeKit mode, since we require the timestamps to be recent. # if not nodekit and os.path.exists(paths.genesis_l2_path) and os.path.isfile(paths.genesis_l2_path): if os.path.exists(paths.genesis_l2_path) and os.path.isfile(paths.genesis_l2_path): @@ -559,7 +518,10 @@ def devnet_deploy(paths, args): 'OP_BATCHER_SEQUENCER_HD_PATH': batcher_hdpath, 'OP_PROPOSER_MNEMONIC': mnemonic_words, 'OP_PROPOSER_L2_OUTPUT_HD_PATH': proposer_hdpath, - 'COMPOSE_PROJECT_NAME': composer_project_name + 'COMPOSE_PROJECT_NAME': composer_project_name, + 'L2_CHAINID': hex(int(l2_chain_id)), + 'SIDECAR_URL': sidecar_url, + 'SIDECAR_SECRET_KEY': sidecar_secret_key, }) enr = get_enr(composer_project_name, "op1-node", paths.ops_bedrock_dir) @@ -571,13 +533,18 @@ def devnet_deploy(paths, args): env={ "PWD": paths.ops_bedrock_dir, 'SUBNET': subnet, - 'BATON_URL': baton_url, "ENODE": enode, 'SEQ_ADDR': seq_addr, 'SEQ_CHAIN_ID': seq_chain_id, - 'SEQ_SIGNER_HEX': seq_signer, + 'BUILDER_SECRET_KEY': builder_sk, 'L2_CHAINID': f'{45200+inc}', - "COMPOSE_PROJECT_NAME": composer_project_name + "COMPOSE_PROJECT_NAME": composer_project_name, + 'ARCADIA_URL': arcadia_url, + 'BUILDER_RESUBMIT_INTERVAL': builder_resubmit_interval, + 'BUILDER_SECONDS_IN_SLOT': block_time, + 'BUILDER_SUBMISSION_OFFSET': builder_submission_offset, + 'BUILDER_RECORD_OFFSET': builder_record_offset, + 'BUILDER_RATE_LIMIT_DURATION': builder_rate_limit_duration, }) # TODO: to be injected l2builder_rpc_port = 15545 + inc @@ -592,6 +559,7 @@ def devnet_deploy(paths, args): ["docker", "compose", "up", "-d", "op-node-builder"], cwd=paths.ops_bedrock_dir, env={ + 'DEVNET_DIR': paths.devnet_dir, 'L1WS': l1_ws_url, 'SUBNET': subnet, "PWD": paths.ops_bedrock_dir, diff --git a/go.mod b/go.mod index 13e93750c6d3..7f6a5287952e 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,8 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240318114348-52d3dbd1605d - github.com/ethereum/go-ethereum v1.13.8 + github.com/ethereum/go-ethereum v1.13.14 + github.com/flashbots/go-boost-utils v1.8.1 github.com/fsnotify/fsnotify v1.7.0 github.com/go-chi/chi/v5 v5.0.12 
github.com/go-chi/docgen v1.2.0 @@ -90,15 +91,15 @@ require ( github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/felixge/fgprof v0.9.3 // indirect - github.com/fjl/memsize v0.0.1 // indirect + github.com/fjl/memsize v0.0.2 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect @@ -119,7 +120,7 @@ require ( github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect github.com/hashicorp/golang-lru v0.5.0 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect - github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect + github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect @@ -188,7 +189,7 @@ require ( github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rivo/uniseg v0.4.3 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/rs/cors v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect @@ -222,7 +223,7 @@ require ( rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/ethereum/go-ethereum v1.13.8 => github.com/anomalyfi/op-geth v0.7.0 +replace github.com/ethereum/go-ethereum v1.13.14 => github.com/anomalyfi/op-geth v0.7.0 // replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain diff --git a/go.sum b/go.sum index 7dbd9502ab2b..a79ac8c777dd 100644 --- a/go.sum +++ b/go.sum @@ -178,12 +178,15 @@ github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-2024031811434 github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240318114348-52d3dbd1605d/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0= github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= -github.com/fjl/memsize v0.0.1 
h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ= -github.com/fjl/memsize v0.0.1/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= +github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/flashbots/go-boost-utils v1.8.1 h1:AD+1+4oCbBjXLK8IqWHYznD95K6/MmqXhozv5fFOCkU= +github.com/flashbots/go-boost-utils v1.8.1/go.mod h1:jFi2H1el7jGPr2ShkWpYPfKsY9vwsFNmBPJRCO7IPg8= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -222,8 +225,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= @@ -338,8 +342,8 @@ github.com/hashicorp/raft v1.6.1 h1:v/jm5fcYHvVkL0akByAp+IDdDSzCNCGhdO6VdB56HIM= github.com/hashicorp/raft v1.6.1/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e h1:SK4y8oR4ZMHPvwVHryKI88kJPJda4UyWYvG5A6iEQxc= github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e/go.mod h1:EMz/UIuG93P0MBeHh6CbXQAEe8ckVJLZjhD17lBzK5Q= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= @@ -692,8 +696,8 @@ github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE= github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -770,6 +774,8 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/trailofbits/go-fuzz-utils v0.0.0-20210901195358-9657fcfd256c h1:4WU+p200eLYtBsx3M5CKXvkjVdf5SC3W9nMg37y0TFI= +github.com/trailofbits/go-fuzz-utils v0.0.0-20210901195358-9657fcfd256c/go.mod h1:f3jBhpWvuZmue0HZK52GzRHJOYHYSILs/c8+K2S/J+o= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= @@ -955,6 +961,7 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index d3ec36382fbb..33a5436a195b 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -359,6 +359,24 @@ var ( EnvVars: prefixEnvVars("NODEKIT_URL"), Required: false, } + ChainID = &cli.StringFlag{ + Name: "chain-id", + Usage: "Chain ID of the chain, 0x prefixed", + EnvVars: prefixEnvVars("CHAIN_ID"), + Required: false, + } + SidecarURL = &cli.StringFlag{ + Name: "sidecar.url", + Usage: "URL of Sidecar, where op-node fetches blocks from", + EnvVars: prefixEnvVars("SIDECAR_URL"), + Required: false, + } + SidecarSecretKey = &cli.StringFlag{ + Name: "sidecar.secret-key", + Usage: "0x prefixed BLS Secret key of sidecar client, which is used to sign messages to sidecar server", + EnvVars: prefixEnvVars("SIDECAR_SECRET_KEY"), + Required: false, + } ) var requiredFlags = []cli.Flag{ @@ -368,6 +386,9 @@ var requiredFlags = []cli.Flag{ } var optionalFlags = []cli.Flag{ + ChainID, + SidecarURL, + SidecarSecretKey, NodeKitURL, BeaconAddr, BeaconHeader, diff --git a/op-node/node/config.go b/op-node/node/config.go index 5656e945f1ea..fffaa7ca59f8 100644 --- a/op-node/node/config.go +++ b/op-node/node/config.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/sync" plasma "github.com/ethereum-optimism/optimism/op-plasma" 
"github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/sidecar" "github.com/ethereum/go-ethereum/log" ) @@ -80,6 +81,8 @@ type Config struct { // NodeKit SEQ URL NodeKitURL string + + Sidecar sidecar.ClientConfig } type RPCConfig struct { diff --git a/op-node/node/node.go b/op-node/node/node.go index a9931b20f647..45954e0ed976 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/derive" plasma "github.com/ethereum-optimism/optimism/op-plasma" "github.com/ethereum-optimism/optimism/op-service/httputil" + "github.com/ethereum-optimism/optimism/op-service/sidecar" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/event" @@ -35,8 +36,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/oppprof" "github.com/ethereum-optimism/optimism/op-service/retry" "github.com/ethereum-optimism/optimism/op-service/sources" - - "github.com/ethereum-optimism/optimism/op-service/nodekit" ) var ErrAlreadyClosed = errors.New("node is already closed") @@ -420,12 +419,22 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger n.safeDB = safedb.Disabled } - var nodekitClient *nodekit.Client - if cfg.NodeKitURL != "" { - nodekitClient = nodekit.NewClient(n.log, cfg.NodeKitURL) + // var nodekitClient *nodekit.Client + // if cfg.NodeKitURL != "" { + // nodekitClient = nodekit.NewClient(n.log, cfg.NodeKitURL) + // } + + var sidecarClient *sidecar.Client = nil + if !cfg.Driver.SequencerEnabled { + n.log.Info("sequencer not enabled, sidecar not needed") + } else { + sidecarClient, err = sidecar.NewSidecarClient(&cfg.Sidecar) + if err != nil { + return fmt.Errorf("failed to instantiatte arcadia client: %w", err) + } } - n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n.beacon, nodekitClient, n, n, n.log, snapshotLog, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, plasmaDA, func(id string, data []byte) { + n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n.beacon, sidecarClient, n, n, n.log, snapshotLog, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, plasmaDA, func(id string, data []byte) { n.httpEventStreamServer.Publish(id, &sse.Event{ Data: data, }) diff --git a/op-node/rollup/derive/engine_controller.go b/op-node/rollup/derive/engine_controller.go index d9103fcd5cbc..12919beb839c 100644 --- a/op-node/rollup/derive/engine_controller.go +++ b/op-node/rollup/derive/engine_controller.go @@ -186,6 +186,8 @@ func (e *EngineController) StartPayload(ctx context.Context, parent eth.L2BlockR e.safeAttrs = attrs } + e.log.Debug("setting building info", "buildingOnto", e.buildingOnto, "safe", e.buildingSafe, "buildingInfo", e.buildingInfo) + return BlockInsertOK, nil } diff --git a/op-node/rollup/derive/engine_queue.go b/op-node/rollup/derive/engine_queue.go index 0e55105b51d7..a38d8f0bafd0 100644 --- a/op-node/rollup/derive/engine_queue.go +++ b/op-node/rollup/derive/engine_queue.go @@ -564,7 +564,6 @@ func (eq *EngineQueue) tryNextSafeAttributes(ctx context.Context) error { // If something other than a simple advance occurred, perform a full reset return NewResetError(fmt.Errorf("pending safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s", eq.ec.PendingSafeL2Head(), eq.ec.PendingSafeL2Head().ParentID(), eq.safeAttributes.parent)) - } if 
eq.ec.PendingSafeL2Head().Number < eq.ec.UnsafeL2Head().Number { return eq.consolidateNextSafeAttributes(ctx) diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index ac6d2f2143ab..f295c2784272 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -13,7 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/nodekit" + "github.com/ethereum-optimism/optimism/op-service/sidecar" ) type Metrics interface { @@ -121,7 +121,8 @@ func NewDriver( l2 L2Chain, l1 L1Chain, l1Blobs derive.L1BlobsFetcher, - nodekitClient *nodekit.Client, + // nodekitClient *nodekit.Client, + sidecarClient sidecar.RPCInterface, altSync AltSync, network Network, log log.Logger, @@ -145,7 +146,7 @@ func NewDriver( // derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, attrsSequencer, engine, metrics, syncCfg, safeHeadListener) derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, attrsSequencer, engine, metrics, syncCfg, safeHeadListener) meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics. - sequencer := NewSequencer(log, cfg, meteredEngine, l2, attrBuilder, findL1Origin, nodekitClient, metrics, broadcastPayloadAttrs) + sequencer := NewSequencer(log, cfg, meteredEngine, l2, attrBuilder, findL1Origin, sidecarClient, metrics, broadcastPayloadAttrs) driverCtx, driverCancel := context.WithCancel(context.Background()) asyncGossiper := async.NewAsyncGossiper(driverCtx, network, log, metrics) return &Driver{ diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index a5908a941afb..6ad7d2c0ca9d 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -15,7 +15,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/nodekit" + "github.com/ethereum-optimism/optimism/op-service/sidecar" ) type SequencerMode uint64 @@ -63,10 +63,11 @@ type Sequencer struct { engine derive.EngineControl - cfgFetcher derive.SystemConfigL2Fetcher - attrBuilder derive.AttributesBuilder - l1OriginSelector L1OriginSelectorIface - nodekit nodekit.RPCInterface + cfgFetcher derive.SystemConfigL2Fetcher + attrBuilder derive.AttributesBuilder + l1OriginSelector L1OriginSelectorIface + sidecar sidecar.RPCInterface + broadcastPayloadAttrs func(id string, data []byte) metrics SequencerMetrics @@ -76,10 +77,10 @@ type Sequencer struct { nextAction time.Time // The current NodeKit block we are building, if applicable. 
- nodekitBatch *InProgressBatch + arcadiaPayload *sidecar.GetPayloadResponse } -func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.EngineControl, cfgFetcher derive.SystemConfigL2Fetcher, attributesBuilder derive.AttributesBuilder, l1OriginSelector L1OriginSelectorIface, nodekit nodekit.RPCInterface, metrics SequencerMetrics, broadcastPayloadAttrs func(id string, data []byte)) *Sequencer { +func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.EngineControl, cfgFetcher derive.SystemConfigL2Fetcher, attributesBuilder derive.AttributesBuilder, l1OriginSelector L1OriginSelectorIface, sidecar sidecar.RPCInterface, metrics SequencerMetrics, broadcastPayloadAttrs func(id string, data []byte)) *Sequencer { return &Sequencer{ log: log, rollupCfg: rollupCfg, @@ -89,198 +90,198 @@ func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.Engine cfgFetcher: cfgFetcher, attrBuilder: attributesBuilder, l1OriginSelector: l1OriginSelector, - nodekit: nodekit, + sidecar: sidecar, metrics: metrics, - nodekitBatch: nil, broadcastPayloadAttrs: broadcastPayloadAttrs, + arcadiaPayload: nil, } } // startBuildingNodeKitBatch initiates an NodeKit block building job on top of the given L2 head, // safe and finalized blocks. After this function succeeds, `d.nodekitBatch` is guaranteed to be // non-nil. -func (d *Sequencer) startBuildingNodeKitBatch(ctx context.Context, l2Head eth.L2BlockRef) error { - windowStart := l2Head.Time + d.rollupCfg.BlockTime - windowEnd := windowStart + d.rollupCfg.BlockTime - - // Fetch the available SEQ blocks from this sequencing window. - d.log.Info("Starting FetchHeadersForWindow", "start", windowStart, "end", windowEnd) - - blocks, err := d.nodekit.FetchHeadersForWindow(ctx, windowStart, windowEnd) - if err != nil { - return err - } - - d.nodekitBatch = &InProgressBatch{ - onto: l2Head, - windowStart: windowStart, - windowEnd: windowEnd, - jst: eth.L2BatchJustification{ - Prev: blocks.Prev, - }, - } - return d.updateNodeKitBatch(ctx, blocks.Window, blocks.Next) -} +// func (d *Sequencer) startBuildingNodeKitBatch(ctx context.Context, l2Head eth.L2BlockRef) error { +// windowStart := l2Head.Time + d.rollupCfg.BlockTime +// windowEnd := windowStart + d.rollupCfg.BlockTime + +// // Fetch the available SEQ blocks from this sequencing window. +// d.log.Info("Starting FetchHeadersForWindow", "start", windowStart, "end", windowEnd) + +// blocks, err := d.nodekit.FetchHeadersForWindow(ctx, windowStart, windowEnd) +// if err != nil { +// return err +// } + +// d.nodekitBatch = &InProgressBatch{ +// onto: l2Head, +// windowStart: windowStart, +// windowEnd: windowEnd, +// jst: eth.L2BatchJustification{ +// Prev: blocks.Prev, +// }, +// } +// return d.updateNodeKitBatch(ctx, blocks.Window, blocks.Next) +// } // updateNodeKitBatch appends the transactions contained in the NodeKit blocks denoted by // `newHeaders` to the current in-progress batch. If `end`, the first block after the window of this // batch, is available, it will be saved in the `Next` field of the batch justification. -func (d *Sequencer) updateNodeKitBatch(ctx context.Context, newHeaders []nodekit.Header, end *nodekit.Header) error { - batch := d.nodekitBatch - for _, header := range newHeaders { - blocks := batch.jst.Blocks - numBlocks := len(blocks) - - // Validate that the given header is in the window and in the right order. 
- if header.Timestamp >= batch.windowEnd { - return derive.NewCriticalError(fmt.Errorf("inconsistent data from NodeKit query service: header %v in window has timestamp after window end %d", header, batch.windowEnd)) - } - if header.Timestamp < batch.windowStart { - // Eventually, we should return an error here. However due to a limitation in the - // current implementation of SEQ/NodeKit, block timestamps will sometimes decrease. - d.log.Error("inconsistent data from NodeKit query service: header is before window start", "header", header, "start", batch.windowStart) - } - prev := batch.jst.Prev - if numBlocks != 0 { - prev = &blocks[numBlocks-1].Header - } - if prev != nil && header.Timestamp < prev.Timestamp { - // Similarly, this should eventually be an error, but can happen with the current - // version of NodeKit. - d.log.Error("inconsistent data from NodeKit query service: header is before its predecessor", "header", header, "prev", prev) - } - - txs, err := d.nodekit.FetchTransactionsInBlock(ctx, &header, d.rollupCfg.L2ChainID.Uint64()) - if err != nil { - return err - } - d.log.Info("adding new transactions from NodeKit", "block", header, "count", len(txs.Transactions)) - batch.jst.Blocks = append(blocks, eth.NodeKitBlockJustification{ - Header: header, - //Proof: txs.Proof, - }) - for _, tx := range txs.Transactions { - batch.transactions = append(batch.transactions, []byte(tx)) - txETH := new(types.Transaction) - err := txETH.UnmarshalBinary(tx) - if err != nil { - d.log.Info("unable to unmarshal transaction into eth tx instance type") - } - - d.log.Info("tx from nodekit info", "txHash", txETH.Hash().Hex()) - } - } - - batch.jst.Next = end - return nil -} +// func (d *Sequencer) updateNodeKitBatch(ctx context.Context, newHeaders []nodekit.Header, end *nodekit.Header) error { +// batch := d.nodekitBatch +// for _, header := range newHeaders { +// blocks := batch.jst.Blocks +// numBlocks := len(blocks) + +// // Validate that the given header is in the window and in the right order. +// if header.Timestamp >= batch.windowEnd { +// return derive.NewCriticalError(fmt.Errorf("inconsistent data from NodeKit query service: header %v in window has timestamp after window end %d", header, batch.windowEnd)) +// } +// if header.Timestamp < batch.windowStart { +// // Eventually, we should return an error here. However due to a limitation in the +// // current implementation of SEQ/NodeKit, block timestamps will sometimes decrease. +// d.log.Error("inconsistent data from NodeKit query service: header is before window start", "header", header, "start", batch.windowStart) +// } +// prev := batch.jst.Prev +// if numBlocks != 0 { +// prev = &blocks[numBlocks-1].Header +// } +// if prev != nil && header.Timestamp < prev.Timestamp { +// // Similarly, this should eventually be an error, but can happen with the current +// // version of NodeKit. 
+// d.log.Error("inconsistent data from NodeKit query service: header is before its predecessor", "header", header, "prev", prev) +// } + +// txs, err := d.nodekit.FetchTransactionsInBlock(ctx, &header, d.rollupCfg.L2ChainID.Uint64()) +// if err != nil { +// return err +// } +// d.log.Info("adding new transactions from NodeKit", "block", header, "count", len(txs.Transactions)) +// batch.jst.Blocks = append(blocks, eth.NodeKitBlockJustification{ +// Header: header, +// //Proof: txs.Proof, +// }) +// for _, tx := range txs.Transactions { +// batch.transactions = append(batch.transactions, []byte(tx)) +// txETH := new(types.Transaction) +// err := txETH.UnmarshalBinary(tx) +// if err != nil { +// d.log.Info("unable to unmarshal transaction into eth tx instance type") +// } + +// d.log.Info("tx from nodekit info", "txHash", txETH.Hash().Hex()) +// } +// } + +// batch.jst.Next = end +// return nil +// } // tryToSealNodeKitBatch polls for new transactions from the NodeKit Sequencer to append to the // current NodeKit Block. If the resulting block is complete (NodeKit has sequenced at least one // block with a timestamp beyond the end of the current sequencing window) it will submit the block // to the engine and return the resulting execution payload. If the block cannot be sealed yet // because NodeKit hasn't sequenced enough blocks, returns nil. -func (d *Sequencer) tryToSealNodeKitBatch(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { - batch := d.nodekitBatch - if !batch.complete() { - blocks, err := d.nodekit.FetchRemainingHeadersForWindow(ctx, batch.jst.Last().Height+1, batch.windowEnd) - if err != nil { - return nil, err - } - if err := d.updateNodeKitBatch(ctx, blocks.Window, blocks.Next); err != nil { - return nil, err - } - } - if batch.complete() { - return d.sealNodeKitBatch(ctx, agossip, sequencerConductor) - } else { - return nil, nil - } -} +// func (d *Sequencer) tryToSealNodeKitBatch(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { +// batch := d.nodekitBatch +// if !batch.complete() { +// blocks, err := d.nodekit.FetchRemainingHeadersForWindow(ctx, batch.jst.Last().Height+1, batch.windowEnd) +// if err != nil { +// return nil, err +// } +// if err := d.updateNodeKitBatch(ctx, blocks.Window, blocks.Next); err != nil { +// return nil, err +// } +// } +// if batch.complete() { +// return d.sealNodeKitBatch(ctx, agossip, sequencerConductor) +// } else { +// return nil, nil +// } +// } // sealNodeKitBatch submits the current NodeKit batch to the engine and return the resulting // execution payload. -func (d *Sequencer) sealNodeKitBatch(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { - batch := d.nodekitBatch - - sysCfg, err := d.cfgFetcher.SystemConfigByL2Hash(ctx, batch.onto.Hash) - if err != nil { - return nil, err - } - // Deterministically choose an L1 origin for this L2 batch, based on the latest L1 block that - // NodeKit has told us exists, but adjusting as needed to meet the constraints of the - // derivation pipeline. 
- - l1Origin, err := derive.NodeKitL1Origin(ctx, d.rollupCfg, &sysCfg, batch.onto, - batch.jst.Next.L1Head, d.l1OriginSelector, d.log) - if err != nil { - return nil, err - } - - // In certain edge cases, like when the L2 has fallen too far behind the L1, we are required to - // submit empty batches until we catch up. - if derive.NodeKitBatchMustBeEmpty(d.rollupCfg, l1Origin, batch.windowStart) { - batch.transactions = nil - } - - attrs, err := d.attrBuilder.PreparePayloadAttributes(ctx, batch.onto, l1Origin.ID(), &batch.jst) - if err != nil { - return nil, err - } - attrs.NoTxPool = true - attrs.Transactions = append(attrs.Transactions, batch.transactions...) - - // trigger next block production by javalin - // go func() { - // attrsEvent := ð.BuilderPayloadAttributesEvent{ - // Version: "", - // Data: eth.BuilderPayloadAttributesEventData{ - // ProposalSlot: l2Head.Number + 1, - // ParentBlockHash: l2Head.Hash, - // PayloadAttributes: eth.BuilderPayloadAttributes{ - // Timestamp: uint64(attrs.Timestamp), - // PrevRandao: common.Hash(attrs.PrevRandao), - // SuggestedFeeRecipient: attrs.SuggestedFeeRecipient, - // GasLimit: uint64(*attrs.GasLimit), - // // here we include zero transactions just to trigger javalin block production - // // javalin will fetch transactions from op-geth mempool - // Transactions: types.Transactions{}, - // }, - // }, - // } - - // attrsData, err := json.Marshal(attrsEvent) - // if err != nil { - // d.log.Error("failed to marshal payload attributes", "err", err) - // } - // d.broadcastPayloadAttrs("payload_attributes", attrsData) - // }() - - d.log.Debug("prepared attributes for new NodeKit block", - "num", batch.onto.Number+1, "time", uint64(attrs.Timestamp), "origin", l1Origin) - - // Start a payload building process. - withParent := derive.NewAttributesWithParent(attrs, batch.onto, false) - errTyp, err := d.engine.StartPayload(ctx, batch.onto, withParent, false) - if err != nil { - return nil, fmt.Errorf("failed to start building on top of L2 chain %s, error (%d): %w", batch.onto, errTyp, err) - } - - // Immediately seal the block in the engine. - payload, errTyp, err := d.engine.ConfirmPayload(ctx, agossip, sequencerConductor) - if err != nil { - _ = d.engine.CancelPayload(ctx, true) - return nil, fmt.Errorf("failed to complete building block: error (%d): %w", errTyp, err) - } - d.nodekitBatch = nil - return payload, nil -} - -func (d *Sequencer) cancelBuildingNodeKitBatch() { - // If we're in the process of building an NodeKit batch, we haven't sent anything to the engine - // yet. All we have to do is forget the batch. - d.nodekitBatch = nil -} +// func (d *Sequencer) sealNodeKitBatch(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { +// batch := d.nodekitBatch + +// sysCfg, err := d.cfgFetcher.SystemConfigByL2Hash(ctx, batch.onto.Hash) +// if err != nil { +// return nil, err +// } +// // Deterministically choose an L1 origin for this L2 batch, based on the latest L1 block that +// // NodeKit has told us exists, but adjusting as needed to meet the constraints of the +// // derivation pipeline. + +// l1Origin, err := derive.NodeKitL1Origin(ctx, d.rollupCfg, &sysCfg, batch.onto, +// batch.jst.Next.L1Head, d.l1OriginSelector, d.log) +// if err != nil { +// return nil, err +// } + +// // In certain edge cases, like when the L2 has fallen too far behind the L1, we are required to +// // submit empty batches until we catch up. 
+// if derive.NodeKitBatchMustBeEmpty(d.rollupCfg, l1Origin, batch.windowStart) { +// batch.transactions = nil +// } + +// attrs, err := d.attrBuilder.PreparePayloadAttributes(ctx, batch.onto, l1Origin.ID(), &batch.jst) +// if err != nil { +// return nil, err +// } +// attrs.NoTxPool = true +// attrs.Transactions = append(attrs.Transactions, batch.transactions...) + +// // trigger next block production by javalin +// // go func() { +// // attrsEvent := ð.BuilderPayloadAttributesEvent{ +// // Version: "", +// // Data: eth.BuilderPayloadAttributesEventData{ +// // ProposalSlot: l2Head.Number + 1, +// // ParentBlockHash: l2Head.Hash, +// // PayloadAttributes: eth.BuilderPayloadAttributes{ +// // Timestamp: uint64(attrs.Timestamp), +// // PrevRandao: common.Hash(attrs.PrevRandao), +// // SuggestedFeeRecipient: attrs.SuggestedFeeRecipient, +// // GasLimit: uint64(*attrs.GasLimit), +// // // here we include zero transactions just to trigger javalin block production +// // // javalin will fetch transactions from op-geth mempool +// // Transactions: types.Transactions{}, +// // }, +// // }, +// // } + +// // attrsData, err := json.Marshal(attrsEvent) +// // if err != nil { +// // d.log.Error("failed to marshal payload attributes", "err", err) +// // } +// // d.broadcastPayloadAttrs("payload_attributes", attrsData) +// // }() + +// d.log.Debug("prepared attributes for new NodeKit block", +// "num", batch.onto.Number+1, "time", uint64(attrs.Timestamp), "origin", l1Origin) + +// // Start a payload building process. +// withParent := derive.NewAttributesWithParent(attrs, batch.onto, false) +// errTyp, err := d.engine.StartPayload(ctx, batch.onto, withParent, false) +// if err != nil { +// return nil, fmt.Errorf("failed to start building on top of L2 chain %s, error (%d): %w", batch.onto, errTyp, err) +// } + +// // Immediately seal the block in the engine. +// payload, errTyp, err := d.engine.ConfirmPayload(ctx, agossip, sequencerConductor) +// if err != nil { +// _ = d.engine.CancelPayload(ctx, true) +// return nil, fmt.Errorf("failed to complete building block: error (%d): %w", errTyp, err) +// } +// d.nodekitBatch = nil +// return payload, nil +// } + +// func (d *Sequencer) cancelBuildingNodeKitBatch() { +// // If we're in the process of building an NodeKit batch, we haven't sent anything to the engine +// // yet. All we have to do is forget the batch. +// d.nodekitBatch = nil +// } // startBuildingLegacyBlock initiates a legacy block building job on top of the given L2 head, safe and finalized blocks, and using the provided l1Origin. func (d *Sequencer) startBuildingLegacyBlock(ctx context.Context) error { @@ -361,30 +362,29 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { return time.Second * time.Duration(d.rollupCfg.BlockTime) } - switch d.mode { - case NodeKit: + rollupStatus, err := d.sidecar.RollupStatus() + if err != nil { + d.log.Warn("err querying rollup status", "err", err) + } + switch rollupStatus { + case sidecar.ROLLUP_REGISTERED: return d.planNextNodeKitSequencerAction() - case Legacy: - return d.planNextLegacySequencerAction() default: - // If we don't yet know what mode we are in, our first action is going to be discovering our - // mode based on the L2 system config. We should start this immediately, since it will - // impact our scheduling decisions for all future actions. 
- return 0 + return d.planNextLegacySequencerAction() } - } func (d *Sequencer) planNextNodeKitSequencerAction() time.Duration { - head := d.engine.UnsafeL2Head() + // head := d.engine.UnsafeL2Head() now := d.timeNow() // We may have to wait till the next sequencing action, e.g. upon an error. // However, we ignore this delay if we are building a block and the L2 head has changed, in // which case we need to respond immediately. delay := d.nextAction.Sub(now) - reorg := d.nodekitBatch != nil && d.nodekitBatch.onto.Hash != head.Hash - if delay > 0 && !reorg { + // reorg := d.nodekitBatch != nil && d.nodekitBatch.onto.Hash != head.Hash + // if delay > 0 && !reorg { + if delay > 0 { return delay } @@ -434,18 +434,21 @@ func (d *Sequencer) planNextLegacySequencerAction() time.Duration { // BuildingOnto returns the L2 head reference that the latest block is or was being built on top of. func (d *Sequencer) BuildingOnto() eth.L2BlockRef { - if d.nodekitBatch != nil { - return d.nodekitBatch.onto - } else { - ref, _, _ := d.engine.BuildingPayload() - return ref - } + // return d.engine.BuildingPayload() + // if d.nodekitBatch != nil { + // return d.nodekitBatch.onto + // } else { + ref, _, _ := d.engine.BuildingPayload() + return ref + // } } +// seems not used except op-e2e func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { switch d.mode { case NodeKit: - return d.startBuildingNodeKitBatch(ctx, d.engine.UnsafeL2Head()) + // return d.startBuildingNodeKitBatch(ctx, d.engine.UnsafeL2Head()) + fallthrough case Legacy: return d.startBuildingLegacyBlock(ctx) default: @@ -461,7 +464,8 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { func (d *Sequencer) CompleteBuildingBlock(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { switch d.mode { case NodeKit: - return d.tryToSealNodeKitBatch(ctx, agossip, sequencerConductor) + fallthrough + // return d.tryToSealNodeKitBatch(ctx, agossip, sequencerConductor) case Legacy: return d.completeBuildingLegacyBlock(ctx, agossip, sequencerConductor) default: @@ -472,7 +476,8 @@ func (d *Sequencer) CompleteBuildingBlock(ctx context.Context, agossip async.Asy func (d *Sequencer) CancelBuildingBlock(ctx context.Context) { switch d.mode { case NodeKit: - d.cancelBuildingNodeKitBatch() + // d.cancelBuildingNodeKitBatch() + fallthrough case Legacy: d.cancelBuildingLegacyBlock(ctx) default: @@ -503,71 +508,144 @@ func (d *Sequencer) CancelBuildingBlock(ctx context.Context) { // but the derivation can continue to reset until the chain is correct. // If the engine is currently building safe blocks, then that building is not interrupted, and sequencing is delayed. 
func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { + rollupStatus, err := d.sidecar.RollupStatus() + if err != nil { + d.log.Warn("error querying rollup status") + } + + onto, buildingID, safe := d.engine.BuildingPayload() + // if still building under legacy mode but rollup is registered on Arcadia, we cancel that + if buildingID != (eth.PayloadID{}) && rollupStatus == sidecar.ROLLUP_REGISTERED { + d.log.Debug("canceling payload", "payloadID", buildingID) + err := d.engine.CancelPayload(ctx, true) + if err != nil { + d.log.Warn("unable to force cancel payload", "err", err) + // delay force cancel + d.nextAction = d.timeNow().Add(time.Second) + return nil, nil + } + } // if the engine returns a non-empty payload, OR if the async gossiper already has a payload, we can CompleteBuildingBlock // Regardless of what mode we are in (NodeKit or Legacy) our first priority is to not bother // the engine if it is busy building safe blocks (and thus changing the head that we would sync // on top of). Give it time to sync up. - onto, buildingID, safe := d.engine.BuildingPayload() - if buildingID != (eth.PayloadID{}) || agossip.Get() != nil && safe { - d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) - // approximates the worst-case time it takes to build a block, to reattempt sequencing after. - d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) - return nil, nil + if buildingID != (eth.PayloadID{}) || agossip.Get() != nil { + if safe { + d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time, "agossip.Get()", agossip.Get(), "safe", safe) + // approximates the worst-case time it takes to build a block, to reattempt sequencing after. + d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) + return nil, nil + } } - switch d.mode { - case NodeKit: - return d.buildNodeKitBatch(ctx, agossip, sequencerConductor) - case Legacy: - return d.buildLegacyBlock(ctx, agossip, sequencerConductor, buildingID != eth.PayloadID{} || agossip.Get() != nil) + switch rollupStatus { + case sidecar.ROLLUP_REGISTERED: + d.log.Debug("rollup registered, building arcadia block") + return d.buildArcadiaBatch(ctx, agossip, sequencerConductor) default: - // If we don't know what mode we are in, figure it out and then schedule another action - // immediately. - if err := d.detectMode(ctx); err != nil { - return nil, d.handleNonEngineError("to determine mode", err) - } - // Now that we know what mode we're in, return to the scheduler to plan the next action. - return nil, nil + d.log.Debug("rollup exited or not registered, building legacy block") + return d.buildLegacyBlock(ctx, agossip, sequencerConductor, buildingID != eth.PayloadID{} || agossip.Get() != nil) } } -func (d *Sequencer) buildNodeKitBatch(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { - // First, check if there has been a reorg. If so, drop the current block and restart. 
- //TODO check this out for reorg +func (d *Sequencer) buildArcadiaBatch(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { + buildingStartAt := time.Now() + head := d.engine.UnsafeL2Head() - if d.nodekitBatch != nil && d.nodekitBatch.onto.Hash != head.Hash { - d.log.Warn("reorg detected", "head", head, "onto", d.nodekitBatch.onto) - d.nodekitBatch = nil + arcadiaTxs, err := d.sidecar.GetPayload(head.Number + 1) + if err != nil { + // delay and retry + d.log.Warn("unable to fetch payload from sidecar", "err", err) + d.nextAction = time.Now().Add(200 * time.Millisecond) + return nil, nil } - // Begin a new block if necessary. - if d.nodekitBatch == nil { - d.log.Info("building new NodeKit batch", "onto", head) - if err := d.startBuildingNodeKitBatch(ctx, head); err != nil { - return nil, d.handleNonEngineError("starting NodeKit block", err) - } + l1Origin, err := d.l1OriginSelector.FindL1Origin(ctx, head) + if err != nil { + d.log.Error("Error finding next L1 Origin", "err", err) + return nil, err + } + + if !(head.L1Origin.Hash == l1Origin.ParentHash || head.L1Origin.Hash == l1Origin.Hash) { + d.metrics.RecordSequencerInconsistentL1Origin(head.L1Origin, l1Origin.ID()) + return nil, derive.NewResetError(fmt.Errorf("cannot build new L2 block with L1 origin %s (parent L1 %s) on current L2 head %s with L1 origin %s", l1Origin, l1Origin.ParentHash, head, head.L1Origin)) } - // Poll for transactions from the NodeKit Sequencer and see if we can submit the block. - block, err := d.tryToSealNodeKitBatch(ctx, agossip, sequencerConductor) + d.log.Info("creating new block", "parent", head, "l1Origin", l1Origin) + + attrs, err := d.attrBuilder.PreparePayloadAttributes(ctx, head, l1Origin.ID(), nil) if err != nil { - return nil, d.handlePossibleEngineError("trying to seal NodeKit block", err) + return nil, err } - if block == nil { - // If we didn't seal the block, it means we reached the end of the NodeKit block stream. - // Wait a reasonable amount of time before checking for more transactions. - d.log.Debug("NodeKit batch was not ready to seal, will retry in 1 second") - d.nextAction = d.timeNow().Add(time.Second) - return nil, nil + + attrs.NoTxPool = true + attrs.Transactions = append(attrs.Transactions, arcadiaTxs...) + + d.log.Debug("prepared attributes for new NodeKit block", + "num", head.Number+1, "time", uint64(attrs.Timestamp), "origin", l1Origin) + + // Start a payload building process. + withParent := derive.NewAttributesWithParent(attrs, head, false) + errTyp, err := d.engine.StartPayload(ctx, head, withParent, false) + if err != nil { + return nil, fmt.Errorf("failed to start building on top of L2 chain %s, error (%d): %w", head, errTyp, err) + } + + // Immediately seal the block in the engine. + payload, errTyp, err := d.engine.ConfirmPayload(ctx, agossip, sequencerConductor) + if err != nil { + _ = d.engine.CancelPayload(ctx, true) + return nil, fmt.Errorf("failed to complete building block: error (%d): %w", errTyp, err) + } + timeBlockProductionUsed := time.Since(buildingStartAt) + + // plan next production + if time.Second*time.Duration(d.rollupCfg.BlockTime) <= timeBlockProductionUsed { + d.nextAction = time.Now() } else { - // If we did seal the block, return it and do not set a delay, so that the scheduler will - // start the next action (starting the next block) immediately. 
- d.log.Info("sealed NodeKit batch", "payload", block) - return block, nil + d.nextAction = time.Now().Add(time.Second*time.Duration(d.rollupCfg.BlockTime) - timeBlockProductionUsed) } + return payload, nil } +// func (d *Sequencer) buildNodeKitBatch(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { +// // First, check if there has been a reorg. If so, drop the current block and restart. +// //TODO check this out for reorg +// head := d.engine.UnsafeL2Head() +// if d.nodekitBatch != nil && d.nodekitBatch.onto.Hash != head.Hash { +// d.log.Warn("reorg detected", "head", head, "onto", d.nodekitBatch.onto) +// d.nodekitBatch = nil +// } + +// // Begin a new block if necessary. +// if d.nodekitBatch == nil { +// d.log.Info("building new NodeKit batch", "onto", head) +// if err := d.startBuildingNodeKitBatch(ctx, head); err != nil { +// return nil, d.handleNonEngineError("starting NodeKit block", err) +// } +// } + +// // Poll for transactions from the NodeKit Sequencer and see if we can submit the block. +// block, err := d.tryToSealNodeKitBatch(ctx, agossip, sequencerConductor) +// if err != nil { +// return nil, d.handlePossibleEngineError("trying to seal NodeKit block", err) +// } +// if block == nil { +// // If we didn't seal the block, it means we reached the end of the NodeKit block stream. +// // Wait a reasonable amount of time before checking for more transactions. +// d.log.Debug("NodeKit batch was not ready to seal, will retry in 1 second") +// d.nextAction = d.timeNow().Add(time.Second) +// return nil, nil +// } else { +// // If we did seal the block, return it and do not set a delay, so that the scheduler will +// // start the next action (starting the next block) immediately. 
+// d.log.Info("sealed NodeKit batch", "payload", block) +// return block, nil +// } + +// } + func (d *Sequencer) buildLegacyBlock(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor, building bool) (*eth.ExecutionPayloadEnvelope, error) { if building { envelope, err := d.completeBuildingLegacyBlock(ctx, agossip, sequencerConductor) @@ -620,7 +698,6 @@ func (d *Sequencer) buildLegacyBlock(ctx context.Context, agossip async.AsyncGos } return nil, nil } - } func (d *Sequencer) detectMode(ctx context.Context) error { diff --git a/op-node/service.go b/op-node/service.go index 21f6fbe8a7f8..df8d2d9fa038 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -12,10 +12,12 @@ import ( "github.com/ethereum-optimism/optimism/op-node/chaincfg" plasma "github.com/ethereum-optimism/optimism/op-plasma" "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/sidecar" "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" + "github.com/flashbots/go-boost-utils/bls" "github.com/urfave/cli/v2" "github.com/ethereum-optimism/optimism/op-node/flags" @@ -74,6 +76,14 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { haltOption = "" } + var sidecarConfig *sidecar.ClientConfig = &sidecar.ClientConfig{} + if driverConfig.SequencerEnabled { + sidecarConfig, err = NewSidecarConfig(ctx, log) + if err != nil { + return nil, fmt.Errorf("failed to create the sidecar config: %w", err) + } + } + cfg := &node.Config{ L1: l1Endpoint, L2: l2Endpoint, @@ -112,6 +122,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { Plasma: plasma.ReadCLIConfig(ctx), NodeKitURL: ctx.String(flags.NodeKitURL.Name), + Sidecar: *sidecarConfig, } if err := cfg.LoadPersisted(log); err != nil { @@ -199,6 +210,31 @@ func NewDriverConfig(ctx *cli.Context) *driver.Config { } } +func NewSidecarConfig(ctx *cli.Context, log log.Logger) (*sidecar.ClientConfig, error) { + skHex := ctx.String(flags.SidecarSecretKey.Name) + log.Debug("sidecar config received", "skHex", skHex, "url", ctx.String(flags.SidecarURL.Name), "chainID", ctx.String(flags.ChainID.Name)) + skBytes, err := hexutil.Decode(skHex) + if err != nil { + return nil, err + } + sk, err := bls.SecretKeyFromBytes(skBytes) + if err != nil { + return nil, err + } + pk, err := bls.PublicKeyFromSecretKey(sk) + if err != nil { + return nil, err + } + + return &sidecar.ClientConfig{ + SidecarUrl: ctx.String(flags.SidecarURL.Name), + Logger: log, + SequencerPubkey: pk, + SequencerSecretKey: sk, + ChainID: ctx.String(flags.ChainID.Name), + }, nil +} + func NewRollupConfigFromCLI(log log.Logger, ctx *cli.Context) (*rollup.Config, error) { network := ctx.String(opflags.NetworkFlagName) rollupConfigPath := ctx.String(opflags.RollupConfigFlagName) diff --git a/op-service/sidecar/client.go b/op-service/sidecar/client.go new file mode 100644 index 000000000000..749e5cdca60a --- /dev/null +++ b/op-service/sidecar/client.go @@ -0,0 +1,168 @@ +package sidecar + +import ( + "bytes" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/flashbots/go-boost-utils/bls" +) + +const HEADER_ROLLUP_SIG = "X-ROLLUP-SEQ-SIG" + +const ( + ROLLUP_REGISTERED = iota + ROLLUP_EXITED + 
diff --git a/op-service/sidecar/client.go b/op-service/sidecar/client.go
new file mode 100644
index 000000000000..749e5cdca60a
--- /dev/null
+++ b/op-service/sidecar/client.go
@@ -0,0 +1,168 @@
+package sidecar
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/flashbots/go-boost-utils/bls"
+)
+
+const HEADER_ROLLUP_SIG = "X-ROLLUP-SEQ-SIG"
+
+const (
+	ROLLUP_REGISTERED = iota
+	ROLLUP_EXITED
+	ROLLUP_NOT_REGISTERED
+)
+
+const (
+	pathGetPayload   = "/rollup/getpayload"
+	pathRollupStatus = "/rollup/status"
+)
+
+type ClientConfig struct {
+	SidecarUrl         string
+	Logger             log.Logger
+	SequencerPubkey    *bls.PublicKey
+	SequencerSecretKey *bls.SecretKey
+
+	ChainID string
+}
+
+type RPCInterface interface {
+	GetPayload(height uint64) ([]hexutil.Bytes, error)
+	RollupStatus() (int, error)
+}
+
+var _ RPCInterface = (*Client)(nil)
+
+type Client struct {
+	cfg ClientConfig
+	log log.Logger
+
+	sk         *bls.SecretKey
+	pk         *bls.PublicKey
+	httpClient *http.Client
+}
+
+func NewSidecarClient(cfg *ClientConfig) (*Client, error) {
+	return &Client{
+		cfg: *cfg,
+		log: cfg.Logger,
+
+		sk:         cfg.SequencerSecretKey,
+		pk:         cfg.SequencerPubkey,
+		httpClient: &http.Client{},
+	}, nil
+}
+
+type GetPayloadRequest struct {
+	ChainID     string `json:"chainId"`
+	BlockNumber uint64 `json:"blockNumber"`
+}
+
+// GetPayloadResponse is the response returned by the sidecar.
+type GetPayloadResponse struct {
+	// Top-of-block (TOB) transactions first, then rest-of-block (ROB) transactions.
+	Transactions []hexutil.Bytes `json:"transactions"`
+}
+
+// GetPayload fetches the transactions for the given L2 block height from the
+// sidecar, signing the request with the sequencer's BLS secret key.
+func (c *Client) GetPayload(height uint64) ([]hexutil.Bytes, error) {
+	endpoint := c.cfg.SidecarUrl + pathGetPayload
+
+	payloadReq := GetPayloadRequest{
+		ChainID:     c.cfg.ChainID,
+		BlockNumber: height,
+	}
+
+	reqBytes, err := json.Marshal(payloadReq)
+	if err != nil {
+		return nil, err
+	}
+	reqHash, err := sha256HashPayload(reqBytes)
+	if err != nil {
+		return nil, err
+	}
+	sig := bls.Sign(c.sk, reqHash)
+	sigBytes := sig.Bytes()
+
+	req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewBuffer(reqBytes))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set(HEADER_ROLLUP_SIG, hexutil.Encode(sigBytes[:]))
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		respBody, err := io.ReadAll(resp.Body)
+		if err != nil {
+			c.log.Warn("unable to read response body", "err", err)
+			return nil, fmt.Errorf("status code not 200: %d", resp.StatusCode)
+		}
+		return nil, fmt.Errorf("status code: %d, err: %s", resp.StatusCode, string(respBody))
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var payloadResp GetPayloadResponse
+	if err := json.Unmarshal(body, &payloadResp); err != nil {
+		return nil, err
+	}
+
+	return payloadResp.Transactions, nil
+}
+
+// RollupStatus reports whether this rollup is currently registered with the
+// NodeKit sidecar.
+func (c *Client) RollupStatus() (int, error) {
+	endpoint := c.cfg.SidecarUrl + pathRollupStatus
+
+	req, err := http.NewRequest(http.MethodPost, endpoint, nil)
+	if err != nil {
+		return ROLLUP_NOT_REGISTERED, err
+	}
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return ROLLUP_NOT_REGISTERED, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusTooEarly {
+		return ROLLUP_NOT_REGISTERED, nil
+	} else if resp.StatusCode != http.StatusOK {
+		respBody, err := io.ReadAll(resp.Body)
+		if err != nil {
+			c.log.Warn("unable to read response body", "err", err)
+			return ROLLUP_NOT_REGISTERED, nil
+		}
+		c.log.Warn("rollup status error", "err", string(respBody))
+		return ROLLUP_NOT_REGISTERED, nil
+	}
+
+	return ROLLUP_REGISTERED, nil
+}
+
+func sha256HashPayload(payload []byte) ([]byte, error) {
+	h := sha256.New()
+	_, err := h.Write(payload)
+	if err != nil {
+		return nil, err
+	}
+	bs := h.Sum(nil)
+	return bs, nil
+}
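For reference, a minimal sketch of how a caller could drive this client: decode the BLS key the same way `NewSidecarConfig` does, gate block production on `RollupStatus`, then pull the ordered transactions with `GetPayload`. The env-var names match the compose file below; the block height is a placeholder, and the real integration lives in the op-node driver rather than in a `main` like this:

```go
package main

import (
	"log"
	"os"

	"github.com/ethereum-optimism/optimism/op-service/sidecar"
	"github.com/ethereum/go-ethereum/common/hexutil"
	gethlog "github.com/ethereum/go-ethereum/log"
	"github.com/flashbots/go-boost-utils/bls"
)

func main() {
	// Decode the BLS secret exactly as NewSidecarConfig does.
	skBytes, err := hexutil.Decode(os.Getenv("SIDECAR_SECRET_KEY"))
	if err != nil {
		log.Fatal(err)
	}
	sk, err := bls.SecretKeyFromBytes(skBytes)
	if err != nil {
		log.Fatal(err)
	}
	pk, err := bls.PublicKeyFromSecretKey(sk)
	if err != nil {
		log.Fatal(err)
	}

	client, err := sidecar.NewSidecarClient(&sidecar.ClientConfig{
		SidecarUrl:         os.Getenv("SIDECAR_URL"),
		Logger:             gethlog.Root(),
		SequencerPubkey:    pk,
		SequencerSecretKey: sk,
		ChainID:            os.Getenv("L2_CHAINID"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Only pull payloads while the rollup is registered with NodeKit; otherwise
	// the node falls back to legacy, mempool-based block production.
	status, err := client.RollupStatus()
	if err != nil || status != sidecar.ROLLUP_REGISTERED {
		log.Printf("rollup not registered (status=%d, err=%v); staying in legacy mode", status, err)
		return
	}

	txs, err := client.GetPayload(1234) // illustrative L2 block height
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("sidecar returned %d transactions (TOB first, then ROB)", len(txs))
}
```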
diff --git a/ops-bedrock/docker-compose.yml b/ops-bedrock/docker-compose.yml
index 461403cf131a..24b8fe795cc4 100644
--- a/ops-bedrock/docker-compose.yml
+++ b/ops-bedrock/docker-compose.yml
@@ -112,6 +112,9 @@ services:
       --metrics.port=7300
       --pprof.enabled
       --rpc.enable-admin
+      --chain-id=${L2_CHAINID}
+      --sidecar.url=${SIDECAR_URL}
+      --sidecar.secret-key=${SIDECAR_SECRET_KEY}
     ports:
       - "$OP1_NODE_RPC_PORT:8545"
       - "$OP1_NODE_P2P_PORT:9003"
@@ -227,20 +230,22 @@ services:
       - "--builder"
       - "--builder.dry-run"
       - "--builder.local_relay"
-      - "--builder.seconds_in_slot=2"
-      - "--builder.submission_offset=100ms"
-      - "--builder.rate_limit_duration=200ms"
+      - "--builder.secret_key=${BUILDER_SECRET_KEY}"
+      - "--builder.seconds_in_slot=${BUILDER_SECONDS_IN_SLOT}"
+      - "--builder.block_resubmit_interval=${BUILDER_RESUBMIT_INTERVAL}"
+      - "--builder.submission_offset=${BUILDER_SUBMISSION_OFFSET}"
+      - "--builder.record_offset=${BUILDER_RECORD_OFFSET}"
+      - "--builder.rate_limit_duration=${BUILDER_RATE_LIMIT_DURATION}"
       - "--builder.beacon_endpoints=http://op-node-builder:8501/eth/v1"
-      - "--builder.baton=$BATON_URL" # TODO: to be configured
-      - "--builder.chainid=$L2_CHAINID" # TODO: to be configured
+      - "--builder.arcadia=${ARCADIA_URL}" # TODO: to be configured
+      - "--builder.chainid=${L2_CHAINID}" # TODO: to be configured
       # - "--miner.extradata=''"
       # - "--miner.algotype=greedy"
       - "--bootnodes=${ENODE}"
       - "--seq.url=${SEQ_ADDR}"
       - "--seq.chainid=${SEQ_CHAIN_ID}"
-      - "--seq.signer=${SEQ_SIGNER_HEX}"
+      # - "--seq.signer=${SEQ_SIGNER_HEX}"
 
-      # TODO: do we need this?
   op-node-builder:
     restart: on-failure:5
     networks:
@@ -266,11 +271,14 @@ services:
       --p2p.bootnodes=${ENR}
       --p2p.sync.req-resp
       --syncmode=consensus-layer
+      --chain-id=${L2_CHAINID}
+      --sidecar.url=${SIDECAR_URL}
+      --sidecar.secret-key=${SIDECAR_SECRET_KEY}
     ports:
       - "$OPNODE_BUILDER_RPC:8500"
       - "$OPNODE_BUILDER_METRICS_PORT:7300"
       # - "7501:8501"
     volumes:
       - "${PWD}/test-jwt-secret.txt:/config/test-jwt-secret.txt"
-      - "${PWD}/../.devnet/rollup.json:/rollup.json"
+      - "${DEVNET_DIR}/rollup.json:/rollup.json"
       - op_log:/op_log
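The compose file now passes both op-node services the BLS secret (`SIDECAR_SECRET_KEY`) that `GetPayload` uses to produce the `X-ROLLUP-SEQ-SIG` header. On the receiving side the check is the mirror image of the client's signing step: hash the raw JSON body with sha256 and verify the header against the registered sequencer public key. A sketch of that check, assuming `go-boost-utils` exposes `VerifySignatureBytes` (the actual sidecar implementation may differ):

```go
package sidecarauth

import (
	"crypto/sha256"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/flashbots/go-boost-utils/bls"
)

// verifyRollupSig mirrors the client's signing in GetPayload: the signature in
// X-ROLLUP-SEQ-SIG must cover the sha256 hash of the raw request body and
// verify against the rollup's registered sequencer public key.
func verifyRollupSig(body []byte, sigHeader string, sequencerPubkey []byte) (bool, error) {
	hash := sha256.Sum256(body)

	sigBytes, err := hexutil.Decode(sigHeader)
	if err != nil {
		return false, err
	}

	// Assumption: go-boost-utils/bls provides VerifySignatureBytes(msg, sig, pubkey).
	return bls.VerifySignatureBytes(hash[:], sigBytes, sequencerPubkey)
}
```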